/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
        if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                        return 0;
                if (smt_enabled_at_boot
                    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                        return 0;
        }

        return 1;
}
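
/*
 * Worked example (comment added for illustration): with smt=2 on the
 * kernel command line and 8 threads per core, smt_enabled_at_boot is 2,
 * so threads 0-1 of each core are bootable and threads 2-7 are held
 * back; with smt=off only thread 0 of each core comes up at boot.
 */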

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        if (!paca[nr].cpu_start) {
                paca[nr].cpu_start = 1;
                smp_mb();
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Ok it's not there, so it might be soft-unplugged, let's
         * try to bring it back
         */
        generic_set_cpu_up(nr);
        smp_wmb();
        smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

        return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
        tick_broadcast_ipi_handler();
        return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
        if (crash_ipi_function_ptr) {
                crash_ipi_function_ptr(get_irq_regs());
                return IRQ_HANDLED;
        }

#ifdef CONFIG_DEBUGGER
        debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

        return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* Optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
                return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK)
                return 1;
#endif
        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
                virq, smp_ipi_name[msg], err);

        return err;
}
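
/*
 * Illustrative (hypothetical) caller, added for clarity: a controller
 * driver with one hardware IPI per message might register them all as
 *
 *	for (msg = 0; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
 *		smp_request_message_ipi(ipi_virqs[msg], msg);
 *
 * where ipi_virqs[] is a made-up array; real callers obtain the virqs
 * from their own irq_domain mappings.
 */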

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        long messages;                  /* current messages */
        unsigned long data;             /* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        info->data = data;
}

void smp_muxed_ipi_set_message(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /*
         * Order previous accesses before accesses in the IPI handler.
         */
        smp_mb();
        message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        smp_muxed_ipi_set_message(cpu, msg);
        /*
         * cause_ipi functions are required to include a full barrier
         * before doing whatever causes the IPI.
         */
        smp_ops->cause_ipi(cpu, info->data);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
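
/*
 * Layout note (comment added): each message is one byte of the
 * "messages" long, set with message[msg] = 1 in
 * smp_muxed_ipi_set_message().  IPI_MESSAGE(msg) is that byte's value
 * within the long, e.g. on 64-bit big-endian IPI_MESSAGE(0) is
 * 1UL << 56 (byte 0 is the most significant byte), while on
 * little-endian it is 1UL << 0.
 */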

irqreturn_t smp_ipi_demux(void)
{
        struct cpu_messages *info = this_cpu_ptr(&ipi_message);
        unsigned long all;

        mb();   /* order any irq clear */

        do {
                all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
                /*
                 * Must check for PPC_MSG_RM_HOST_ACTION messages
                 * before PPC_MSG_CALL_FUNCTION messages because when
                 * a VM is destroyed, we call kick_all_cpus_sync()
                 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
                 * messages have completed before we free any VCPUs.
                 */
                if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
                        kvmppc_xics_ipi_action();
#endif
                if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
                        generic_smp_call_function_interrupt();
                if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
                        scheduler_ipi();
                if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
                        tick_broadcast_ipi_handler();
                if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
                        debug_ipi_action(0, NULL);
        } while (info->messages);

        return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
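
/*
 * Note (comment added): platforms either supply smp_ops->message_pass
 * directly (one hardware IPI per message) or leave it NULL and provide
 * smp_ops->cause_ipi, in which case the muxed path above records the
 * message in the per-cpu byte array and fires a single IPI that the
 * target cpu unpacks in smp_ipi_demux().
 */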

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
        int cpu;
        int me = raw_smp_processor_id();

        if (unlikely(!smp_ops))
                return;

        for_each_online_cpu(cpu)
                if (cpu != me)
                        do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback) {
                mb();
                smp_send_debugger_break();
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                /*
                 * numa_node_id() works after this.
                 */
                if (cpu_present(cpu)) {
                        set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
                        set_cpu_numa_mem(cpu,
                                local_memory_node(numa_cpu_lookup_table[cpu]));
                }
        }

        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        if (smp_ops && smp_ops->probe)
                smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
#endif
        migrate_irqs();
        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
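
/*
 * Timing note (comment added): the loop above polls for up to ~10
 * seconds (100 iterations of msleep(100)) waiting for the dying cpu
 * to report CPU_DEAD via generic_set_cpu_dead() before giving up.
 */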

void generic_set_cpu_dead(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * kick_cpu() is expected to set cpu_state to CPU_UP_PREPARE; otherwise
 * cpu_state stays CPU_DEAD after generic_set_cpu_dead() has run, and
 * the wait loop in generic_cpu_die() would never actually wait.
 */
void generic_set_cpu_up(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

static bool secondaries_inhibited(void)
{
        return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()         0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
        struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
        paca[cpu].__current = idle;
        paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        ti->cpu = cpu;
        secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc, c;

        /*
         * Don't allow secondary threads to come online if inhibited
         */
        if (threads_per_core > 1 && secondaries_inhibited() &&
            cpu_thread_in_subcore(cpu))
                return -EBUSY;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        cpu_idle_thread_init(cpu, tidle);

        /* Make sure callin-map entry is 0 (can be leftover from a
         * CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        rc = smp_ops->kick_cpu(cpu);
        if (rc) {
                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
                return rc;
        }

        /*
         * Wait to see if the cpu made a callin (is actually up).
         * Use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online & active maps */
        while (!cpu_online(cpu) || !cpu_active(cpu))
                cpu_relax();

        return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const __be32 *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = be32_to_cpup(reg);
out:
        of_node_put(np);
        return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
        return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
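
/*
 * Worked example (comment added): assuming 8 threads per core,
 * threads_shift is 3, so cpu_core_index_of_thread(13) == 1 and
 * cpu_first_thread_of_core(2) == 16.
 */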

static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
        const struct cpumask *mask;
        struct device_node *np;
        int i, plen;
        const __be32 *prop;

        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = of_get_cpu_node(i, NULL);
                if (!np)
                        continue;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int) &&
                    of_read_number(prop, 1) == chipid) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
        struct device_node *l2_cache, *np;
        const struct cpumask *mask;
        int i, chip, plen;
        const __be32 *prop;

        /* First see if we have ibm,chip-id properties in cpu nodes */
        np = of_get_cpu_node(cpu, NULL);
        if (np) {
                chip = -1;
                prop = of_get_property(np, "ibm,chip-id", &plen);
                if (prop && plen == sizeof(int))
                        chip = of_read_number(prop, 1);
                of_node_put(np);
                if (chip >= 0) {
                        traverse_siblings_chip_id(cpu, add, chip);
                        return;
                }
        }

        l2_cache = cpu_to_l2cache(cpu);
        mask = add ? cpu_online_mask : cpu_present_mask;
        for_each_cpu(i, mask) {
                np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        if (add) {
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                        } else {
                                cpumask_clear_cpu(cpu, cpu_core_mask(i));
                                cpumask_clear_cpu(i, cpu_core_mask(cpu));
                        }
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        int i, base;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        secondary_cpu_time_init();

#ifdef CONFIG_PPC64
        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;

        vdso_getcpu_init();
#endif
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i) && (cpu != base + i))
                        continue;
                cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, true);

        set_numa_node(numa_cpu_lookup_table[cpu]);
        set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

        smp_wmb();
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);

        local_irq_enable();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

        BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                flags |= SD_ASYM_PACKING;
        }
        return flags;
}
#endif
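
/*
 * Background (comment added): SD_ASYM_PACKING asks the scheduler to
 * pack runnable tasks onto the lowest-numbered sibling threads; on
 * cpus with CPU_FTR_ASYM_SMT (such as POWER7), lower-numbered threads
 * can achieve higher performance, so packing improves throughput.
 */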

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_var_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        alloc_cpumask_var(&old_mask, GFP_NOWAIT);
        cpumask_copy(old_mask, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

        if (smp_ops && smp_ops->setup_cpu)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed_ptr(current, old_mask);

        free_cpumask_var(old_mask);

        if (smp_ops && smp_ops->bringup_done)
                smp_ops->bringup_done();

        dump_numa_cpu_topology();

        set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }
        traverse_core_siblings(cpu, false);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
        if (ppc_md.cpu_die)
                ppc_md.cpu_die();

        /* If we return, we re-enter start_secondary */
        start_secondary_resume();
}

#endif