/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>	/* for idle_task_exit */
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;

static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static int cede_offline_enabled __read_mostly = 1;
/*
 * Enable/disable cede_offline when available.
 */
static int __init setup_cede_offline(char *str)
{
	if (!strcmp(str, "off"))
		cede_offline_enabled = 0;
	else if (!strcmp(str, "on"))
		cede_offline_enabled = 1;
	else
		return 0;
	return 1;
}
__setup("cede_offline=", setup_cede_offline);
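
/*
 * Usage note (summary of the handler above): booting with "cede_offline=off"
 * forces offlined CPUs down the RTAS stop-self path, while the default
 * ("cede_offline=on") allows CPU_STATE_INACTIVE to become the default
 * offline state when the cede latency parameters can be read from firmware
 * (see pseries_cpu_hotplug_init()).
 */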
enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}
static void rtas_stop_self(void)
{
	static struct rtas_args args;

	local_irq_disable();

	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
	       smp_processor_id(), hard_smp_processor_id());

	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);

	panic("Alas, I survived.\n");
}
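
/*
 * Summary of the offline flow below: a CPU whose preferred offline state is
 * CPU_STATE_INACTIVE parks in extended cede (donating a dedicated CPU where
 * the partition is not shared-processor) until its preferred state changes;
 * any other preferred state ends in rtas_stop_self().
 */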
static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}
			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;)
		;
}
static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	xics_migrate_irqs_away();
	return 0;
}
/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu. If they were not they would be
	 * done here. Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}
/*
 * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case. We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
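/*
 * Illustrative example (hypothetical ids): an SMT2 cpu node must land on an
 * even/odd logical pair such as {6, 7}, never {7, 8}, so that cpu ^ 1 always
 * yields the sibling thread.
 */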
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
		       " supports %d logical cpus.\n", np->full_name,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}
/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();
}
static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}
static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
	struct device_node *child = NULL;
	u32 my_drc_index;
	bool found;
	int rc;

	/* Assume cpu doesn't exist */
	found = false;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_u32(child, "ibm,my-drc-index",
					  &my_drc_index);
		if (rc)
			continue;

		if (my_drc_index == drc_index) {
			of_node_put(child);
			found = true;
			break;
		}
	}

	return found;
}
static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
	bool found = false;
	int rc, index;

	index = 0;
	while (!found) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (drc == drc_index)
			found = true;
	}

	return found;
}
static ssize_t dlpar_cpu_add(u32 drc_index)
{
	struct device_node *dn, *parent;
	int rc, saved_rc;

	pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Failed to find CPU root node \"/cpus\"\n");
		return -ENODEV;
	}

	if (dlpar_cpu_exists(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("CPU with drc index %x already exists\n", drc_index);
		return -EINVAL;
	}

	if (!valid_cpu_drc_index(parent, drc_index)) {
		of_node_put(parent);
		pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
		return -EINVAL;
	}

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
			rc, drc_index);
		of_node_put(parent);
		return -EINVAL;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	of_node_put(parent);
	if (!dn) {
		pr_warn("Failed call to configure-connector, drc index: %x\n",
			drc_index);
		dlpar_release_drc(drc_index);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_release_drc(drc_index);
		if (!rc)
			dlpar_free_cc_nodes(dn);

		return saved_rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc) {
		saved_rc = rc;
		pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
			dn->name, rc, drc_index);

		rc = dlpar_detach_node(dn);
		if (!rc)
			dlpar_release_drc(drc_index);

		return saved_rc;
	}

	pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
		 drc_index);
	return rc;
}
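
/*
 * Summary of dlpar_offline_cpu() below: online threads are taken down via
 * device_offline(); threads already parked in extended cede
 * (CPU_STATE_INACTIVE) have their preferred state raised to
 * CPU_STATE_OFFLINE and are woken with H_PROD so that
 * pseries_mach_cpu_die() can fall through to rtas_stop_self().
 */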
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu,
							    CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;
			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
	int rc;

	pr_debug("Attempting to remove CPU %s, drc index: %x\n",
		 dn->name, drc_index);

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
			drc_index, dn->name, rc);
		dlpar_online_cpu(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		int saved_rc = rc;

		pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);

		rc = dlpar_acquire_drc(drc_index);
		if (!rc)
			dlpar_online_cpu(dn);

		return saved_rc;
	}

	pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
	return 0;
}
static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
	struct device_node *dn;
	u32 my_index;
	int rc;

	for_each_node_by_type(dn, "cpu") {
		rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
		if (rc)
			continue;

		if (my_index == drc_index)
			break;
	}

	return dn;
}
static int dlpar_cpu_remove_by_index(u32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = cpu_drc_index_to_dn(drc_index);
	if (!dn) {
		pr_warn("Cannot find CPU (drc index %x) to remove\n",
			drc_index);
		return -ENODEV;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);
	return rc;
}
static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
{
	struct device_node *dn;
	int cpus_found = 0;
	int rc;

	/* We want to find cpus_to_remove + 1 CPUs to ensure we do not
	 * remove the last CPU.
	 */
	for_each_node_by_type(dn, "cpu") {
		cpus_found++;

		if (cpus_found > cpus_to_remove) {
			of_node_put(dn);
			break;
		}

		/* Note that cpus_found is always 1 ahead of the index
		 * into the cpu_drcs array, so we use cpus_found - 1
		 */
		rc = of_property_read_u32(dn, "ibm,my-drc-index",
					  &cpu_drcs[cpus_found - 1]);
		if (rc) {
			pr_warn("Error occurred getting drc-index for %s\n",
				dn->name);
			of_node_put(dn);
			return -1;
		}
	}

	if (cpus_found < cpus_to_remove) {
		pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
			cpus_found, cpus_to_remove);
	} else if (cpus_found == cpus_to_remove) {
		pr_warn("Cannot remove all CPUs\n");
	}

	return cpus_found;
}
static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
{
	u32 *cpu_drcs;
	int cpus_found;
	int cpus_removed = 0;
	int i, rc;

	pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);

	cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
	if (cpus_found <= cpus_to_remove) {
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_remove; i++) {
		rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
		if (rc)
			break;

		cpus_removed++;
	}

	if (cpus_removed != cpus_to_remove) {
		pr_warn("CPU hot-remove failed, adding back removed CPUs\n");

		for (i = 0; i < cpus_removed; i++)
			dlpar_cpu_add(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}
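
/*
 * Illustrative layout (hypothetical values): ibm,drc-indexes =
 * <3 0x1000005 0x1000006 0x1000007>. Cell 0 holds the number of entries,
 * which is why the scan in find_dlpar_cpus_to_add() starts at index 1.
 */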
static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
{
	struct device_node *parent;
	int cpus_found = 0;
	int index, rc;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		pr_warn("Could not find CPU root node in device tree\n");
		return -1;
	}

	/* Search the ibm,drc-indexes array for possible CPU drcs to
	 * add. Note that the format of the ibm,drc-indexes array is
	 * the number of entries in the array followed by the array
	 * of drc values so we start looking at index = 1.
	 */
	index = 1;
	while (cpus_found < cpus_to_add) {
		u32 drc;

		rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
						index++, &drc);
		if (rc)
			break;

		if (dlpar_cpu_exists(parent, drc))
			continue;

		cpu_drcs[cpus_found++] = drc;
	}

	of_node_put(parent);
	return cpus_found;
}
static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
	u32 *cpu_drcs;
	int cpus_added = 0;
	int cpus_found;
	int i, rc;

	pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);

	cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
	if (!cpu_drcs)
		return -EINVAL;

	cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
	if (cpus_found < cpus_to_add) {
		pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
			cpus_found, cpus_to_add);
		kfree(cpu_drcs);
		return -EINVAL;
	}

	for (i = 0; i < cpus_to_add; i++) {
		rc = dlpar_cpu_add(cpu_drcs[i]);
		if (rc)
			break;

		cpus_added++;
	}

	if (cpus_added < cpus_to_add) {
		pr_warn("CPU hot-add failed, removing any added CPUs\n");

		for (i = 0; i < cpus_added; i++)
			dlpar_cpu_remove_by_index(cpu_drcs[i]);

		rc = -EINVAL;
	} else {
		rc = 0;
	}

	kfree(cpu_drcs);
	return rc;
}
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_remove_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_remove_by_index(drc_index);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_cpu_add_by_count(count);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_cpu_add(drc_index);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	unlock_device_hotplug();
	return rc;
}
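
/*
 * Example flow (sketch): a remove request that names a DRC count runs
 * dlpar_cpu() -> dlpar_cpu_remove_by_count() -> find_dlpar_cpus_to_remove(),
 * then dlpar_cpu_remove_by_index() for each selected DRC; a failure part way
 * through re-adds the CPUs that were already removed.
 */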
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_cpu_add(drc_index);

	return rc ? rc : count;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_cpu_remove(dn, drc_index);
	of_node_put(dn);

	return rc ? rc : count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
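
/*
 * Note on the probe/release handlers above (wired to ppc_md.cpu_probe and
 * ppc_md.cpu_release in pseries_cpu_hotplug_init()): dlpar_cpu_probe()
 * expects a DRC index written as a number, while dlpar_cpu_release() expects
 * a device tree path that is resolved back to a DRC index via
 * "ibm,my-drc-index".
 */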
static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_processor(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		pseries_remove_processor(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};
#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];
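
/*
 * parse_cede_parameters() asks firmware for the cede latency settings via
 * ibm,get-system-parameter (token CEDE_LATENCY_TOKEN); a zero return is what
 * lets pseries_cpu_hotplug_init() make CPU_STATE_INACTIVE the default
 * offline state.
 */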
static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}
static int __init pseries_cpu_hotplug_init(void)
{
	struct device_node *np;
	const char *typep;
	int cpu;
	int qcss_tok;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	for_each_node_by_name(np, "interrupt-controller") {
		typep = of_get_property(np, "compatible", NULL);
		if (strstr(typep, "open-pic")) {
			of_node_put(np);

			printk(KERN_INFO "CPU Hotplug not supported on "
				"systems using MPIC\n");
			return 0;
		}
	}

	rtas_stop_self_token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
			qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
				"- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		of_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
machine_arch_initcall(pseries, pseries_cpu_hotplug_init);