/*
 * Copyright 2013, Michael (Ellerman|Neuling), IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"powernv: " fmt

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>

#include <asm/cputhreads.h>
#include <asm/kvm_ppc.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "subcore.h"
#include "powernv.h"

/*
 * Split/unsplit procedure:
 *
 * A core can be in one of three states: unsplit, 2-way split, and 4-way split.
 *
 * The mapping to subcores_per_core is simple:
 *
 *  State       | subcores_per_core
 *  ------------|------------------
 *  Unsplit     |        1
 *  2-way split |        2
 *  4-way split |        4
 *
 * The core is split along thread boundaries; the mapping between subcores and
 * threads is as follows:
 *
 *  Unsplit:
 *          ----------------------------
 *  Subcore |            0             |
 *          ----------------------------
 *  Thread  |  0  1  2  3  4  5  6  7  |
 *          ----------------------------
 *
 *  2-way split:
 *          -------------------------------------
 *  Subcore |        0        |        1        |
 *          -------------------------------------
 *  Thread  |  0  1  2  3     |  4  5  6  7     |
 *          -------------------------------------
 *
 *  4-way split:
 *          -----------------------------------------
 *  Subcore |    0    |    1    |    2    |    3    |
 *          -----------------------------------------
 *  Thread  |  0  1   |  2  3   |  4  5   |  6  7   |
 *          -----------------------------------------
 *
 * Transitions
 * -----------
 *
 * It is not possible to transition directly between the two split states; the
 * core must first be unsplit. The legal transitions are:
 *
 *  -----------          ---------------
 *  |         | <---->   | 2-way split |
 *  |         |          ---------------
 *  | Unsplit |
 *  |         |          ---------------
 *  |         | <---->   | 4-way split |
 *  -----------          ---------------
 *
 * Unsplitting
 * -----------
 *
 * Unsplitting is the simpler procedure. It requires thread 0 to request the
 * unsplit while all other threads NAP.
 *
 * Thread 0 clears HID0_POWER8_DYNLPARDIS (Dynamic LPAR Disable). This tells
 * the hardware that if all threads except 0 are napping, it should unsplit
 * the core.
 *
 * Non-zero threads are sent to a NAP loop; they don't exit the loop until
 * they see the core unsplit.
 *
 * Thread 0 spins waiting for the hardware to see all the other threads
 * napping and perform the unsplit.
 *
 * Once thread 0 sees the unsplit, it IPIs the secondary threads to wake them
 * out of NAP. They will then see the core unsplit and exit the NAP loop.
 *
 * Splitting
 * ---------
 *
 * The basic splitting procedure is fairly straightforward. However it is
 * complicated by the fact that after the split occurs, the newly created
 * subcores are not in a fully initialised state.
 *
 * Most notably the subcores do not have the correct value for SDR1, which
 * means they must not be running in virtual mode when the split occurs. The
 * subcores have separate timebase SPRs, but these are pre-synchronised by
 * opal.
 *
 * To begin with, secondary threads are sent to an assembly routine. There
 * they switch to real mode, so they are immune to the uninitialised SDR1
 * value. Once in real mode they indicate that they are in real mode, and
 * spin waiting to see the core split.
 *
 * Thread 0 waits to see that all secondaries are in real mode, and then
 * begins the splitting procedure. It first sets HID0_POWER8_DYNLPARDIS,
 * which prevents the hardware from unsplitting. Then it sets the appropriate
 * HID bit to request the split, and spins waiting to see that the split has
 * happened.
 *
 * Concurrently the secondaries will notice the split. When they do, they set
 * up their SPRs, notably SDR1, and then they can return to virtual mode and
 * exit the procedure.
 */
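
/*
 * Illustrative only, not used below: the subcore a given thread lands in
 * follows from the diagrams above, once threads_per_subcore has been updated
 * for the current mode. e.g. a 2-way split on an 8-thread core gives
 * threads_per_subcore = 8 / 2 = 4, so thread 5 is in subcore 5 / 4 = 1.
 *
 *	static inline int subcore_of_thread(int cpu)
 *	{
 *		return cpu_thread_in_core(cpu) / threads_per_subcore;
 *	}
 *
 * subcore_of_thread() is a hypothetical helper shown for illustration;
 * cpu_thread_in_core() and threads_per_subcore are real and used below.
 */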

/* Initialised at boot by subcore_init() */
static int subcores_per_core;

/*
 * Used to communicate to offline cpus that we want them to pop out of the
 * offline loop and do a split or unsplit.
 *
 * 0 - no split happening
 * 1 - unsplit in progress
 * 2 - split to 2 in progress
 * 4 - split to 4 in progress
 */
static int new_split_mode;
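
/*
 * Offline cpus poll new_split_mode from their low-level offline loop and
 * act on it via cpu_core_split_required() below; online cpus act on it
 * inside stop_machine() in cpu_update_split_mode().
 */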

static cpumask_var_t cpu_offline_mask;

struct split_state {
	u8 step;
	u8 master;
};

static DEFINE_PER_CPU(struct split_state, split_state);
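
/*
 * Each thread advances its split_state.step through the SYNC_STEP_* values
 * (defined in subcore.h) as it progresses through the procedure; thread 0
 * uses wait_for_sync_step() to rendezvous with its sibling threads.
 */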

static void wait_for_sync_step(int step)
{
	int i, cpu = smp_processor_id();

	/* Wait for all sibling threads in this core to reach the given step */
	for (i = cpu + 1; i < cpu + threads_per_core; i++)
		while (per_cpu(split_state, i).step < step)
			barrier();

	/* Order the wait loop vs any subsequent loads/stores. */
	mb();
}

static void unsplit_core(void)
{
	u64 hid0, mask;
	int i, cpu;

	mask = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;

	cpu = smp_processor_id();
	if (cpu_thread_in_core(cpu) != 0) {
		/* Secondary threads NAP until the hardware has unsplit */
		while (mfspr(SPRN_HID0) & mask)
			power7_nap(0);

		per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
		return;
	}

	hid0 = mfspr(SPRN_HID0);
	hid0 &= ~HID0_POWER8_DYNLPARDIS;
	mtspr(SPRN_HID0, hid0);

	while (mfspr(SPRN_HID0) & mask)
		cpu_relax();

	/* Wake secondaries out of NAP */
	for (i = cpu + 1; i < cpu + threads_per_core; i++)
		smp_send_reschedule(i);

	wait_for_sync_step(SYNC_STEP_UNSPLIT);
}

static void split_core(int new_mode)
{
	struct { u64 value; u64 mask; } split_parms[2] = {
		{ HID0_POWER8_1TO2LPAR, HID0_POWER8_2LPARMODE },
		{ HID0_POWER8_1TO4LPAR, HID0_POWER8_4LPARMODE }
	};
	int i, cpu;
	u64 hid0;

	/* Convert new_mode (2 or 4) into an index into our parms array */
	i = (new_mode >> 1) - 1;
	BUG_ON(i < 0 || i > 1);

	cpu = smp_processor_id();
	if (cpu_thread_in_core(cpu) != 0) {
		/* Secondary threads drop to real mode and spin, see above */
		split_core_secondary_loop(&per_cpu(split_state, cpu).step);
		return;
	}

	wait_for_sync_step(SYNC_STEP_REAL_MODE);

	/* Write new mode */
	hid0  = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
	mtspr(SPRN_HID0, hid0);

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & split_parms[i].mask))
		cpu_relax();
}

static void cpu_do_split(int new_mode)
{
	/*
	 * At boot subcores_per_core will be 0, so we will always unsplit at
	 * boot. In the usual case where the core is already unsplit it's a
	 * nop, and this just ensures the kernel's notion of the mode is
	 * consistent with the hardware.
	 */
	if (subcores_per_core != 1)
		unsplit_core();

	if (new_mode != 1)
		split_core(new_mode);

	mb();
	per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED;
}
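
/*
 * Entry point for offline cpus: intended to be called from the powernv cpu
 * offline loop. Returns true if a split/unsplit was performed, so the caller
 * knows to re-check its state before napping again.
 */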
bool cpu_core_split_required(void)
{
	smp_rmb();

	if (!new_split_mode)
		return false;

	cpu_do_split(new_split_mode);

	return true;
}

static int cpu_update_split_mode(void *data)
{
	int cpu, new_mode = *(int *)data;

	if (this_cpu_ptr(&split_state)->master) {
		new_split_mode = new_mode;
		smp_wmb();

		cpumask_andnot(cpu_offline_mask, cpu_present_mask,
			       cpu_online_mask);

		/* This should work even though the cpu is offline */
		for_each_cpu(cpu, cpu_offline_mask)
			smp_send_reschedule(cpu);
	}

	cpu_do_split(new_mode);

	if (this_cpu_ptr(&split_state)->master) {
		/* Wait for all cpus to finish before we touch subcores_per_core */
		for_each_present_cpu(cpu) {
			if (cpu >= setup_max_cpus)
				break;

			while (per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED)
				barrier();
		}

		new_split_mode = 0;

		/* Make the new mode public */
		subcores_per_core = new_mode;
		threads_per_subcore = threads_per_core / subcores_per_core;

		/* Make sure the new mode is written before we exit */
		smp_wmb();
	}

	return 0;
}

static int set_subcores_per_core(int new_mode)
{
	struct split_state *state;
	int cpu;

	if (kvm_hv_mode_active()) {
		pr_err("Unable to change split core mode while KVM active.\n");
		return -EBUSY;
	}

	/*
	 * We are only called at boot, or from the sysfs write. If that ever
	 * changes we'll need a lock here.
	 */
	BUG_ON(new_mode < 1 || new_mode > 4 || new_mode == 3);

	for_each_present_cpu(cpu) {
		state = &per_cpu(split_state, cpu);
		state->step = SYNC_STEP_INITIAL;
		state->master = 0;
	}

	get_online_cpus();

	/* This cpu will update the globals before exiting stop machine */
	this_cpu_ptr(&split_state)->master = 1;

	/* Ensure state is consistent before we call the other cpus */
	mb();

	stop_machine(cpu_update_split_mode, &new_mode, cpu_online_mask);

	put_online_cpus();

	return 0;
}

static ssize_t __used store_subcores_per_core(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	unsigned long val;
	int rc;

	/* We are serialised by the attribute lock */

	rc = sscanf(buf, "%lx", &val);
	if (rc != 1)
		return -EINVAL;

	switch (val) {
	case 1:
	case 2:
	case 4:
		if (subcores_per_core == val)
			/* Nothing to do */
			goto out;
		break;
	default:
		return -EINVAL;
	}

	rc = set_subcores_per_core(val);
	if (rc)
		return rc;

out:
	return count;
}

static ssize_t show_subcores_per_core(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%x\n", subcores_per_core);
}

static DEVICE_ATTR(subcores_per_core, 0644,
		show_subcores_per_core, store_subcores_per_core);
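
/*
 * Illustrative usage from the shell (path assumes the standard cpu subsystem
 * location; the value is parsed as hex, though 1, 2 and 4 read the same in
 * either base):
 *
 *	# echo 2 > /sys/devices/system/cpu/subcores_per_core
 *	# cat /sys/devices/system/cpu/subcores_per_core
 *	2
 */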

static int subcore_init(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return 0;

	/*
	 * We need all threads in a core to be present to split/unsplit so
	 * continue only if max_cpus are aligned to threads_per_core.
	 */
	if (setup_max_cpus % threads_per_core)
		return 0;

	BUG_ON(!alloc_cpumask_var(&cpu_offline_mask, GFP_KERNEL));

	set_subcores_per_core(1);

	return device_create_file(cpu_subsys.dev_root,
			&dev_attr_subcores_per_core);
}
machine_device_initcall(powernv, subcore_init);