#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/tlbflush.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
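
/*
 * Usage sketch (illustrative, not part of the original file): code that
 * wants to hook idle entry/exit on this CPU registers a notifier_block;
 * the callback is invoked with IDLE_START or IDLE_END as the action.
 * The names my_idle_notify/my_idle_nb below are hypothetical:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */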

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	dst->thread.fpu_counter = 0;
	dst->thread.fpu.has_fpu = 0;
	dst->thread.fpu.state = NULL;
	task_disable_lazy_fpu_restore(dst);
	if (tsk_used_math(src)) {
		int err = fpu_alloc(&dst->thread.fpu);
		if (err)
			return err;
		fpu_copy(dst, src);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	drop_fpu(me);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	/*
	 * Free the FPU state for non xsave platforms. They get reallocated
	 * lazily at the first use.
	 */
	if (!use_eager_fpu())
		free_thread_xstate(tsk);
	else if (!used_math()) {
		/* kthread execs. TODO: cleanup this horror. */
		if (WARN_ON(init_fpu(current)))
			force_sig(SIGKILL, current);
		math_state_restore();
	}
}

static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
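
/*
 * Userspace view (illustrative sketch, not part of the original file):
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl()
 * operations, so a process can ask for SIGSEGV on RDTSC roughly like
 * this:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int mode;
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// RDTSC now faults
 *	prctl(PR_GET_TSC, &mode);		// mode == PR_TSC_SIGSEGV
 */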

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
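
/*
 * Boot-time usage (illustrative, not part of the original file): the
 * early_param() hook above makes idle_setup() parse an "idle=" token on
 * the kernel command line, e.g.:
 *
 *	linux ... idle=poll	# busy-poll instead of halting
 *	linux ... idle=halt	# force HLT; CPU C2/C3 won't be used
 *	linux ... idle=nomwait	# keep cpuidle but avoid MWAIT for C2/C3
 *
 * These command lines are example invocations, not output of this file.
 */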

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}