/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
	       regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);
	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;
	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}
	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
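
/*
 * Illustrative user-space sketch (an assumption for context, not part of
 * this file): a raw clone() call is one way the CLONE_SETTLS path above
 * gets exercised.  On 64-bit, @tls is taken as the new FS base (the
 * do_arch_prctl(ARCH_SET_FS) branch); under IA32 emulation it is a
 * struct user_desc pointer instead.  The TCB below is a hypothetical
 * placeholder:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <signal.h>
 *
 *	static char tcb[256];		// hypothetical thread-control block
 *	static char stack[64 * 1024];	// child stack
 *
 *	static int thread_fn(void *arg) { return 0; }
 *
 *	int start_thread_example(void)
 *	{
 *		// The kernel will point the child's FS base at 'tcb'.
 *		return clone(thread_fn, stack + sizeof(stack),
 *			     CLONE_VM | CLONE_SETTLS | SIGCHLD, NULL,
 *			     NULL, tcb, NULL);
 *	}
 */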
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	unsigned prev_fsindex, prev_gsindex;
	fpu_switch_t fpu_switch;

	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, prev_fsindex);
	savesegment(gs, prev_gsindex);
	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);
	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);
	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * unconditionally.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);
	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl.  The bases
	 * don't necessarily match the selectors, as user code can do
	 * any number of things to cause them to be inconsistent.
	 *
	 * We don't promise to preserve the bases if the selectors are
	 * nonzero.  We also don't promise to preserve the base if the
	 * selector is zero and the base doesn't match whatever was
	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
	 * FSGSBASE instructions are enabled, we'll need to offer
	 * stronger guarantees.)
	 *
	 * As an invariant,
	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
	 * impossible.
	 */
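	/*
	 * Illustrative user-space sequence (a sketch; 'tls_sel' is a
	 * hypothetical selector installed earlier with set_thread_area())
	 * showing how the base and selector can come to disagree:
	 *
	 *	arch_prctl(ARCH_SET_GS, base);	// gsindex == 0, gsbase == base
	 *	asm volatile("movw %0, %%gs" :: "rm" (tls_sel));
	 *					// gsindex != 0; the base now
	 *					// comes from the descriptor
	 */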
	if (next->fsindex) {
		/* Loading a nonzero value into FS sets the index and base. */
		loadsegment(fs, next->fsindex);
	} else {
		if (next->fsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_fsindex)
				loadsegment(fs, 0);
			wrmsrl(MSR_FS_BASE, next->fsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 */
				loadsegment(fs, __USER_DS);
				loadsegment(fs, 0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_FS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->fsbase || prev_fsindex)
					loadsegment(fs, 0);
			}
		}
	}

	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_fsindex)
		prev->fsbase = 0;
	prev->fsindex = prev_fsindex;
	if (next->gsindex) {
		/* Loading a nonzero value into GS sets the index and base. */
		load_gs_index(next->gsindex);
	} else {
		if (next->gsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_gsindex)
				load_gs_index(0);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 *
				 * This contains a pointless SWAPGS pair.
				 * Fixing it would involve an explicit check
				 * for Xen or a new pvop.
				 */
				load_gs_index(__USER_DS);
				load_gs_index(0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_GS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->gsbase || prev_gsindex)
					load_gs_index(0);
			}
		}
	}

	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_gsindex)
		prev->gsbase = 0;
	prev->gsindex = prev_gsindex;
	switch_fpu_finish(next_fpu, fpu_switch);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1.  This changes current_thread_info(). */
	load_sp0(tss, next);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);
#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif
	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64-bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* in_compat_syscall() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current->thread.status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current->thread.status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
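
/*
 * Illustrative user-space sketch (not part of this file): exercising the
 * prctl codes handled above through the raw syscall, since older glibc
 * versions ship no arch_prctl() wrapper:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/prctl.h>
 *
 *	int main(void)
 *	{
 *		unsigned long fsbase;
 *
 *		// ARCH_GET_FS stores the current FS base (the TLS pointer
 *		// set up by libc) through the addr argument.
 *		if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
 *			printf("FS base: %#lx\n", fsbase);
 *		return 0;
 *	}
 */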
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}