x86/asm/64: Rename thread_struct's fs and gs to fsbase and gsbase
authorAndy Lutomirski <luto@kernel.org>
Tue, 26 Apr 2016 19:23:29 +0000 (12:23 -0700)
committerIngo Molnar <mingo@kernel.org>
Fri, 29 Apr 2016 09:56:42 +0000 (11:56 +0200)
Unlike ds and es, these are base addresses, not selectors.  Rename
them so their meaning is more obvious.

On x86_32, the field is still called fs.  Fixing that could make sense
as a future cleanup.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/69a18a51c4cba0ce29a241e570fc618ad721d908.1461698311.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/elf.h
arch/x86/include/asm/processor.h
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kvm/svm.c

index 15340e3..fea7724 100644 (file)
@@ -176,7 +176,7 @@ static inline void elf_common_init(struct thread_struct *t,
        regs->si = regs->di = regs->bp = 0;
        regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
        regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
-       t->fs = t->gs = 0;
+       t->fsbase = t->gsbase = 0;
        t->fsindex = t->gsindex = 0;
        t->ds = t->es = ds;
 }
@@ -226,8 +226,8 @@ do {                                                                \
        (pr_reg)[18] = (regs)->flags;                           \
        (pr_reg)[19] = (regs)->sp;                              \
        (pr_reg)[20] = (regs)->ss;                              \
-       (pr_reg)[21] = current->thread.fs;                      \
-       (pr_reg)[22] = current->thread.gs;                      \
+       (pr_reg)[21] = current->thread.fsbase;                  \
+       (pr_reg)[22] = current->thread.gsbase;                  \
        asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;       \
        asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;       \
        asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;       \
index 9264476..9251aa9 100644 (file)
@@ -388,9 +388,16 @@ struct thread_struct {
        unsigned long           ip;
 #endif
 #ifdef CONFIG_X86_64
-       unsigned long           fs;
+       unsigned long           fsbase;
+       unsigned long           gsbase;
+#else
+       /*
+        * XXX: this could presumably be unsigned short.  Alternatively,
+        * 32-bit kernels could be taught to use fsindex instead.
+        */
+       unsigned long fs;
+       unsigned long gs;
 #endif
-       unsigned long           gs;
 
        /* Save middle states of ptrace breakpoints */
        struct perf_event       *ptrace_bps[HBP_NUM];
index 864fe2c..4285f6a 100644 (file)
@@ -150,9 +150,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
        p->thread.io_bitmap_ptr = NULL;
 
        savesegment(gs, p->thread.gsindex);
-       p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
+       p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
-       p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
+       p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
@@ -329,18 +329,18 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * stronger guarantees.)
         *
         * As an invariant,
-        * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) is
+        * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
         * impossible.
         */
        if (next->fsindex) {
                /* Loading a nonzero value into FS sets the index and base. */
                loadsegment(fs, next->fsindex);
        } else {
-               if (next->fs) {
+               if (next->fsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_fsindex)
                                loadsegment(fs, 0);
-                       wrmsrl(MSR_FS_BASE, next->fs);
+                       wrmsrl(MSR_FS_BASE, next->fsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
@@ -356,7 +356,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
-                               if (prev->fs || prev_fsindex)
+                               if (prev->fsbase || prev_fsindex)
                                        loadsegment(fs, 0);
                        }
                }
@@ -369,18 +369,18 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * us.
         */
        if (prev_fsindex)
-               prev->fs = 0;
+               prev->fsbase = 0;
        prev->fsindex = prev_fsindex;
 
        if (next->gsindex) {
                /* Loading a nonzero value into GS sets the index and base. */
                load_gs_index(next->gsindex);
        } else {
-               if (next->gs) {
+               if (next->gsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_gsindex)
                                load_gs_index(0);
-                       wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+                       wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
@@ -400,7 +400,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
-                               if (prev->gs || prev_gsindex)
+                               if (prev->gsbase || prev_gsindex)
                                        load_gs_index(0);
                        }
                }
@@ -413,7 +413,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * us.
         */
        if (prev_gsindex)
-               prev->gs = 0;
+               prev->gsbase = 0;
        prev->gsindex = prev_gsindex;
 
        switch_fpu_finish(next_fpu, fpu_switch);
@@ -536,7 +536,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.gsindex = 0;
-               task->thread.gs = addr;
+               task->thread.gsbase = addr;
                if (doit) {
                        load_gs_index(0);
                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
@@ -549,7 +549,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.fsindex = 0;
-               task->thread.fs = addr;
+               task->thread.fsbase = addr;
                if (doit) {
                        /* set the selector to 0 to not confuse __switch_to */
                        loadsegment(fs, 0);
@@ -562,7 +562,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
-                       base = task->thread.fs;
+                       base = task->thread.fsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
@@ -571,7 +571,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
-                       base = task->thread.gs;
+                       base = task->thread.gsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
index e72ab40..e60ef91 100644 (file)
@@ -399,7 +399,7 @@ static int putreg(struct task_struct *child,
                 * to set either thread.fs or thread.fsindex and the
                 * corresponding GDT slot.
                 */
-               if (child->thread.fs != value)
+               if (child->thread.fsbase != value)
                        return do_arch_prctl(child, ARCH_SET_FS, value);
                return 0;
        case offsetof(struct user_regs_struct,gs_base):
@@ -408,7 +408,7 @@ static int putreg(struct task_struct *child,
                 */
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
-               if (child->thread.gs != value)
+               if (child->thread.gsbase != value)
                        return do_arch_prctl(child, ARCH_SET_GS, value);
                return 0;
 #endif
@@ -438,14 +438,14 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset)
                 * XXX: This will not behave as expected if called on
                 * current or if fsindex != 0.
                 */
-               return task->thread.fs;
+               return task->thread.fsbase;
        }
        case offsetof(struct user_regs_struct, gs_base): {
                /*
                 * XXX: This will not behave as expected if called on
                 * current or if gsindex != 0.
                 */
-               return task->thread.gs;
+               return task->thread.gsbase;
        }
 #endif
        }
index 31346a3..fafd720 100644 (file)
@@ -1254,7 +1254,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
        kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
        loadsegment(fs, svm->host.fs);
-       wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+       wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
        load_gs_index(svm->host.gs);
 #else
 #ifdef CONFIG_X86_32_LAZY_GS