1 #ifndef __ASM_X86_XSAVE_H
2 #define __ASM_X86_XSAVE_H
4 #include <linux/types.h>
5 #include <asm/processor.h>
/* CPUID leaf that enumerates XSAVE-managed state components */
#define XSTATE_CPUID		0x0000000d

/*
 * XCR0 / xstate_bv state-component bits (Intel SDM vol. 1, ch. 13).
 * XSTATE_FP (bit 0, x87 FPU state) is referenced by XSTATE_FPSSE and
 * XSTATE_LAZY below and had been dropped from this header — restored.
 */
#define XSTATE_FP		0x1
#define XSTATE_SSE		0x2
#define XSTATE_YMM		0x4
#define XSTATE_BNDREGS		0x8	/* MPX bound registers */
#define XSTATE_BNDCSR		0x10	/* MPX bound config/status */
#define XSTATE_OPMASK		0x20	/* AVX-512 opmask registers k0-k7 */
#define XSTATE_ZMM_Hi256	0x40	/* AVX-512 upper 256 bits of zmm0-15 */
#define XSTATE_Hi16_ZMM		0x80	/* AVX-512 zmm16-31 */

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)

/* Size of the legacy FXSAVE area at the start of the xsave buffer */
#define FXSAVE_SIZE	512

/* The 64-byte xsave header immediately follows the legacy area */
#define XSAVE_HDR_SIZE		64
#define XSAVE_HDR_OFFSET	FXSAVE_SIZE

#define XSAVE_YMM_SIZE		256
#define XSAVE_YMM_OFFSET	(XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Supported features which support lazy state saving */
#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		      \
			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)
/*
 * REX.W prefix byte selects the 64-bit forms (XSAVE64/XRSTOR64) of the
 * hand-assembled opcodes below.  It must only be emitted on x86-64; the
 * 32-bit kernel uses the plain forms, so the macro expands to nothing there.
 */
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif
44 extern unsigned int xstate_size;
45 extern u64 pcntxt_mask;
46 extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
47 extern struct xsave_struct *init_xstate_buf;
49 extern void xsave_init(void);
50 extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
51 extern int init_fpu(struct task_struct *child);
53 static inline int fpu_xrstor_checking(struct xsave_struct *fx)
57 asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
59 ".section .fixup,\"ax\"\n"
60 "3: movl $-1,%[err]\n"
65 : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
71 static inline int xsave_user(struct xsave_struct __user *buf)
76 * Clear the xsave header first, so that reserved fields are
77 * initialized to zero.
79 err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
83 __asm__ __volatile__(ASM_STAC "\n"
84 "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
86 ".section .fixup,\"ax\"\n"
87 "3: movl $-1,%[err]\n"
92 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
97 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
100 struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
102 u32 hmask = mask >> 32;
104 __asm__ __volatile__(ASM_STAC "\n"
105 "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
107 ".section .fixup,\"ax\"\n"
108 "3: movl $-1,%[err]\n"
113 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
114 : "memory"); /* memory required? */
118 static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
121 u32 hmask = mask >> 32;
123 asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
124 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
128 static inline void xsave_state(struct xsave_struct *fx, u64 mask)
131 u32 hmask = mask >> 32;
133 asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
134 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
138 static inline void fpu_xsave(struct fpu *fpu)
140 /* This, however, we can work around by forcing the compiler to select
141 an addressing mode that doesn't require extended registers. */
143 ".byte " REX_PREFIX "0x0f,0xae,0x27",
144 ".byte " REX_PREFIX "0x0f,0xae,0x37",
145 X86_FEATURE_XSAVEOPT,
146 [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :