#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID		0x0000000d

#define XSTATE_FP	0x1
#define XSTATE_SSE	0x2
#define XSTATE_YMM	0x4

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)

#define FXSAVE_SIZE	512

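/*
 * Layout of the xsave area in the standard format: the 512-byte
 * legacy fxsave image comes first, followed by the 64-byte xsave
 * header at offset 512, followed by the extended state components;
 * the upper 128 bits of the sixteen YMM registers land at offset 576.
 */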
#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)

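/*
 * The xsave-family instructions below are hand-assembled as .byte
 * sequences so this header does not depend on assembler support for
 * them: 0x0f,0xae with ModRM byte 0x27/0x2f/0x37 encodes
 * xsave/xrstor/xsaveopt with a (%{e,r}di) memory operand, matching
 * the "D" register constraint used throughout.  On 64-bit kernels
 * REX_PREFIX (REX.W, 0x48) selects the 64-bit forms of the
 * instructions.
 */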
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
			    void __user *fpstate,
			    struct _fpx_sw_bytes *sw);

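/*
 * Restore the extended state in @fpu's xsave area with xrstor,
 * requesting all state components (edx:eax = -1).  A fault in the
 * instruction is caught via the exception table and reported as a
 * non-zero return value.
 */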
static inline int fpu_xrstor_checking(struct fpu *fpu)
{
	struct xsave_struct *fx = &fpu->state->xsave;
	int err;

	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
		     : "memory");

	return err;
}

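/*
 * Save the current extended state to a user-space buffer (e.g. the
 * signal frame).  The xsave header is cleared up front so that its
 * reserved fields are zero; if the save itself faults, an attempt is
 * made to clear the whole buffer and an error is returned.
 */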
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->xsave_hdr,
			   sizeof(struct xsave_hdr_struct));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

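/*
 * Restore extended state from a user-space buffer, limited to the
 * components selected by @mask (split into edx:eax).  Returns
 * non-zero if the xrstor faults.
 */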
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}

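/*
 * Unchecked xrstor from a kernel buffer; @mask selects which state
 * components to restore.
 */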
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}

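/*
 * Save the state components selected by @mask into a kernel buffer.
 */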
static inline void xsave_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}

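/*
 * Save the task's extended state into its xsave area, using xsaveopt
 * when the CPU supports it: xsaveopt may skip writing state
 * components that have not been modified since the last xrstor.
 */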
static inline void fpu_xsave(struct fpu *fpu)
{
	/*
	 * Both alternatives must use the same operands, so force the
	 * compiler to select an addressing mode that doesn't require
	 * extended registers.
	 */
	alternative_input(
		".byte " REX_PREFIX "0x0f,0xae,0x27",
		".byte " REX_PREFIX "0x0f,0xae,0x37",
		X86_FEATURE_XSAVEOPT,
		[fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) :
		"memory");
}
#endif