#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>
#include <asm/i387.h>
#define XSTATE_FP	0x1
#define XSTATE_SSE	0x2

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)

#define FXSAVE_SIZE	512

/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
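
/*
 * The xsave/xrstor instructions below are emitted as raw .byte
 * sequences, since assembler support for the mnemonics cannot be
 * assumed; on 64-bit kernels a REX.W prefix (0x48) selects the
 * 64-bit form of the instruction.
 */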
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern struct xsave_struct *init_xstate_buf;

extern void xsave_cntxt_init(void);
extern void xsave_init(void);
extern int init_fpu(struct task_struct *child);
extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
			    void __user *fpstate,
			    struct _fpx_sw_bytes *sw);
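
/*
 * Restore extended state from a kernel buffer with a full instruction
 * mask (edx:eax = -1).  0x0f,0xae,0x2f encodes xrstor (%rdi)/(%edi);
 * a fault is routed through the exception fixup, which reports it as
 * -1 in err.
 */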
static inline int xrstor_checking(struct xsave_struct *fx)
{
	int err;

	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
		     : "memory");

	return err;
}
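
/*
 * Save the current extended state to a user-space buffer
 * (0x0f,0xae,0x27 encodes xsave (%rdi)/(%edi), full mask in edx:eax).
 * If the xsave faults part-way through, the buffer is cleared so no
 * partial image is left behind; a failed clear returns -EFAULT.
 */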
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}
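
/*
 * Restore extended state from a user-space buffer, limited to the
 * feature bits in mask (passed to xrstor as edx:eax).  Returns -1 via
 * the exception fixup if the access faults.
 */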
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}
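
/*
 * Restore extended state from a trusted kernel buffer for the feature
 * bits in mask; no exception fixup is needed here.
 */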
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     : "memory");
}
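
/*
 * Save the full extended state of a task into its xsave area
 * (mask edx:eax = -1).
 */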
static inline void xsave(struct task_struct *tsk)
{
	/*
	 * The hand-assembled .byte opcode cannot encode an extended
	 * (r8-r15) base register, so force the buffer address into
	 * %rdi/%edi via the "D" constraint.
	 */
	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
			     : : "D" (&(tsk->thread.xstate->xsave)),
			       "a" (-1), "d" (-1)
			     : "memory");
}
#endif /* __ASM_X86_XSAVE_H */