/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
	asm volatile(
		" testq %[size8],%[size8]\n"
		" jz 4f\n"
		"0: movq %[zero],(%[dst])\n"
		" addq %[eight],%[dst]\n"
		" decl %%ecx ; jnz 0b\n"
		"4: movq %[size1],%%rcx\n"
		" testl %%ecx,%%ecx\n"
		" jz 2f\n"
		"1: movb %b[zero],(%[dst])\n"
		" incq %[dst]\n"
		" decl %%ecx ; jnz 1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: lea 0(%[size1],%[size8],8),%[size8]\n"
		" jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);
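
/*
 * Fault handling in __clear_user(), illustrated: on a fault in the
 * quadword loop at label 0, the fixup at label 3 computes
 * size1 + size8 * 8, i.e. the leftover bytes plus the not-yet-written
 * quadwords; on a fault in the byte loop at label 1, %ecx already holds
 * the remaining byte count.  Either way the return value is the number
 * of bytes that could not be zeroed.  For example, size = 27 gives
 * size8 = 3 and size1 = 3; a fault on the second quadword store (with
 * %ecx == 2) returns 2 * 8 + 3 = 19.
 */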

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
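
/*
 * Illustrative caller sketch (ubuf and len are placeholder names, not
 * from this file): a nonzero return means that many trailing bytes were
 * left untouched, which callers conventionally turn into -EFAULT:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */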

/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();
	return len;
}
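
/*
 * copy_user_handle_tail() is reached from the fixup paths of the
 * copy_user_* assembly routines (copy_user_64.S) after a fault: it
 * retries the remaining bytes one at a time and returns how many could
 * not be copied.  The clac() clears EFLAGS.AC, which the faulting
 * assembly caller left set with stac().
 */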

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
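
/*
 * Example of the rounding behaviour, assuming 64-byte cache lines (the
 * usual boot_cpu_data.x86_clflush_size): addr == 0x1003e with size == 4
 * covers bytes 0x1003e..0x10041, so the loop starts at the aligned
 * address 0x10000 and issues CLWB for the lines at 0x10000 and 0x10040,
 * i.e. for every line the range touches, even partially.
 */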

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, 1);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
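
/*
 * Worked example of the flush decisions above, assuming 64-byte cache
 * lines: for dst == 0x1006 and size == 100 the destination is not
 * 8-byte aligned, so the head line at 0x1000 is written back and dest
 * is rounded up to 0x1040 (flushed = 58).  The remaining
 * size - flushed = 42 bytes are not a multiple of 8 either, so the line
 * holding the last byte (dst + 99, line 0x1040) is written back as
 * well.  Only the 8-byte-aligned middle went through non-temporal
 * stores and needs no explicit flush.
 */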

void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq (%0), %%r8\n"
		    "movq 8(%0), %%r9\n"
		    "movq 16(%0), %%r10\n"
		    "movq 24(%0), %%r11\n"
		    "movnti %%r8, (%1)\n"
		    "movnti %%r9, 8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq (%0), %%r8\n"
		    "movnti %%r8, (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl (%0), %%r8d\n"
		    "movnti %%r8d, (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
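
/*
 * Dispatch example for memcpy_flushcache(): copying 13 bytes to
 * dest == 0x1005 first does a 3-byte cached memcpy plus flush to reach
 * 8-byte alignment, then one 8-byte movnti, then a 2-byte cached memcpy
 * plus flush for the tail.  A large, already aligned copy spends nearly
 * all of its time in the 4x8 movnti loop instead.
 */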

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif
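
/*
 * Illustrative (hypothetical) callers, sketching how persistent-memory
 * style code might use these helpers; every name below other than
 * memcpy_flushcache() and arch_wb_cache_pmem() is made up:
 *
 *	static void pmem_write_record(void *pmem_dst, const void *buf,
 *				      size_t len)
 *	{
 *		// store the data with non-temporal writes / explicit
 *		// write-back so it does not linger dirty in the CPU cache
 *		memcpy_flushcache(pmem_dst, buf, len);
 *	}
 *
 *	static void pmem_writeback_range(void *pmem_addr, size_t len)
 *	{
 *		// write back cache lines dirtied by ordinary cached stores
 *		arch_wb_cache_pmem(pmem_addr, len);
 *	}
 */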