// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly boot_cpu_done;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_initialized;
static bool __read_mostly init_cm_done;

void pat_disable(const char *reason)
{
	if (pat_disabled)
		return;

	if (boot_cpu_done) {
		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
		return;
	}

	pat_disabled = true;
	pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags arch_1 and uncached together to keep track
 * of the memory type of pages that have a backing struct page.
 *
 * X86 PAT supports 4 different memory types:
 *  - _PAGE_CACHE_MODE_WB
 *  - _PAGE_CACHE_MODE_WC
 *  - _PAGE_CACHE_MODE_UC_MINUS
 *  - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif
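
/*
 * For illustration only (not part of the original file): the two page
 * flags form a 2-bit code, so the four tracked types map as
 *
 *	PG_uncached  PG_arch_1	memtype
 *	     0           0	_PAGE_CACHE_MODE_WB (default)
 *	     0           1	_PAGE_CACHE_MODE_WC
 *	     1           0	_PAGE_CACHE_MODE_UC_MINUS
 *	     1           1	_PAGE_CACHE_MODE_WT
 *
 * and set_page_memtype() updates both bits with a cmpxchg() loop so a
 * racing update to unrelated page flags is never clobbered.
 */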

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);

	init_cm_done = true;
}

#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

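/*
 * For illustration (not in the original source): PAT(x, y) places the
 * 3-bit encoding PAT_y into byte x of the 64-bit IA32_PAT MSR image,
 * one byte per PAT slot. For example:
 *
 *	PAT(1, WC)       == (u64)1 << 8
 *	PAT(2, UC_MINUS) == (u64)7 << 16
 *	PAT(7, WT)       == (u64)4 << 56
 */
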
static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT MSR is 0, disabled.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
	pat_initialized = true;

	__init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

void init_cache_modes(void)
{
	u64 pat = 0;

	if (init_cm_done)
		return;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set the PAT table to be consistent with
		 * the PAT MSR. This case supports the "nopat" boot option,
		 * and virtual machine environments which support PAT without
		 * MTRRs. In particular, Xen has a unique setup for the PAT
		 * MSR.
		 *
		 * If the PAT MSR reads as 0, it is considered invalid and is
		 * emulated as "no PAT".
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}

	__init_cache_modes(pat);
}

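/*
 * Worked example (illustrative, not part of the original file): with the
 * no-PAT fallback above, each 4-slot half of the MSR image repeats the
 * legacy 2-bit PWT/PCD encoding, so the composed value is
 *
 *	PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
 *	PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC)
 *	    == 0x0007040600070406ULL
 *
 * (bytes, high to low: 00 07 04 06 00 07 04 06).
 */
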
/**
 * pat_init - Initialize the PAT MSR and PAT table
 *
 * This function initializes the PAT MSR and PAT table with an OS-defined
 * value to enable additional cache attributes, WC, WT and WP.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in the Intel SDM. mtrr_rendezvous_handler() provides
 * this procedure for PAT.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (pat_disabled)
		return;

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we
		 * don't use those.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      PAT bit unused
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support. We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored. This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example of such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT

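/*
 * Worked example (illustrative, not part of the original file): on CPUs
 * with full PAT support, the table composed above evaluates to
 *
 *	0x0407050600070106ULL
 *
 * (bytes, high to low: WT=04, UC-=07, WP=05, WB=06, UC=00, UC-=07,
 * WC=01, WB=06), which is the value written to IA32_PAT.
 */
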
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersect the PAT memory type with the MTRR memory type and return the
 * result as PAT understands it. (The numeric encodings of a given type
 * differ between PAT and MTRR.) The intersection is based on the
 * "Effective Memory Type" tables in the IA-32 SDM, vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for the MTRR hint to get the effective type in case where
	 * the PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

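/*
 * For illustration (not in the original source), the effect of the helper
 * above is:
 *
 *	pat_x_mtrr_type(s, e, WB) == WB	  iff the MTRRs say the range is WB
 *	pat_x_mtrr_type(s, e, WB) == UC-  otherwise
 *	any non-WB request is passed through unchanged
 *
 * so a /dev/mem-style WB request on a region the MTRRs mark UC ends up
 * tracked as UC- rather than creating an aliased WB mapping.
 */
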
struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address ranges in the legacy ISA
	 * region are tracked as non-RAM. This allows users of /dev/mem
	 * to map portions of the legacy ISA region, even when some of
	 * those portions are listed (or not even listed) with different
	 * e820 types (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

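/*
 * For illustration (not in the original source), the tri-state result:
 *
 *	pat_pagerange_is_ram(...) ==  1	range is entirely RAM
 *	pat_pagerange_is_ram(...) ==  0	range contains no RAM
 *	pat_pagerange_is_ram(...) == -1	mixed RAM/non-RAM; the callers
 *					below treat this as an error
 */
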
/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. The page flags are limited to four types, WB (default), WC, WT
 * and UC-. A WP request fails with -EINVAL, and UC gets redirected to UC-.
 * Setting a new memory type is only allowed for a page mapped with the
 * default WB type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

static u64 sanitize_phys(u64 address)
{
	/*
	 * When changing the memtype for pages containing poison, allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 *
	 * Decoy addresses are not present for 32-bit builds, see
	 * set_mce_nospec().
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		return address & __PHYSICAL_MASK;
	return address;
}

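/*
 * For illustration (not in the original source): __pa() of a decoy
 * address yields a physical address with bit 63 set, which the masking
 * above strips, e.g. (assuming bit 63 lies above __PHYSICAL_MASK, as it
 * does on x86-64):
 *
 *	sanitize_phys(0x8000000012345000ULL) == 0x0000000012345000ULL
 *
 * so the memtype tracking below always sees the real physical address.
 */
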
/*
 * req_type is typically one of:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, this function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, it returns the
 * available type in *new_type on success. On any error it returns a
 * negative value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);
	if (start >= end) {
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
				start, end - 1, cattr_name(req_type));
		return -EINVAL;
	}

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

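/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * a caller that wants a WC view of a non-RAM physical range reserves the
 * memtype first and must pair the call with free_memtype(); paddr and
 * size are hypothetical caller-supplied values:
 *
 *	enum page_cache_mode new_type;
 *	int err;
 *
 *	err = reserve_memtype(paddr, paddr + size,
 *			      _PAGE_CACHE_MODE_WC, &new_type);
 *	if (err)
 *		return err;
 *	...				// map the range using new_type
 *	free_memtype(paddr, paddr + size);
 *
 * new_type may come back different from WC if an overlapping reservation
 * already pinned another compatible type.
 */
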
int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}


/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by a UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true if the PAT memory type of @pfn is UC, UC- or WC.
 * Returns false in all other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
{
	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

	return cm == _PAGE_CACHE_MODE_UC ||
	       cm == _PAGE_CACHE_MODE_UC_MINUS ||
	       cm == _PAGE_CACHE_MODE_WC;
}
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success, the
 * requested type or any other compatible type available for the region is
 * returned in it.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);

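/*
 * Minimal driver-side sketch (illustrative, not part of the original
 * file): a driver that wants write-combined access to an MMIO aperture,
 * e.g. a framebuffer BAR, pairs the two exports above around its mapping.
 * bar_start, bar_size and fb are hypothetical names for the caller's BAR
 * resource and mapping:
 *
 *	if (arch_io_reserve_memtype_wc(bar_start, bar_size))
 *		return -EBUSY;
 *	fb = ioremap_wc(bar_start, bar_size);
 *	...
 *	iounmap(fb);
 *	arch_io_free_memtype_wc(bar_start, bar_size);
 */
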
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(), it
 * also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma that covers a pfnmap gets copied
 * through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by the vma. We need the
		 * starting address and protection from the pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with a single reserve_pfn_range
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vma->vm_flags &= ~VM_PAT;
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed. The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PAT;
}

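/*
 * Lifecycle sketch (illustrative, not part of the original file) of how
 * core mm reaches the hooks above for a full-VMA pfnmap:
 *
 *	remap_pfn_range()	   -> track_pfn_remap()   sets VM_PAT
 *	fork()/copy_page_range()   -> track_pfn_copy()    re-reserves range
 *	vmf_insert_pfn()	   -> track_pfn_insert()  prot from lookup
 *	munmap()/unmap path	   -> untrack_pfn()       frees, clears VM_PAT
 */
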
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

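/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * mmap() handler that wants a write-combined userspace mapping typically
 * wraps its vm_page_prot before remapping; pfn is the caller's physical
 * page frame number:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * remap_pfn_range() then ends up in track_pfn_remap() above, which
 * validates the requested cache mode against the PAT memtype tracking.
 */
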
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);
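
/*
 * Example output (illustrative; the exact entries depend on the machine)
 * of /sys/kernel/debug/x86/pat_memtype_list, as produced by
 * memtype_seq_show() with cattr_name():
 *
 *	PAT memtype list:
 *	write-combining @ 0xc0000000-0xd0000000
 *	uncached-minus @ 0xfed00000-0xfed01000
 */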

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */