/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool boot_cpu_done;

static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);

static inline void pat_disable(const char *reason)
{
	__pat_enabled = 0;
	pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return !!__pat_enabled;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags arch_1 and uncached together to keep track of
 * memory type of pages that have backing page struct.
 *
 * X86 PAT supports 4 different memory types:
 *  - _PAGE_CACHE_MODE_WB
 *  - _PAGE_CACHE_MODE_WC
 *  - _PAGE_CACHE_MODE_UC_MINUS
 *  - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
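
/*
 * The two page flags above encode the four tracked memory types as follows:
 *
 *	PG_uncached  PG_arch_1	memtype
 *	     0           0	WB (default)
 *	     0           1	WC
 *	     1           0	UC-
 *	     1           1	WT
 */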

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

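	/*
	 * Update only the two memtype bits in page->flags: the cmpxchg()
	 * loop makes this read-modify-write safe against concurrent updates
	 * to other page flags without taking a lock.
	 */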
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
void pat_init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
}

#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
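
/*
 * For example, PAT(1, WC) places the WC encoding (PAT_WC == 1) into byte 1
 * of the IA32_PAT MSR image, i.e. it expands to (u64)1 << 8.
 */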

static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!cpu_has_pat) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

	if (!pat_enabled())
		goto done;

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT MSR is 0, disabled.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);

done:
	pat_init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
	if (!pat_enabled())
		return;

	if (!cpu_has_pat) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!pat_enabled()) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable). This
		 * setup is the same as the BIOS default setup when the system
		 * has PAT but the "nopat" boot option has been specified. This
		 * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);

	} else if ((c->x86_vendor == X86_VENDOR_INTEL) &&
		   (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
		    ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we don't
		 * use those.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      PAT bit unused
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support. We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored. This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example for such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 * PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WC : Reserved
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
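		/*
		 * For reference, the layout below works out to an IA32_PAT
		 * value of 0x0407010600070106 (WB WC UC- UC WB WC UC- WT).
		 */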
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

struct pagerange_state {
	unsigned long	cur_pfn;
	int		ram;
	int		not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

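/*
 * Returns 1 if the whole (non-ISA) range is RAM, 0 if none of it is, and
 * -1 if it is only partially RAM (callers treat that as an error).
 */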
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/..)
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * The page flags are limited to four types, WB (default), WC, WT and UC-.
 * WP request fails with -EINVAL, and UC gets redirected to UC-. Setting
 * a new memory type is only allowed for a page mapped with the default WB
 * type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
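
/*
 * Illustrative usage (modelled on io_reserve_memtype() below): reserve a
 * range, use whatever compatible type is handed back, and release it with
 * free_memtype() when done:
 *
 *	enum page_cache_mode new_type;
 *
 *	if (!reserve_memtype(start, end, _PAGE_CACHE_MODE_WC, &new_type)) {
 *		... map the range using new_type ...
 *		free_memtype(start, end);
 *	}
 */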

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}
601
f0970c13 602
637b86e7
VP
603/**
604 * lookup_memtype - Looksup the memory type for a physical address
605 * @paddr: physical address of which memory type needs to be looked up
606 *
607 * Only to be called when PAT is enabled
608 *
2a374698 609 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
35a5a104 610 * or _PAGE_CACHE_MODE_WT.
637b86e7 611 */
2a374698 612static enum page_cache_mode lookup_memtype(u64 paddr)
637b86e7 613{
2a374698 614 enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
637b86e7
VP
615 struct memtype *entry;
616
8a271389 617 if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
637b86e7
VP
618 return rettype;
619
620 if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
621 struct page *page;
637b86e7 622
35a5a104
TK
623 page = pfn_to_page(paddr >> PAGE_SHIFT);
624 return get_page_memtype(page);
637b86e7
VP
625 }
626
627 spin_lock(&memtype_lock);
628
9e41a49a 629 entry = rbt_memtype_lookup(paddr);
637b86e7
VP
630 if (entry != NULL)
631 rettype = entry->type;
632 else
2a374698 633 rettype = _PAGE_CACHE_MODE_UC_MINUS;
637b86e7
VP
634
635 spin_unlock(&memtype_lock);
636 return rettype;
637}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
				current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled() &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		pcm = _PAGE_CACHE_MODE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				     (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return 0;

	/* Set prot based on lookup */
	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
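/*
 * Note: VM_PAT is only set by track_pfn_remap() when the reservation covers
 * the whole vma, so untracking is a no-op for partial pfn mappings.
 */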
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

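/*
 * With debugfs mounted at its usual location and PAT enabled, the currently
 * tracked reservations can be read from
 * /sys/kernel/debug/x86/pat_memtype_list.
 */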
static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */