Commit | Line | Data |
---|---|---|
2e5d9c85 | 1 | /* |
2 | * Handle caching attributes in page tables (PAT) | |
3 | * | |
4 | * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | |
5 | * Suresh B Siddha <suresh.b.siddha@intel.com> | |
6 | * | |
7 | * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. | |
8 | */ | |
9 | ||
ad2cde16 IM | 10 | #include <linux/seq_file.h> |
11 | #include <linux/bootmem.h> | |
12 | #include <linux/debugfs.h> | |
2e5d9c85 | 13 | #include <linux/kernel.h> |
92b9af9e | 14 | #include <linux/module.h> |
2e5d9c85 | 15 | #include <linux/gfp.h> |
ad2cde16 | 16 | #include <linux/mm.h> |
2e5d9c85 | 17 | #include <linux/fs.h> |
18 | ||
ad2cde16 | 19 | #include <asm/cacheflush.h> |
2e5d9c85 | 20 | #include <asm/processor.h> |
ad2cde16 | 21 | #include <asm/tlbflush.h> |
2e5d9c85 | 22 | #include <asm/pgtable.h> |
2e5d9c85 | 23 | #include <asm/fcntl.h> |
ad2cde16 | 24 | #include <asm/e820.h> |
2e5d9c85 | 25 | #include <asm/mtrr.h> |
ad2cde16 IM | 26 | #include <asm/page.h> |
27 | #include <asm/msr.h> | |
28 | #include <asm/pat.h> | |
e7f260a2 | 29 | #include <asm/io.h> |
2e5d9c85 | 30 | |
8d4a4300 | 31 | #ifdef CONFIG_X86_PAT |
499f8f84 | 32 | int __read_mostly pat_enabled = 1; |
2e5d9c85 | 33 | |
31f4d870 | 34 | void __cpuinit pat_disable(char *reason) |
2e5d9c85 | 35 | { |
499f8f84 | 36 | pat_enabled = 0; |
8d4a4300 | 37 | printk(KERN_INFO "%s\n", reason); |
2e5d9c85 | 38 | } |
2e5d9c85 | 39 | |
be524fb9 | 40 | static int __init nopat(char *str) |
2e5d9c85 | 41 | { |
8d4a4300 | 42 | pat_disable("PAT support disabled."); |
2e5d9c85 | 43 | return 0; |
44 | } | |
8d4a4300 TG | 45 | early_param("nopat", nopat); |
46 | #endif | |
47 | ||
77b52b4c VP | 48 | |
49 | static int debug_enable; | |
ad2cde16 | 50 | |
77b52b4c VP | 51 | static int __init pat_debug_setup(char *str) |
52 | { | |
53 | debug_enable = 1; | |
54 | return 0; | |
55 | } | |
56 | __setup("debugpat", pat_debug_setup); | |
57 | ||
58 | #define dprintk(fmt, arg...) \ | |
59 | do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0) | |
60 | ||
61 | ||
8d4a4300 | 62 | static u64 __read_mostly boot_pat_state; |
2e5d9c85 | 63 | |
64 | enum { | |
65 | PAT_UC = 0, /* uncached */ | |
66 | PAT_WC = 1, /* Write combining */ | |
67 | PAT_WT = 4, /* Write Through */ | |
68 | PAT_WP = 5, /* Write Protected */ | |
69 | PAT_WB = 6, /* Write Back (default) */ | |
70 | PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ |
71 | }; | |
72 | ||
cd7a4e93 | 73 | #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) |
2e5d9c85 | 74 | |
75 | void pat_init(void) | |
76 | { | |
77 | u64 pat; | |
78 | ||
499f8f84 | 79 | if (!pat_enabled) |
2e5d9c85 | 80 | return; |
81 | ||
8d4a4300 | 82 | /* Paranoia check. */ |
97cfab6a | 83 | if (!cpu_has_pat && boot_pat_state) { |
8d4a4300 | 84 | /* |
97cfab6a | 85 | * If this happens we are on a secondary CPU, but |
8d4a4300 TG | 86 | * switched to PAT on the boot CPU. We have no way to |
87 | * undo PAT. | |
97cfab6a AH | 88 | */ |
89 | printk(KERN_ERR "PAT enabled, " | |
90 | "but not supported by secondary CPU\n"); | |
91 | BUG(); | |
8d4a4300 | 92 | } |
2e5d9c85 | 93 | |
94 | /* Set PWT to Write-Combining. All other bits stay the same */ | |
95 | /* | |
96 | * PTE encoding used in Linux: | |
97 | * PAT | |
98 | * |PCD | |
99 | * ||PWT | |
100 | * ||| | |
101 | * 000 WB _PAGE_CACHE_WB | |
102 | * 001 WC _PAGE_CACHE_WC | |
103 | * 010 UC- _PAGE_CACHE_UC_MINUS | |
104 | * 011 UC _PAGE_CACHE_UC | |
105 | * PAT bit unused | |
106 | */ | |
cd7a4e93 AH | 107 | pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) | |
108 | PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC); | |
2e5d9c85 | 109 | |
110 | /* Boot CPU check */ | |
8d4a4300 | 111 | if (!boot_pat_state) |
2e5d9c85 | 112 | rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); |
2e5d9c85 | 113 | |
114 | wrmsrl(MSR_IA32_CR_PAT, pat); | |
115 | printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", | |
116 | smp_processor_id(), boot_pat_state, pat); | |
117 | } | |
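
A quick aside on what the eight PAT() terms produce: each term places one memory-type encoding in one byte of the 64-bit IA32_PAT MSR. Below is a minimal user-space sketch (not kernel code) that reproduces the same arithmetic and prints the value pat_init() writes:

```c
#include <stdio.h>
#include <stdint.h>

/* Encodings and macro reproduced from the listing above. */
enum { PAT_UC = 0, PAT_WC = 1, PAT_WT = 4, PAT_WP = 5,
       PAT_WB = 6, PAT_UC_MINUS = 7 };
#define PAT(x, y) ((uint64_t)PAT_ ## y << ((x) * 8))

int main(void)
{
	uint64_t pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		       PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Prints 0x0007010600070106: entries 4-7 mirror entries 0-3. */
	printf("IA32_PAT = 0x%016llx\n", (unsigned long long)pat);
	return 0;
}
```
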
118 | ||
119 | #undef PAT | |
120 | ||
121 | static char *cattr_name(unsigned long flags) | |
122 | { | |
123 | switch (flags & _PAGE_CACHE_MASK) { | |
cd7a4e93 AH | 124 | case _PAGE_CACHE_UC: return "uncached"; |
125 | case _PAGE_CACHE_UC_MINUS: return "uncached-minus"; | |
126 | case _PAGE_CACHE_WB: return "write-back"; | |
127 | case _PAGE_CACHE_WC: return "write-combining"; | |
128 | default: return "broken"; | |
2e5d9c85 | 129 | } |
130 | } | |
131 | ||
132 | /* | |
133 | * The global memtype list keeps track of memory type for specific | |
134 | * physical memory areas. Conflicting memory types in different | |
135 | * mappings can cause CPU cache corruption. To avoid this we keep track. | |
136 | * | |
137 | * The list is sorted based on starting address and can contain multiple | |
138 | * entries for each address (this allows reference counting for overlapping | |
139 | * areas). All the aliases have the same cache attributes of course. | |
140 | * Zero attributes are represented as holes. | |
141 | * | |
142 | * Currently the data structure is a list because the number of mappings | |
143 | * is expected to be relatively small. If this should become a problem, | |
144 | * it could be changed to an rbtree or similar. | |
145 | * | |
146 | * memtype_lock protects the whole list. | |
147 | */ | |
148 | ||
149 | struct memtype { | |
ad2cde16 IM | 150 | u64 start; |
151 | u64 end; | |
152 | unsigned long type; | |
153 | struct list_head nd; | |
2e5d9c85 | 154 | }; |
155 | ||
156 | static LIST_HEAD(memtype_list); | |
ad2cde16 | 157 | static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ |
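
To make the "duplicate entries act as reference counts" rule concrete, here is a hedged user-space mock of the list discipline described above. All names here (demo_memtype, demo_reserve) are invented for illustration and do not exist in the kernel:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_memtype {
	uint64_t start, end;
	int type;
	struct demo_memtype *next;
};

static struct demo_memtype *head;

/* Reject overlaps whose type differs; otherwise insert sorted by start.
 * Identical-type overlaps simply coexist, i.e. duplicates are refcounts. */
static int demo_reserve(uint64_t start, uint64_t end, int type)
{
	struct demo_memtype **pp, *n;

	for (n = head; n; n = n->next)
		if (start < n->end && end > n->start && n->type != type)
			return -1;

	for (pp = &head; *pp && (*pp)->start < start; pp = &(*pp)->next)
		;
	n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->start = start;
	n->end = end;
	n->type = type;
	n->next = *pp;
	*pp = n;
	return 0;
}

int main(void)
{
	printf("%d\n", demo_reserve(0x1000, 0x2000, 1));  /* 0: first WC    */
	printf("%d\n", demo_reserve(0x1000, 0x2000, 1));  /* 0: WC alias    */
	printf("%d\n", demo_reserve(0x1800, 0x2800, 0));  /* -1: UC overlap */
	return 0;
}
```
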
2e5d9c85 | 158 | |
159 | /* | |
160 | * Takes the intersection of the PAT memory type and the MTRR memory type | |
161 | * and returns the resulting memory type as PAT understands it. | |
162 | * (The type values in PAT and MTRR do not share the same encoding.) | |
163 | * The intersection is based on the "Effective Memory Type" tables in the | |
164 | * IA-32 SDM, vol 3a. | |
165 | */ | |
6cf514fc | 166 | static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type) |
2e5d9c85 | 167 | { |
c26421d0 VP | 168 | /* |
169 | * Look for MTRR hint to get the effective type in case where PAT | |
170 | * request is for WB. | |
171 | */ | |
dd0c7c49 AH | 172 | if (req_type == _PAGE_CACHE_WB) { |
173 | u8 mtrr_type; | |
174 | ||
175 | mtrr_type = mtrr_type_lookup(start, end); | |
176 | if (mtrr_type == MTRR_TYPE_UNCACHABLE) | |
177 | return _PAGE_CACHE_UC; | |
178 | if (mtrr_type == MTRR_TYPE_WRCOMB) | |
179 | return _PAGE_CACHE_WC; | |
180 | } | |
181 | ||
182 | return req_type; | |
2e5d9c85 | 183 | } |
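
A hedged stand-in for the decision table above, runnable in user space with the MTRR lookup stubbed out. The constants only loosely mirror the kernel's MTRR_TYPE_* and _PAGE_CACHE_* values and are assumptions for illustration:

```c
#include <stdio.h>

enum { MTRR_UNCACHABLE = 0, MTRR_WRCOMB = 1, MTRR_WRBACK = 6 };
enum { CACHE_UC, CACHE_WC, CACHE_UC_MINUS, CACHE_WB };

static int effective_type(int req, int mtrr)
{
	if (req == CACHE_WB) {		/* only WB requests consult MTRRs */
		if (mtrr == MTRR_UNCACHABLE)
			return CACHE_UC;
		if (mtrr == MTRR_WRCOMB)
			return CACHE_WC;
	}
	return req;			/* everything else passes through */
}

int main(void)
{
	printf("%d\n", effective_type(CACHE_WB, MTRR_UNCACHABLE)); /* UC demotion */
	printf("%d\n", effective_type(CACHE_WB, MTRR_WRCOMB));     /* WC demotion */
	printf("%d\n", effective_type(CACHE_WC, MTRR_UNCACHABLE)); /* WC unchanged */
	return 0;
}
```
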
184 | ||
ad2cde16 IM | 185 | static int |
186 | chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type) | |
64fe44c3 AH | 187 | { |
188 | if (new->type != entry->type) { | |
189 | if (type) { | |
190 | new->type = entry->type; | |
191 | *type = entry->type; | |
192 | } else | |
193 | goto conflict; | |
194 | } | |
195 | ||
196 | /* check overlaps with more than one entry in the list */ | |
197 | list_for_each_entry_continue(entry, &memtype_list, nd) { | |
198 | if (new->end <= entry->start) | |
199 | break; | |
200 | else if (new->type != entry->type) | |
201 | goto conflict; | |
202 | } | |
203 | return 0; | |
204 | ||
205 | conflict: | |
206 | printk(KERN_INFO "%s:%d conflicting memory types " | |
207 | "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start, | |
208 | new->end, cattr_name(new->type), cattr_name(entry->type)); | |
209 | return -EBUSY; | |
210 | } | |
211 | ||
80c5e73d VP | 212 | static struct memtype *cached_entry; |
213 | static u64 cached_start; | |
214 | ||
be03d9e8 SS | 215 | static int pat_pagerange_is_ram(unsigned long start, unsigned long end) |
216 | { | |
217 | int ram_page = 0, not_rampage = 0; | |
218 | unsigned long page_nr; | |
219 | ||
220 | for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT); | |
221 | ++page_nr) { | |
222 | /* | |
223 | * For legacy reasons, the physical address range in the legacy ISA | |
224 | * region is tracked as non-RAM. This allows users of /dev/mem | |
225 | * to map portions of the legacy ISA region, even when some of | |
226 | * those portions are listed (or not even listed) with different | |
227 | * e820 types (RAM/reserved/..). | |
228 | */ | |
229 | if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) && | |
230 | page_is_ram(page_nr)) | |
231 | ram_page = 1; | |
232 | else | |
233 | not_rampage = 1; | |
234 | ||
235 | if (ram_page == not_rampage) | |
236 | return -1; | |
237 | } | |
238 | ||
239 | return ram_page; | |
240 | } | |
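
The contract is tri-state: 1 when every page is RAM, 0 when none is, and -1 for a mix, which callers treat as an error. A toy user-space mock of that contract, with page_is_ram() replaced by an invented predicate and the ISA carve-out omitted:

```c
#include <stdio.h>

/* Invented predicate: pretend only pfns 0x100..0x1ff are RAM. */
static int toy_page_is_ram(unsigned long pfn)
{
	return pfn >= 0x100 && pfn < 0x200;
}

static int toy_pagerange_is_ram(unsigned long start_pfn, unsigned long end_pfn)
{
	int ram = 0, not_ram = 0;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (toy_page_is_ram(pfn))
			ram = 1;
		else
			not_ram = 1;
		if (ram && not_ram)
			return -1;	/* mixed range: caller must refuse */
	}
	return ram;			/* 1 = all RAM, 0 = none */
}

int main(void)
{
	printf("%d\n", toy_pagerange_is_ram(0x100, 0x180)); /* 1  */
	printf("%d\n", toy_pagerange_is_ram(0x300, 0x380)); /* 0  */
	printf("%d\n", toy_pagerange_is_ram(0x1f0, 0x210)); /* -1 */
	return 0;
}
```
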
241 | ||
9542ada8 SS | 242 | /* |
243 | * For RAM pages, mark the pages as non WB memory type using | |
244 | * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or | |
245 | * set_memory_wc() on a RAM page at a time before marking it as WB again. | |
246 | * This is ok, because only one driver will be owning the page and | |
247 | * doing set_memory_*() calls. | |
248 | * | |
249 | * For now, we use PageNonWB to track that the RAM page is being mapped | |
250 | * as non WB. In future, we will have to use one more flag | |
251 | * (or some other mechanism in page_struct) to distinguish between | |
252 | * UC and WC mapping. | |
253 | */ | |
254 | static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, | |
ad2cde16 | 255 | unsigned long *new_type) |
9542ada8 SS | 256 | { |
257 | struct page *page; | |
258 | u64 pfn, end_pfn; | |
259 | ||
260 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
261 | page = pfn_to_page(pfn); | |
262 | if (page_mapped(page) || PageNonWB(page)) | |
263 | goto out; | |
264 | ||
265 | SetPageNonWB(page); | |
266 | } | |
267 | return 0; | |
268 | ||
269 | out: | |
270 | end_pfn = pfn; | |
271 | for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) { | |
272 | page = pfn_to_page(pfn); | |
273 | ClearPageNonWB(page); | |
274 | } | |
275 | ||
276 | return -EINVAL; | |
277 | } | |
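
The PageNonWB rule above serves callers like the following hedged kernel-context sketch, where a single owner flips a RAM buffer out of WB and back. set_memory_uc()/set_memory_wb() are the real entry points declared in <asm/cacheflush.h>; the buffer, page order, and error handling here are simplified assumptions:

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>

static void *demo_buf;

static int demo_map_uncached(void)
{
	demo_buf = (void *)__get_free_pages(GFP_KERNEL, 2); /* 4 RAM pages */
	if (!demo_buf)
		return -ENOMEM;

	/* Marks each page PageNonWB via reserve_ram_pages_type(). */
	return set_memory_uc((unsigned long)demo_buf, 4);
}

static void demo_unmap_uncached(void)
{
	/* Clears PageNonWB via free_ram_pages_type(). */
	set_memory_wb((unsigned long)demo_buf, 4);
	free_pages((unsigned long)demo_buf, 2);
}
```
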
278 | ||
279 | static int free_ram_pages_type(u64 start, u64 end) | |
280 | { | |
281 | struct page *page; | |
282 | u64 pfn, end_pfn; | |
283 | ||
284 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | |
285 | page = pfn_to_page(pfn); | |
286 | if (page_mapped(page) || !PageNonWB(page)) | |
287 | goto out; | |
288 | ||
289 | ClearPageNonWB(page); | |
290 | } | |
291 | return 0; | |
292 | ||
293 | out: | |
294 | end_pfn = pfn; | |
295 | for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) { | |
296 | page = pfn_to_page(pfn); | |
297 | SetPageNonWB(page); | |
298 | } | |
299 | return -EINVAL; | |
300 | } | |
301 | ||
e7f260a2 | 302 | /* |
303 | * req_type typically has one of the following values: | |
304 | * - _PAGE_CACHE_WB | |
305 | * - _PAGE_CACHE_WC | |
306 | * - _PAGE_CACHE_UC_MINUS | |
307 | * - _PAGE_CACHE_UC | |
308 | * | |
309 | * req_type can also have the special value '-1', when the requester wants | |
310 | * to inherit the memory type from the MTRR (if WB) or existing PAT, defaulting to UC_MINUS. | |
311 | * | |
ac97991e AH | 312 | * If new_type is NULL, the function returns an error if it cannot reserve the | |
313 | * region with req_type. If new_type is non-NULL, the function returns the | |
314 | * available type in new_type when there is no error. On any error | |
e7f260a2 | 315 | * it returns a negative value. |
316 | */ | |
2e5d9c85 | 317 | int reserve_memtype(u64 start, u64 end, unsigned long req_type, |
ad2cde16 | 318 | unsigned long *new_type) |
2e5d9c85 | 319 | { |
ac97991e | 320 | struct memtype *new, *entry; |
2e5d9c85 | 321 | unsigned long actual_type; |
f6887264 | 322 | struct list_head *where; |
9542ada8 | 323 | int is_range_ram; |
ad2cde16 | 324 | int err = 0; |
2e5d9c85 | 325 | |
ad2cde16 | 326 | BUG_ON(start >= end); /* end is exclusive */ |
69e26be9 | 327 | |
499f8f84 | 328 | if (!pat_enabled) { |
e7f260a2 | 329 | /* This is identical to page table setting without PAT */ |
ac97991e AH | 330 | if (new_type) { |
331 | if (req_type == -1) | |
332 | *new_type = _PAGE_CACHE_WB; | |
333 | else | |
334 | *new_type = req_type & _PAGE_CACHE_MASK; | |
e7f260a2 | 335 | } |
2e5d9c85 | 336 | return 0; |
337 | } | |
338 | ||
339 | /* Low ISA region is always mapped WB in page table. No need to track */ | |
bcc643dc | 340 | if (is_ISA_range(start, end - 1)) { |
ac97991e AH | 341 | if (new_type) |
342 | *new_type = _PAGE_CACHE_WB; | |
2e5d9c85 | 343 | return 0; |
344 | } | |
345 | ||
e7f260a2 | 346 | if (req_type == -1) { |
347 | /* | |
c26421d0 VP | 348 | * Call mtrr_lookup to get the type hint. This is an |
349 | * optimization for /dev/mem mmap'ers into WB memory (BIOS | |
350 | * tools and ACPI tools). Use WB request for WB memory and use | |
351 | * UC_MINUS otherwise. | |
e7f260a2 | 352 | */ |
353 | u8 mtrr_type = mtrr_type_lookup(start, end); | |
e7f260a2 | 354 | |
69e26be9 | 355 | if (mtrr_type == MTRR_TYPE_WRBACK) |
e7f260a2 | 356 | actual_type = _PAGE_CACHE_WB; |
69e26be9 | 357 | else |
e7f260a2 | 358 | actual_type = _PAGE_CACHE_UC_MINUS; |
ad2cde16 | 359 | } else { |
69e26be9 AH | 360 | actual_type = pat_x_mtrr_type(start, end, |
361 | req_type & _PAGE_CACHE_MASK); | |
ad2cde16 | 362 | } |
2e5d9c85 | 363 | |
95971342 SS | 364 | if (new_type) |
365 | *new_type = actual_type; | |
366 | ||
be03d9e8 SS | 367 | is_range_ram = pat_pagerange_is_ram(start, end); |
368 | if (is_range_ram == 1) | |
369 | return reserve_ram_pages_type(start, end, req_type, | |
370 | new_type); | |
371 | else if (is_range_ram < 0) | |
372 | return -EINVAL; | |
9542ada8 | 373 | |
ac97991e AH | 374 | new = kmalloc(sizeof(struct memtype), GFP_KERNEL); |
375 | if (!new) | |
2e5d9c85 | 376 | return -ENOMEM; |
377 | ||
ad2cde16 IM | 378 | new->start = start; |
379 | new->end = end; | |
380 | new->type = actual_type; | |
2e5d9c85 | 381 | |
2e5d9c85 | 382 | spin_lock(&memtype_lock); |
383 | ||
80c5e73d VP | 384 | if (cached_entry && start >= cached_start) |
385 | entry = cached_entry; | |
386 | else | |
387 | entry = list_entry(&memtype_list, struct memtype, nd); | |
388 | ||
2e5d9c85 | 389 | /* Search for existing mapping that overlaps the current range */ |
f6887264 | 390 | where = NULL; |
80c5e73d | 391 | list_for_each_entry_continue(entry, &memtype_list, nd) { |
33af9039 | 392 | if (end <= entry->start) { |
f6887264 | 393 | where = entry->nd.prev; |
80c5e73d | 394 | cached_entry = list_entry(where, struct memtype, nd); |
2e5d9c85 | 395 | break; |
33af9039 | 396 | } else if (start <= entry->start) { /* end > entry->start */ |
64fe44c3 | 397 | err = chk_conflict(new, entry, new_type); |
33af9039 AH | 398 | if (!err) { |
399 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | |
400 | entry->start, entry->end); | |
401 | where = entry->nd.prev; | |
80c5e73d VP | 402 | cached_entry = list_entry(where, |
403 | struct memtype, nd); | |
2e5d9c85 | 404 | } |
2e5d9c85 | 405 | break; |
33af9039 | 406 | } else if (start < entry->end) { /* start > entry->start */ |
64fe44c3 | 407 | err = chk_conflict(new, entry, new_type); |
33af9039 AH | 408 | if (!err) { |
409 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | |
410 | entry->start, entry->end); | |
80c5e73d VP | 411 | cached_entry = list_entry(entry->nd.prev, |
412 | struct memtype, nd); | |
413 | ||
414 | /* | |
415 | * Move to right position in the linked | |
416 | * list to add this new entry | |
417 | */ | |
418 | list_for_each_entry_continue(entry, | |
419 | &memtype_list, nd) { | |
420 | if (start <= entry->start) { | |
421 | where = entry->nd.prev; | |
422 | break; | |
423 | } | |
424 | } | |
2e5d9c85 | 425 | } |
2e5d9c85 | 426 | break; |
427 | } | |
428 | } | |
429 | ||
430 | if (err) { | |
3e9c83b3 AH | 431 | printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, " |
432 | "track %s, req %s\n", | |
433 | start, end, cattr_name(new->type), cattr_name(req_type)); | |
ac97991e | 434 | kfree(new); |
2e5d9c85 | 435 | spin_unlock(&memtype_lock); |
ad2cde16 | 436 | |
2e5d9c85 | 437 | return err; |
438 | } | |
439 | ||
80c5e73d VP | 440 | cached_start = start; |
441 | ||
f6887264 AH | 442 | if (where) |
443 | list_add(&new->nd, where); | |
444 | else | |
ac97991e | 445 | list_add_tail(&new->nd, &memtype_list); |
6997ab49 | 446 | |
2e5d9c85 | 447 | spin_unlock(&memtype_lock); |
3e9c83b3 AH | 448 | |
449 | dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", | |
450 | start, end, cattr_name(new->type), cattr_name(req_type), | |
451 | new_type ? cattr_name(*new_type) : "-"); | |
452 | ||
2e5d9c85 | 453 | return err; |
454 | } | |
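
A hedged sketch of the calling convention documented above reserve_memtype(): reserve, honor the possibly-adjusted type returned through new_type, and pair every success with free_memtype(). Written as if it lived in this file (so it can use the static cattr_name()); the physical range and function name are invented:

```c
/* Kernel-context sketch; 0xfd000000 is an invented MMIO base. */
static int demo_reserve_wc(u64 base, u64 size)
{
	unsigned long flags;
	int err;

	err = reserve_memtype(base, base + size, _PAGE_CACHE_WC, &flags);
	if (err)
		return err;

	if (flags != _PAGE_CACHE_WC)
		/* An existing alias forced another type; proceed with 'flags'. */
		printk(KERN_INFO "PAT demo: got %s instead of WC\n",
		       cattr_name(flags));

	/* ... set up the mapping using 'flags', then on teardown: ... */
	free_memtype(base, base + size);
	return 0;
}
```
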
455 | ||
456 | int free_memtype(u64 start, u64 end) | |
457 | { | |
ac97991e | 458 | struct memtype *entry; |
2e5d9c85 | 459 | int err = -EINVAL; |
9542ada8 | 460 | int is_range_ram; |
2e5d9c85 | 461 | |
69e26be9 | 462 | if (!pat_enabled) |
2e5d9c85 | 463 | return 0; |
2e5d9c85 | 464 | |
465 | /* Low ISA region is always mapped WB. No need to track */ | |
bcc643dc | 466 | if (is_ISA_range(start, end - 1)) |
2e5d9c85 | 467 | return 0; |
2e5d9c85 | 468 | |
be03d9e8 SS | 469 | is_range_ram = pat_pagerange_is_ram(start, end); |
470 | if (is_range_ram == 1) | |
471 | return free_ram_pages_type(start, end); | |
472 | else if (is_range_ram < 0) | |
473 | return -EINVAL; | |
9542ada8 | 474 | |
2e5d9c85 | 475 | spin_lock(&memtype_lock); |
ac97991e AH | 476 | list_for_each_entry(entry, &memtype_list, nd) { |
477 | if (entry->start == start && entry->end == end) { | |
80c5e73d VP | 478 | if (cached_entry == entry || cached_start == start) |
479 | cached_entry = NULL; | |
480 | ||
ac97991e AH | 481 | list_del(&entry->nd); |
482 | kfree(entry); | |
2e5d9c85 | 483 | err = 0; |
484 | break; | |
485 | } | |
486 | } | |
487 | spin_unlock(&memtype_lock); | |
488 | ||
489 | if (err) { | |
28eb559b | 490 | printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", |
2e5d9c85 | 491 | current->comm, current->pid, start, end); |
492 | } | |
6997ab49 | 493 | |
77b52b4c | 494 | dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); |
ad2cde16 | 495 | |
2e5d9c85 | 496 | return err; |
497 | } | |
498 | ||
f0970c13 | 499 | |
f0970c13 | 500 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
501 | unsigned long size, pgprot_t vma_prot) | |
502 | { | |
503 | return vma_prot; | |
504 | } | |
505 | ||
d092633b IM | 506 | #ifdef CONFIG_STRICT_DEVMEM |
507 | /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */ | |
0124cecf VP | 508 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
509 | { | |
510 | return 1; | |
511 | } | |
512 | #else | |
9e41bff2 | 513 | /* This check is needed to avoid cache aliasing when PAT is enabled */ |
0124cecf VP | 514 | static inline int range_is_allowed(unsigned long pfn, unsigned long size) |
515 | { | |
516 | u64 from = ((u64)pfn) << PAGE_SHIFT; | |
517 | u64 to = from + size; | |
518 | u64 cursor = from; | |
519 | ||
9e41bff2 RT | 520 | if (!pat_enabled) |
521 | return 1; | |
522 | ||
0124cecf VP | 523 | while (cursor < to) { |
524 | if (!devmem_is_allowed(pfn)) { | |
525 | printk(KERN_INFO | |
526 | "Program %s tried to access /dev/mem between %Lx->%Lx.\n", | |
527 | current->comm, from, to); | |
528 | return 0; | |
529 | } | |
530 | cursor += PAGE_SIZE; | |
531 | pfn++; | |
532 | } | |
533 | return 1; | |
534 | } | |
d092633b | 535 | #endif /* CONFIG_STRICT_DEVMEM */ |
0124cecf | 536 | |
f0970c13 | 537 | int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
538 | unsigned long size, pgprot_t *vma_prot) | |
539 | { | |
e7f260a2 | 540 | u64 offset = ((u64) pfn) << PAGE_SHIFT; |
28df82eb | 541 | unsigned long flags = -1; |
e7f260a2 | 542 | int retval; |
f0970c13 | 543 | |
0124cecf VP | 544 | if (!range_is_allowed(pfn, size)) |
545 | return 0; | |
546 | ||
f0970c13 | 547 | if (file->f_flags & O_SYNC) { |
28df82eb | 548 | flags = _PAGE_CACHE_UC_MINUS; |
f0970c13 | 549 | } |
550 | ||
551 | #ifdef CONFIG_X86_32 | |
552 | /* | |
553 | * On the PPro and successors, the MTRRs are used to set | |
554 | * memory types for physical addresses outside main memory, | |
555 | * so blindly setting UC or PWT on those pages is wrong. | |
556 | * For Pentiums and earlier, the surround logic should disable | |
557 | * caching for the high addresses through the KEN pin, but | |
558 | * we maintain the tradition of paranoia in this code. | |
559 | */ | |
499f8f84 | 560 | if (!pat_enabled && |
cd7a4e93 AH | 561 | !(boot_cpu_has(X86_FEATURE_MTRR) || |
562 | boot_cpu_has(X86_FEATURE_K6_MTRR) || | |
563 | boot_cpu_has(X86_FEATURE_CYRIX_ARR) || | |
564 | boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && | |
565 | (pfn << PAGE_SHIFT) >= __pa(high_memory)) { | |
e7f260a2 | 566 | flags = _PAGE_CACHE_UC; |
f0970c13 | 567 | } |
568 | #endif | |
569 | ||
e7f260a2 | 570 | /* |
28df82eb | 571 | * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot. |
572 | * | |
e7f260a2 | 573 | * Without O_SYNC, we want to get |
574 | * - WB for WB-able memory and no other conflicting mappings | |
575 | * - UC_MINUS for non-WB-able memory with no other conflicting mappings | |
576 | * - Inherit from conflicting mappings otherwise | |
577 | */ | |
28df82eb | 578 | if (flags != -1) { |
e7f260a2 | 579 | retval = reserve_memtype(offset, offset + size, flags, NULL); |
580 | } else { | |
f022bfd5 | 581 | retval = reserve_memtype(offset, offset + size, -1, &flags); |
e7f260a2 | 582 | } |
583 | ||
584 | if (retval < 0) | |
585 | return 0; | |
586 | ||
b5db0e38 LT | 587 | if (((pfn < max_low_pfn_mapped) || |
588 | (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) && | |
589 | ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) { | |
e7f260a2 | 590 | free_memtype(offset, offset + size); |
28eb559b | 591 | printk(KERN_INFO |
e7f260a2 | 592 | "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n", |
593 | current->comm, current->pid, | |
594 | cattr_name(flags), | |
afc85343 | 595 | offset, (unsigned long long)(offset + size)); |
e7f260a2 | 596 | return 0; |
597 | } | |
598 | ||
599 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | | |
600 | flags); | |
f0970c13 | 601 | return 1; |
602 | } | |
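
For reference, the user-space pattern that reaches this function is an mmap of /dev/mem, with O_SYNC selecting the UC_MINUS branch above. A minimal sketch (root privileges assumed; 0xfed00000 is an invented MMIO offset):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* O_SYNC makes phys_mem_access_prot_allowed() request UC_MINUS. */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	void *p;

	if (fd < 0)
		return 1;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, 0xfed00000);
	if (p != MAP_FAILED)
		munmap(p, 4096);
	close(fd);
	return 0;
}
```
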
e7f260a2 | 603 | |
604 | void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) | |
605 | { | |
ad2cde16 | 606 | unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK); |
e7f260a2 | 607 | u64 addr = (u64)pfn << PAGE_SHIFT; |
608 | unsigned long flags; | |
e7f260a2 | 609 | |
610 | reserve_memtype(addr, addr + size, want_flags, &flags); | |
611 | if (flags != want_flags) { | |
28eb559b | 612 | printk(KERN_INFO |
e7f260a2 | 613 | "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n", |
614 | current->comm, current->pid, | |
615 | cattr_name(want_flags), | |
afc85343 | 616 | addr, (unsigned long long)(addr + size), |
e7f260a2 | 617 | cattr_name(flags)); |
618 | } | |
619 | } | |
620 | ||
621 | void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) | |
622 | { | |
623 | u64 addr = (u64)pfn << PAGE_SHIFT; | |
624 | ||
625 | free_memtype(addr, addr + size); | |
626 | } | |
627 | ||
5899329b | 628 | /* |
629 | * Internal interface to reserve a range of physical memory with prot. | |
630 | * Reserves non-RAM regions only; after a successful reserve_memtype, | |
631 | * this function also keeps the identity mapping (if any) in sync with the new prot. | |
632 | */ | |
cdecff68 | 633 | static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, |
634 | int strict_prot) | |
5899329b | 635 | { |
636 | int is_ram = 0; | |
b5db0e38 | 637 | int id_sz, ret; |
5899329b | 638 | unsigned long flags; |
cdecff68 | 639 | unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); |
5899329b | 640 | |
be03d9e8 | 641 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
5899329b | 642 | |
be03d9e8 SS | 643 | /* |
644 | * reserve_pfn_range() doesn't support RAM pages. | |
645 | */ | |
646 | if (is_ram != 0) | |
647 | return -EINVAL; | |
5899329b | 648 | |
649 | ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); | |
650 | if (ret) | |
651 | return ret; | |
652 | ||
653 | if (flags != want_flags) { | |
cdecff68 | 654 | if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) { |
655 | free_memtype(paddr, paddr + size); | |
656 | printk(KERN_ERR "%s:%d map pfn expected mapping type %s" | |
657 | " for %Lx-%Lx, got %s\n", | |
658 | current->comm, current->pid, | |
659 | cattr_name(want_flags), | |
660 | (unsigned long long)paddr, | |
661 | (unsigned long long)(paddr + size), | |
662 | cattr_name(flags)); | |
663 | return -EINVAL; | |
664 | } | |
665 | /* | |
666 | * We allow returning different type than the one requested in | |
667 | * non strict case. | |
668 | */ | |
669 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | |
670 | (~_PAGE_CACHE_MASK)) | | |
671 | flags); | |
5899329b | 672 | } |
673 | ||
b5db0e38 LT | 674 | /* Need to keep identity mapping in sync */ |
675 | if (paddr >= __pa(high_memory)) | |
676 | return 0; | |
677 | ||
678 | id_sz = (__pa(high_memory) < paddr + size) ? | |
679 | __pa(high_memory) - paddr : | |
680 | size; | |
681 | ||
682 | if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) { | |
5899329b | 683 | free_memtype(paddr, paddr + size); |
684 | printk(KERN_ERR | |
685 | "%s:%d reserve_pfn_range ioremap_change_attr failed %s " | |
686 | "for %Lx-%Lx\n", | |
687 | current->comm, current->pid, | |
688 | cattr_name(flags), | |
689 | (unsigned long long)paddr, | |
690 | (unsigned long long)(paddr + size)); | |
691 | return -EINVAL; | |
692 | } | |
693 | return 0; | |
694 | } | |
695 | ||
696 | /* | |
697 | * Internal interface to free a range of physical memory. | |
698 | * Frees non RAM regions only. | |
699 | */ | |
700 | static void free_pfn_range(u64 paddr, unsigned long size) | |
701 | { | |
702 | int is_ram; | |
703 | ||
be03d9e8 | 704 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
5899329b | 705 | if (is_ram == 0) |
706 | free_memtype(paddr, paddr + size); | |
707 | } | |
708 | ||
709 | /* | |
710 | * track_pfn_vma_copy is called when vma that is covering the pfnmap gets | |
711 | * copied through copy_page_range(). | |
712 | * | |
713 | * If the vma has a linear pfn mapping for the entire range, we get the prot | |
714 | * from the pte and reserve the entire vma range with a single reserve_pfn_range call. | |
715 | * Otherwise, we reserve the entire vma range, walking the PTEs page | |
716 | * by page to get the physical address and protection. | |
717 | */ | |
718 | int track_pfn_vma_copy(struct vm_area_struct *vma) | |
719 | { | |
720 | int retval = 0; | |
721 | unsigned long i, j; | |
c1c15b65 | 722 | resource_size_t paddr; |
982d789a | 723 | unsigned long prot; |
5899329b | 724 | unsigned long vma_start = vma->vm_start; |
725 | unsigned long vma_end = vma->vm_end; | |
726 | unsigned long vma_size = vma_end - vma_start; | |
cdecff68 | 727 | pgprot_t pgprot; |
5899329b | 728 | |
729 | if (!pat_enabled) | |
730 | return 0; | |
731 | ||
732 | if (is_linear_pfn_mapping(vma)) { | |
733 | /* | |
982d789a | 734 | * reserve the whole chunk covered by vma. We need the |
735 | * starting address and protection from pte. | |
5899329b | 736 | */ |
982d789a | 737 | if (follow_phys(vma, vma_start, 0, &prot, &paddr)) { |
5899329b | 738 | WARN_ON_ONCE(1); |
982d789a | 739 | return -EINVAL; |
5899329b | 740 | } |
cdecff68 | 741 | pgprot = __pgprot(prot); |
742 | return reserve_pfn_range(paddr, vma_size, &pgprot, 1); | |
5899329b | 743 | } |
744 | ||
745 | /* reserve entire vma page by page, using pfn and prot from pte */ | |
746 | for (i = 0; i < vma_size; i += PAGE_SIZE) { | |
982d789a | 747 | if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) |
5899329b | 748 | continue; |
749 | ||
cdecff68 | 750 | pgprot = __pgprot(prot); |
751 | retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1); | |
5899329b | 752 | if (retval) |
753 | goto cleanup_ret; | |
754 | } | |
755 | return 0; | |
756 | ||
757 | cleanup_ret: | |
758 | /* Reserve error: Cleanup partial reservation and return error */ | |
759 | for (j = 0; j < i; j += PAGE_SIZE) { | |
982d789a | 760 | if (follow_phys(vma, vma_start + j, 0, &prot, &paddr)) |
5899329b | 761 | continue; |
762 | ||
5899329b | 763 | free_pfn_range(paddr, PAGE_SIZE); |
764 | } | |
765 | ||
766 | return retval; | |
767 | } | |
768 | ||
769 | /* | |
770 | * track_pfn_vma_new is called when a _new_ pfn mapping is being established | |
771 | * for physical range indicated by pfn and size. | |
772 | * | |
773 | * prot is passed in as a parameter for the new mapping. If the vma has a | |
774 | * linear pfn mapping for the entire range reserve the entire vma range with | |
775 | * single reserve_pfn_range call. | |
776 | * Otherwise, we look at the pfn and size and reserve only the specified range | |
777 | * page by page. | |
778 | * | |
779 | * Note that this function can be called with caller trying to map only a | |
780 | * subrange/page inside the vma. | |
781 | */ | |
e4b866ed | 782 | int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, |
5899329b | 783 | unsigned long pfn, unsigned long size) |
784 | { | |
785 | int retval = 0; | |
786 | unsigned long i, j; | |
c1c15b65 PA | 787 | resource_size_t base_paddr; |
788 | resource_size_t paddr; | |
5899329b | 789 | unsigned long vma_start = vma->vm_start; |
790 | unsigned long vma_end = vma->vm_end; | |
791 | unsigned long vma_size = vma_end - vma_start; | |
792 | ||
793 | if (!pat_enabled) | |
794 | return 0; | |
795 | ||
796 | if (is_linear_pfn_mapping(vma)) { | |
797 | /* reserve the whole chunk starting from vm_pgoff */ | |
c1c15b65 | 798 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
cdecff68 | 799 | return reserve_pfn_range(paddr, vma_size, prot, 0); |
5899329b | 800 | } |
801 | ||
802 | /* reserve page by page using pfn and size */ | |
c1c15b65 | 803 | base_paddr = (resource_size_t)pfn << PAGE_SHIFT; |
5899329b | 804 | for (i = 0; i < size; i += PAGE_SIZE) { |
805 | paddr = base_paddr + i; | |
cdecff68 | 806 | retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0); |
5899329b | 807 | if (retval) |
808 | goto cleanup_ret; | |
809 | } | |
810 | return 0; | |
811 | ||
812 | cleanup_ret: | |
813 | /* Reserve error: Cleanup partial reservation and return error */ | |
814 | for (j = 0; j < i; j += PAGE_SIZE) { | |
815 | paddr = base_paddr + j; | |
816 | free_pfn_range(paddr, PAGE_SIZE); | |
817 | } | |
818 | ||
819 | return retval; | |
820 | } | |
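
These hooks are invoked from the generic mm code. The usual producer of a linear pfn mapping is a driver mmap handler calling remap_pfn_range() over the whole vma, as in this hedged sketch (the pfn and handler name are invented):

```c
#include <linux/fs.h>
#include <linux/mm.h>

/* Hedged sketch of the usual producer of a "linear pfn mapping". */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0xfd000;	/* invented MMIO pfn */

	/* remap_pfn_range() marks the vma VM_PFNMAP; covering the whole
	 * vma lets track_pfn_vma_new() reserve it as a single chunk. */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
```
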
821 | ||
822 | /* | |
823 | * untrack_pfn_vma is called while unmapping a pfnmap for a region. | |
824 | * untrack can be called for a specific region indicated by pfn and size or | |
825 | * can be for the entire vma (in which case size can be zero). | |
826 | */ | |
827 | void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | |
828 | unsigned long size) | |
829 | { | |
830 | unsigned long i; | |
c1c15b65 | 831 | resource_size_t paddr; |
982d789a | 832 | unsigned long prot; |
5899329b | 833 | unsigned long vma_start = vma->vm_start; |
834 | unsigned long vma_end = vma->vm_end; | |
835 | unsigned long vma_size = vma_end - vma_start; | |
836 | ||
837 | if (!pat_enabled) | |
838 | return; | |
839 | ||
840 | if (is_linear_pfn_mapping(vma)) { | |
841 | /* free the whole chunk starting from vm_pgoff */ | |
c1c15b65 | 842 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
5899329b | 843 | free_pfn_range(paddr, vma_size); |
844 | return; | |
845 | } | |
846 | ||
847 | if (size != 0 && size != vma_size) { | |
848 | /* free page by page, using pfn and size */ | |
c1c15b65 | 849 | paddr = (resource_size_t)pfn << PAGE_SHIFT; |
5899329b | 850 | for (i = 0; i < size; i += PAGE_SIZE) { |
851 | paddr = paddr + i; | |
852 | free_pfn_range(paddr, PAGE_SIZE); | |
853 | } | |
854 | } else { | |
855 | /* free entire vma, page by page, using the pfn from pte */ | |
856 | for (i = 0; i < vma_size; i += PAGE_SIZE) { | |
982d789a | 857 | if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) |
5899329b | 858 | continue; |
859 | ||
5899329b | 860 | free_pfn_range(paddr, PAGE_SIZE); |
861 | } | |
862 | } | |
863 | } | |
864 | ||
2520bd31 | 865 | pgprot_t pgprot_writecombine(pgprot_t prot) |
866 | { | |
867 | if (pat_enabled) | |
868 | return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); | |
869 | else | |
870 | return pgprot_noncached(prot); | |
871 | } | |
92b9af9e | 872 | EXPORT_SYMBOL_GPL(pgprot_writecombine); |
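
A typical consumer, sketched under the assumption of a framebuffer-style driver (the pfn is invented): the caller gets WC when PAT is available and the UC fallback above otherwise, without having to care which.

```c
#include <linux/fs.h>
#include <linux/mm.h>

/* Hedged sketch: WC mapping for an invented framebuffer-like region. */
static int demo_fb_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       0xe0000000UL >> PAGE_SHIFT,	/* invented */
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
```
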
2520bd31 | 873 | |
012f09e7 | 874 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) |
fec0962e | 875 | |
876 | /* get Nth element of the linked list */ | |
877 | static struct memtype *memtype_get_idx(loff_t pos) | |
878 | { | |
879 | struct memtype *list_node, *print_entry; | |
880 | int i = 1; | |
881 | ||
882 | print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL); | |
883 | if (!print_entry) | |
884 | return NULL; | |
885 | ||
886 | spin_lock(&memtype_lock); | |
887 | list_for_each_entry(list_node, &memtype_list, nd) { | |
888 | if (pos == i) { | |
889 | *print_entry = *list_node; | |
890 | spin_unlock(&memtype_lock); | |
891 | return print_entry; | |
892 | } | |
893 | ++i; | |
894 | } | |
895 | spin_unlock(&memtype_lock); | |
896 | kfree(print_entry); | |
ad2cde16 | 897 | |
fec0962e | 898 | return NULL; |
899 | } | |
900 | ||
901 | static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) | |
902 | { | |
903 | if (*pos == 0) { | |
904 | ++*pos; | |
905 | seq_printf(seq, "PAT memtype list:\n"); | |
906 | } | |
907 | ||
908 | return memtype_get_idx(*pos); | |
909 | } | |
910 | ||
911 | static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
912 | { | |
913 | ++*pos; | |
914 | return memtype_get_idx(*pos); | |
915 | } | |
916 | ||
917 | static void memtype_seq_stop(struct seq_file *seq, void *v) | |
918 | { | |
919 | } | |
920 | ||
921 | static int memtype_seq_show(struct seq_file *seq, void *v) | |
922 | { | |
923 | struct memtype *print_entry = (struct memtype *)v; | |
924 | ||
925 | seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), | |
926 | print_entry->start, print_entry->end); | |
927 | kfree(print_entry); | |
ad2cde16 | 928 | |
fec0962e | 929 | return 0; |
930 | } | |
931 | ||
932 | static struct seq_operations memtype_seq_ops = { | |
933 | .start = memtype_seq_start, | |
934 | .next = memtype_seq_next, | |
935 | .stop = memtype_seq_stop, | |
936 | .show = memtype_seq_show, | |
937 | }; | |
938 | ||
939 | static int memtype_seq_open(struct inode *inode, struct file *file) | |
940 | { | |
941 | return seq_open(file, &memtype_seq_ops); | |
942 | } | |
943 | ||
944 | static const struct file_operations memtype_fops = { | |
945 | .open = memtype_seq_open, | |
946 | .read = seq_read, | |
947 | .llseek = seq_lseek, | |
948 | .release = seq_release, | |
949 | }; | |
950 | ||
951 | static int __init pat_memtype_list_init(void) | |
952 | { | |
953 | debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir, | |
954 | NULL, &memtype_fops); | |
955 | return 0; | |
956 | } | |
957 | ||
958 | late_initcall(pat_memtype_list_init); | |
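
Since arch_debugfs_dir corresponds to the x86 directory of debugfs, the list normally appears as /sys/kernel/debug/x86/pat_memtype_list (path an assumption based on that mapping), one `type @ start-end` line per entry in the format of memtype_seq_show(). A small user-space reader:

```c
#include <stdio.h>

int main(void)
{
	char line[128];
	/* Requires debugfs mounted at /sys/kernel/debug and root. */
	FILE *f = fopen("/sys/kernel/debug/x86/pat_memtype_list", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout); /* e.g. "uncached-minus @ 0xfed00000-0xfed01000" */
	fclose(f);
	return 0;
}
```
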
959 | ||
012f09e7 | 960 | #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */ |