git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/x86/include/asm/cacheflush.h
x86/mm/pat: Add set_memory_wt() for Write-Through type
[mirror_ubuntu-artful-kernel.git] / arch / x86 / include / asm / cacheflush.h
CommitLineData
1965aae3
PA
1#ifndef _ASM_X86_CACHEFLUSH_H
2#define _ASM_X86_CACHEFLUSH_H
b2bba72c 3
b2bba72c 4/* Caches aren't brain-dead on the intel. */
cc67ba63 5#include <asm-generic/cacheflush.h>
f05e798a 6#include <asm/special_insns.h>
b2bba72c 7
7219bebd
AV
8/*
9 * The set_memory_* API can be used to change various attributes of a virtual
10 * address range. The attributes include:
623dffb2 11 * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
7219bebd
AV
12 * Executability : eXeutable, NoteXecutable
13 * Read/Write : ReadOnly, ReadWrite
14 * Presence : NotPresent
15 *
0d2eb44f 16 * Within a category, the attributes are mutually exclusive.
7219bebd
AV
17 *
18 * The implementation of this API will take care of various aspects that
19 * are associated with changing such attributes, such as:
20 * - Flushing TLBs
21 * - Flushing CPU caches
22 * - Making sure aliases of the memory behind the mapping don't violate
23 * coherency rules as defined by the CPU in the system.
24 *
25 * What this API does not do:
26 * - Provide exclusion between various callers - including callers that
27 * operation on other mappings of the same physical page
28 * - Restore default attributes when a page is freed
29 * - Guarantee that mappings other than the requested one are
30 * in any state, other than that these do not violate rules for
31 * the CPU you have. Do not depend on any effects on other mappings,
32 * CPUs other than the one you have may have more relaxed rules.
33 * The caller is required to take care of these.
34 */
75cbade8 35
1219333d 36int _set_memory_uc(unsigned long addr, int numpages);
ef354af4 37int _set_memory_wc(unsigned long addr, int numpages);
623dffb2 38int _set_memory_wt(unsigned long addr, int numpages);
1219333d 39int _set_memory_wb(unsigned long addr, int numpages);
75cbade8 40int set_memory_uc(unsigned long addr, int numpages);
ef354af4 41int set_memory_wc(unsigned long addr, int numpages);
623dffb2 42int set_memory_wt(unsigned long addr, int numpages);
75cbade8
AV
43int set_memory_wb(unsigned long addr, int numpages);
44int set_memory_x(unsigned long addr, int numpages);
45int set_memory_nx(unsigned long addr, int numpages);
46int set_memory_ro(unsigned long addr, int numpages);
47int set_memory_rw(unsigned long addr, int numpages);
f62d0f00 48int set_memory_np(unsigned long addr, int numpages);
c9caa02c 49int set_memory_4k(unsigned long addr, int numpages);
75cbade8 50
d75586ad 51int set_memory_array_uc(unsigned long *addr, int addrinarray);
4f646254 52int set_memory_array_wc(unsigned long *addr, int addrinarray);
623dffb2 53int set_memory_array_wt(unsigned long *addr, int addrinarray);
d75586ad
SL
54int set_memory_array_wb(unsigned long *addr, int addrinarray);
55
0f350755 56int set_pages_array_uc(struct page **pages, int addrinarray);
4f646254 57int set_pages_array_wc(struct page **pages, int addrinarray);
623dffb2 58int set_pages_array_wt(struct page **pages, int addrinarray);
0f350755 59int set_pages_array_wb(struct page **pages, int addrinarray);
60
7219bebd
AV
61/*
62 * For legacy compatibility with the old APIs, a few functions
63 * are provided that work on a "struct page".
64 * These functions operate ONLY on the 1:1 kernel mapping of the
65 * memory that the struct page represents, and internally just
66 * call the set_memory_* function. See the description of the
67 * set_memory_* function for more details on conventions.
68 *
69 * These APIs should be considered *deprecated* and are likely going to
70 * be removed in the future.
71 * The reason for this is the implicit operation on the 1:1 mapping only,
72 * making this not a generally useful API.
73 *
74 * Specifically, many users of the old APIs had a virtual address,
75 * called virt_to_page() or vmalloc_to_page() on that address to
76 * get a struct page* that the old API required.
77 * To convert these cases, use set_memory_*() on the original
78 * virtual address, do not use these functions.
79 */
80
81int set_pages_uc(struct page *page, int numpages);
82int set_pages_wb(struct page *page, int numpages);
83int set_pages_x(struct page *page, int numpages);
84int set_pages_nx(struct page *page, int numpages);
85int set_pages_ro(struct page *page, int numpages);
86int set_pages_rw(struct page *page, int numpages);
87
88
4c61afcd 89void clflush_cache_range(void *addr, unsigned int size);
b2bba72c 90
b2bba72c
TG
91#ifdef CONFIG_DEBUG_RODATA
92void mark_rodata_ro(void);
7bfeab9a 93extern const int rodata_test_data;
502f6604 94extern int kernel_set_to_readonly;
16239630
SR
95void set_kernel_text_rw(void);
96void set_kernel_text_ro(void);
97#else
98static inline void set_kernel_text_rw(void) { }
99static inline void set_kernel_text_ro(void) { }
b2bba72c 100#endif
7bfeab9a 101
edeed305 102#ifdef CONFIG_DEBUG_RODATA_TEST
7bfeab9a 103int rodata_test(void);
edeed305 104#else
7bfeab9a 105static inline int rodata_test(void)
edeed305 106{
7bfeab9a 107 return 0;
edeed305
AV
108}
109#endif
b2bba72c 110
1965aae3 111#endif /* _ASM_X86_CACHEFLUSH_H */