/*
 * Page poisoning: fill freed pages with a poison pattern and verify the
 * pattern is intact when pages are reallocated, to catch writes to freed
 * memory.
 */
1 | #include <linux/kernel.h> | |
2 | #include <linux/string.h> | |
3 | #include <linux/mm.h> | |
4 | #include <linux/highmem.h> | |
5 | #include <linux/page_ext.h> | |
6 | #include <linux/poison.h> | |
7 | #include <linux/ratelimit.h> | |
8 | ||
/* True once init_page_poisoning() has decided poisoning is active. */
static bool __page_poisoning_enabled __read_mostly;
/* Requested via the "page_poison" early boot parameter. */
static bool want_page_poisoning __read_mostly;
11 | ||
12 | static int early_page_poison_param(char *buf) | |
13 | { | |
14 | if (!buf) | |
15 | return -EINVAL; | |
16 | return strtobool(buf, &want_page_poisoning); | |
17 | } | |
18 | early_param("page_poison", early_page_poison_param); | |
19 | ||
/* Report whether page poisoning was enabled at init time. */
bool page_poisoning_enabled(void)
{
	return __page_poisoning_enabled;
}
24 | ||
/* page_ext callback: poisoning needs page_ext space iff it was requested. */
static bool need_page_poisoning(void)
{
	return want_page_poisoning;
}
29 | ||
30 | static void init_page_poisoning(void) | |
31 | { | |
32 | /* | |
33 | * page poisoning is debug page alloc for some arches. If either | |
34 | * of those options are enabled, enable poisoning | |
35 | */ | |
36 | if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) { | |
37 | if (!want_page_poisoning && !debug_pagealloc_enabled()) | |
38 | return; | |
39 | } else { | |
40 | if (!want_page_poisoning) | |
41 | return; | |
42 | } | |
43 | ||
44 | __page_poisoning_enabled = true; | |
45 | } | |
46 | ||
/* Hooks registered with the page_ext framework. */
struct page_ext_operations page_poisoning_ops = {
	.need = need_page_poisoning,
	.init = init_page_poisoning,
};
51 | ||
52 | static inline void set_page_poison(struct page *page) | |
53 | { | |
54 | struct page_ext *page_ext; | |
55 | ||
56 | page_ext = lookup_page_ext(page); | |
57 | if (unlikely(!page_ext)) | |
58 | return; | |
59 | ||
60 | __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | |
61 | } | |
62 | ||
63 | static inline void clear_page_poison(struct page *page) | |
64 | { | |
65 | struct page_ext *page_ext; | |
66 | ||
67 | page_ext = lookup_page_ext(page); | |
68 | if (unlikely(!page_ext)) | |
69 | return; | |
70 | ||
71 | __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | |
72 | } | |
73 | ||
74 | bool page_is_poisoned(struct page *page) | |
75 | { | |
76 | struct page_ext *page_ext; | |
77 | ||
78 | page_ext = lookup_page_ext(page); | |
79 | if (unlikely(!page_ext)) | |
80 | return false; | |
81 | ||
82 | return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | |
83 | } | |
84 | ||
85 | static void poison_page(struct page *page) | |
86 | { | |
87 | void *addr = kmap_atomic(page); | |
88 | ||
89 | set_page_poison(page); | |
90 | memset(addr, PAGE_POISON, PAGE_SIZE); | |
91 | kunmap_atomic(addr); | |
92 | } | |
93 | ||
94 | static void poison_pages(struct page *page, int n) | |
95 | { | |
96 | int i; | |
97 | ||
98 | for (i = 0; i < n; i++) | |
99 | poison_page(page + i); | |
100 | } | |
101 | ||
/* True iff @a and @b differ in exactly one bit (XOR is a power of two). */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char diff = a ^ b;

	return diff != 0 && (diff & (diff - 1)) == 0;
}
108 | ||
109 | static void check_poison_mem(unsigned char *mem, size_t bytes) | |
110 | { | |
111 | static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10); | |
112 | unsigned char *start; | |
113 | unsigned char *end; | |
114 | ||
115 | if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY)) | |
116 | return; | |
117 | ||
118 | start = memchr_inv(mem, PAGE_POISON, bytes); | |
119 | if (!start) | |
120 | return; | |
121 | ||
122 | for (end = mem + bytes - 1; end > start; end--) { | |
123 | if (*end != PAGE_POISON) | |
124 | break; | |
125 | } | |
126 | ||
127 | if (!__ratelimit(&ratelimit)) | |
128 | return; | |
129 | else if (start == end && single_bit_flip(*start, PAGE_POISON)) | |
130 | pr_err("pagealloc: single bit error\n"); | |
131 | else | |
132 | pr_err("pagealloc: memory corruption\n"); | |
133 | ||
134 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start, | |
135 | end - start + 1, 1); | |
136 | dump_stack(); | |
137 | } | |
138 | ||
139 | static void unpoison_page(struct page *page) | |
140 | { | |
141 | void *addr; | |
142 | ||
143 | if (!page_is_poisoned(page)) | |
144 | return; | |
145 | ||
146 | addr = kmap_atomic(page); | |
147 | check_poison_mem(addr, PAGE_SIZE); | |
148 | clear_page_poison(page); | |
149 | kunmap_atomic(addr); | |
150 | } | |
151 | ||
152 | static void unpoison_pages(struct page *page, int n) | |
153 | { | |
154 | int i; | |
155 | ||
156 | for (i = 0; i < n; i++) | |
157 | unpoison_page(page + i); | |
158 | } | |
159 | ||
/*
 * Entry point from the page allocator: poison pages on free
 * (@enable == 0) or check-and-unpoison on allocation (@enable != 0).
 * No-op unless poisoning was enabled at init.
 */
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (page_poisoning_enabled()) {
		if (enable)
			unpoison_pages(page, numpages);
		else
			poison_pages(page, numpages);
	}
}
170 | ||
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
/*
 * Stub for arches without debug_pagealloc page-table support: mapping is
 * never changed here; poisoning via kernel_poison_pages() provides the
 * debug coverage instead.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif