Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /*
1da177e4 LT | 2 |  *
1da177e4 LT | 3 |  * Copyright (C) 1995 Linus Torvalds
1da177e4 LT | 4 |  *
1da177e4 LT | 5 |  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
1da177e4 LT | 6 |  */
1da177e4 LT | 7 |
1da177e4 LT |
8 | #include <linux/module.h> |
9 | #include <linux/signal.h> | |
10 | #include <linux/sched.h> | |
11 | #include <linux/kernel.h> | |
12 | #include <linux/errno.h> | |
13 | #include <linux/string.h> | |
14 | #include <linux/types.h> | |
15 | #include <linux/ptrace.h> | |
16 | #include <linux/mman.h> | |
17 | #include <linux/mm.h> | |
18 | #include <linux/hugetlb.h> | |
19 | #include <linux/swap.h> | |
20 | #include <linux/smp.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/highmem.h> | |
23 | #include <linux/pagemap.h> | |
cfb80c9e | 24 | #include <linux/pci.h> |
6fb14755 | 25 | #include <linux/pfn.h> |
c9cf5528 | 26 | #include <linux/poison.h> |
1da177e4 | 27 | #include <linux/bootmem.h> |
a9ce6bc1 | 28 | #include <linux/memblock.h> |
1da177e4 | 29 | #include <linux/proc_fs.h> |
05039b92 | 30 | #include <linux/memory_hotplug.h> |
27d99f7e | 31 | #include <linux/initrd.h> |
55b2355e | 32 | #include <linux/cpumask.h> |
5a0e3ad6 | 33 | #include <linux/gfp.h> |
1da177e4 | 34 | |
f832ff18 | 35 | #include <asm/asm.h> |
46eaa670 | 36 | #include <asm/bios_ebda.h> |
1da177e4 LT |
37 | #include <asm/processor.h> |
38 | #include <asm/system.h> | |
39 | #include <asm/uaccess.h> | |
40 | #include <asm/pgtable.h> | |
41 | #include <asm/dma.h> | |
42 | #include <asm/fixmap.h> | |
43 | #include <asm/e820.h> | |
44 | #include <asm/apic.h> | |
8550eb99 | 45 | #include <asm/bugs.h> |
1da177e4 LT |
46 | #include <asm/tlb.h> |
47 | #include <asm/tlbflush.h> | |
c10d1e26 | 48 | #include <asm/olpc_ofw.h> |
a5a19c63 | 49 | #include <asm/pgalloc.h> |
1da177e4 | 50 | #include <asm/sections.h> |
b239fb25 | 51 | #include <asm/paravirt.h> |
551889a6 | 52 | #include <asm/setup.h> |
7bfeab9a | 53 | #include <asm/cacheflush.h> |
2b72394e | 54 | #include <asm/page_types.h> |
4fcb2083 | 55 | #include <asm/init.h> |
1da177e4 | 56 | |
1da177e4 LT |
57 | unsigned long highstart_pfn, highend_pfn; |
58 | ||
8550eb99 | 59 | static noinline int do_test_wp_bit(void); |
1da177e4 | 60 | |
dc16ecf7 | 61 | bool __read_mostly __vmalloc_start_set = false; |
4e29684c | 62 | |
d6be89ad | 63 | static __init void *alloc_low_page(void) |
4e29684c | 64 | { |
d1b19426 | 65 | unsigned long pfn = pgt_buf_end++; |
4e29684c YL |
66 | void *adr; |
67 | ||
d1b19426 | 68 | if (pfn >= pgt_buf_top) |
4e29684c YL |
69 | panic("alloc_low_page: ran out of memory"); |
70 | ||
71 | adr = __va(pfn * PAGE_SIZE); | |
234bb549 | 72 | clear_page(adr); |
4e29684c YL |
73 | return adr; |
74 | } | |
75 | ||
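alloc_low_page() above is a bump allocator over the physically contiguous window reserved for early page tables: pgt_buf_end is advanced one page at a time until it hits pgt_buf_top. A minimal user-space sketch of the same pattern — all names here (buf_base, buf_end, buf_top, alloc_low_page_sketch) are hypothetical and this is not code from this file — could look like this:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static unsigned char *buf_base;		/* start of the reserved window */
static unsigned long buf_end, buf_top;	/* cursor and limit, in pages */

/* Bump-allocate one zeroed page; give up when the window is exhausted. */
static void *alloc_low_page_sketch(void)
{
	unsigned long pfn = buf_end++;

	if (pfn >= buf_top) {
		fprintf(stderr, "alloc_low_page: ran out of memory\n");
		abort();
	}
	/* hand back a zeroed page, like clear_page() in the original */
	memset(buf_base + pfn * PAGE_SIZE, 0, PAGE_SIZE);
	return buf_base + pfn * PAGE_SIZE;
}

int main(void)
{
	buf_top = 4;			/* pretend 4 pages were reserved */
	buf_base = aligned_alloc(PAGE_SIZE, buf_top * PAGE_SIZE);
	if (!buf_base)
		return 1;

	for (int i = 0; i < 4; i++)
		printf("page %d at %p\n", i, alloc_low_page_sketch());
	return 0;
}
```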
1da177e4 LT |
76 | /* |
77 | * Creates a middle page table and puts a pointer to it in the | |
78 | * given global directory entry. This only returns the gd entry | |
79 | * in non-PAE compilation mode, since the middle layer is folded. | |
80 | */ | |
81 | static pmd_t * __init one_md_table_init(pgd_t *pgd) | |
82 | { | |
83 | pud_t *pud; | |
84 | pmd_t *pmd_table; | |
8550eb99 | 85 | |
1da177e4 | 86 | #ifdef CONFIG_X86_PAE |
b239fb25 | 87 | if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { |
c464573c | 88 | if (after_bootmem) |
3c1596ef | 89 | pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); |
4e29684c | 90 | else |
d6be89ad | 91 | pmd_table = (pmd_t *)alloc_low_page(); |
6944a9c8 | 92 | paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); |
b239fb25 JF |
93 | set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); |
94 | pud = pud_offset(pgd, 0); | |
8550eb99 | 95 | BUG_ON(pmd_table != pmd_offset(pud, 0)); |
a376f30a Z |
96 | |
97 | return pmd_table; | |
b239fb25 JF |
98 | } |
99 | #endif | |
1da177e4 LT |
100 | pud = pud_offset(pgd, 0); |
101 | pmd_table = pmd_offset(pud, 0); | |
8550eb99 | 102 | |
1da177e4 LT |
103 | return pmd_table; |
104 | } | |
105 | ||
106 | /* | |
107 | * Create a page table and place a pointer to it in a middle page | |
8550eb99 | 108 | * directory entry: |
1da177e4 LT |
109 | */ |
110 | static pte_t * __init one_page_table_init(pmd_t *pmd) | |
111 | { | |
b239fb25 | 112 | if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { |
509a80c4 IM |
113 | pte_t *page_table = NULL; |
114 | ||
c464573c | 115 | if (after_bootmem) { |
f8561296 | 116 | #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) |
4e29684c | 117 | page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); |
509a80c4 | 118 | #endif |
4e29684c YL |
119 | if (!page_table) |
120 | page_table = | |
3c1596ef | 121 | (pte_t *)alloc_bootmem_pages(PAGE_SIZE); |
d6be89ad JB |
122 | } else |
123 | page_table = (pte_t *)alloc_low_page(); | |
b239fb25 | 124 | |
6944a9c8 | 125 | paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); |
1da177e4 | 126 | set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); |
b239fb25 | 127 | BUG_ON(page_table != pte_offset_kernel(pmd, 0)); |
1da177e4 | 128 | } |
509a80c4 | 129 | |
1da177e4 LT |
130 | return pte_offset_kernel(pmd, 0); |
131 | } | |
132 | ||
458a3e64 | 133 | pmd_t * __init populate_extra_pmd(unsigned long vaddr) |
11124411 TH |
134 | { |
135 | int pgd_idx = pgd_index(vaddr); | |
136 | int pmd_idx = pmd_index(vaddr); | |
458a3e64 TH |
137 | |
138 | return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx; | |
139 | } | |
140 | ||
141 | pte_t * __init populate_extra_pte(unsigned long vaddr) | |
142 | { | |
143 | int pte_idx = pte_index(vaddr); | |
11124411 TH |
144 | pmd_t *pmd; |
145 | ||
458a3e64 TH |
146 | pmd = populate_extra_pmd(vaddr); |
147 | return one_page_table_init(pmd) + pte_idx; | |
11124411 TH |
148 | } |
149 | ||
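populate_extra_pmd()/populate_extra_pte() above use the usual index arithmetic, with the pud (and, without PAE, the pmd) level folded away. As a rough illustration only, the standard two-level 32-bit non-PAE split (10-bit pgd index, 10-bit pte index, 12-bit page offset) can be computed as below; the constants are the generic architectural values, not taken from this file:

```c
#include <stdio.h>

/* Standard 32-bit non-PAE layout: 10-bit pgd index, 10-bit pte index. */
#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024

static unsigned int pgd_index_of(unsigned long vaddr)
{
	return (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

static unsigned int pte_index_of(unsigned long vaddr)
{
	return (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	unsigned long vaddr = 0xc0123456UL;	/* some kernel virtual address */

	printf("vaddr 0x%08lx -> pgd index %u, pte index %u, offset 0x%03lx\n",
	       vaddr, pgd_index_of(vaddr), pte_index_of(vaddr),
	       vaddr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}
```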
a3c6018e JB |
150 | static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, |
151 | unsigned long vaddr, pte_t *lastpte) | |
152 | { | |
153 | #ifdef CONFIG_HIGHMEM | |
154 | /* | |
155 | * Something (early fixmap) may already have put a pte | |
156 | * page here, which causes the page table allocation | |
157 | * to become nonlinear. Attempt to fix it, and if it | |
158 | * is still nonlinear then we have to bug. | |
159 | */ | |
160 | int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; | |
161 | int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; | |
162 | ||
163 | if (pmd_idx_kmap_begin != pmd_idx_kmap_end | |
164 | && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin | |
165 | && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end | |
d1b19426 YL |
166 | && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start |
167 | || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { | |
a3c6018e JB |
168 | pte_t *newpte; |
169 | int i; | |
170 | ||
c464573c | 171 | BUG_ON(after_bootmem); |
a3c6018e JB |
172 | newpte = alloc_low_page(); |
173 | for (i = 0; i < PTRS_PER_PTE; i++) | |
174 | set_pte(newpte + i, pte[i]); | |
175 | ||
176 | paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); | |
177 | set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); | |
178 | BUG_ON(newpte != pte_offset_kernel(pmd, 0)); | |
179 | __flush_tlb_all(); | |
180 | ||
181 | paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); | |
182 | pte = newpte; | |
183 | } | |
184 | BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) | |
185 | && vaddr > fix_to_virt(FIX_KMAP_END) | |
186 | && lastpte && lastpte + PTRS_PER_PTE != pte); | |
187 | #endif | |
188 | return pte; | |
189 | } | |
190 | ||
1da177e4 | 191 | /* |
8550eb99 | 192 | * This function initializes a certain range of kernel virtual memory |
1da177e4 LT |
193 | * with new bootmem page tables, everywhere page tables are missing in |
194 | * the given range. | |
8550eb99 IM |
195 | * |
196 | * NOTE: The pagetables are allocated contiguously in physical memory,
197 | * so we can cache the place of the first one and move around without | |
1da177e4 LT |
198 | * checking the pgd every time. |
199 | */ | |
8550eb99 IM |
200 | static void __init |
201 | page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) | |
1da177e4 | 202 | { |
1da177e4 LT |
203 | int pgd_idx, pmd_idx; |
204 | unsigned long vaddr; | |
8550eb99 IM |
205 | pgd_t *pgd; |
206 | pmd_t *pmd; | |
a3c6018e | 207 | pte_t *pte = NULL; |
1da177e4 LT |
208 | |
209 | vaddr = start; | |
210 | pgd_idx = pgd_index(vaddr); | |
211 | pmd_idx = pmd_index(vaddr); | |
212 | pgd = pgd_base + pgd_idx; | |
213 | ||
214 | for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { | |
b239fb25 JF |
215 | pmd = one_md_table_init(pgd); |
216 | pmd = pmd + pmd_index(vaddr); | |
8550eb99 IM |
217 | for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); |
218 | pmd++, pmd_idx++) { | |
a3c6018e JB |
219 | pte = page_table_kmap_check(one_page_table_init(pmd), |
220 | pmd, vaddr, pte); | |
1da177e4 LT |
221 | |
222 | vaddr += PMD_SIZE; | |
223 | } | |
224 | pmd_idx = 0; | |
225 | } | |
226 | } | |
227 | ||
228 | static inline int is_kernel_text(unsigned long addr) | |
229 | { | |
5bd5a452 | 230 | if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) |
1da177e4 LT |
231 | return 1; |
232 | return 0; | |
233 | } | |
234 | ||
235 | /* | |
8550eb99 IM |
236 | * This maps the physical memory to kernel virtual address space, a total |
237 | * of max_low_pfn pages, by creating page tables starting from address | |
238 | * PAGE_OFFSET: | |
1da177e4 | 239 | */ |
e53fb04f PE |
240 | unsigned long __init |
241 | kernel_physical_mapping_init(unsigned long start, | |
242 | unsigned long end, | |
243 | unsigned long page_size_mask) | |
1da177e4 | 244 | { |
e53fb04f | 245 | int use_pse = page_size_mask == (1<<PG_LEVEL_2M); |
c1fd1b43 | 246 | unsigned long last_map_addr = end; |
e53fb04f | 247 | unsigned long start_pfn, end_pfn; |
e7179853 | 248 | pgd_t *pgd_base = swapper_pg_dir; |
8550eb99 | 249 | int pgd_idx, pmd_idx, pte_ofs; |
1da177e4 LT |
250 | unsigned long pfn; |
251 | pgd_t *pgd; | |
252 | pmd_t *pmd; | |
253 | pte_t *pte; | |
a2699e47 SS |
254 | unsigned pages_2m, pages_4k; |
255 | int mapping_iter; | |
256 | ||
e53fb04f PE |
257 | start_pfn = start >> PAGE_SHIFT; |
258 | end_pfn = end >> PAGE_SHIFT; | |
259 | ||
a2699e47 SS |
260 | /* |
261 | * First iteration will set up the identity mapping using large/small pages
262 | * based on use_pse, with other attributes same as set by | |
263 | * the early code in head_32.S | |
264 | * | |
265 | * Second iteration will set up the appropriate attributes (NX, GLOBAL..)
266 | * as desired for the kernel identity mapping. | |
267 | * | |
268 | * This two pass mechanism conforms to the TLB app note which says: | |
269 | * | |
270 | * "Software should not write to a paging-structure entry in a way | |
271 | * that would change, for any linear address, both the page size | |
272 | * and either the page frame or attributes." | |
273 | */ | |
274 | mapping_iter = 1; | |
1da177e4 | 275 | |
a04ad82d YL |
276 | if (!cpu_has_pse) |
277 | use_pse = 0; | |
1da177e4 | 278 | |
a2699e47 SS |
279 | repeat: |
280 | pages_2m = pages_4k = 0; | |
a04ad82d YL |
281 | pfn = start_pfn; |
282 | pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); | |
283 | pgd = pgd_base + pgd_idx; | |
1da177e4 LT |
284 | for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { |
285 | pmd = one_md_table_init(pgd); | |
8550eb99 | 286 | |
a04ad82d YL |
287 | if (pfn >= end_pfn) |
288 | continue; | |
289 | #ifdef CONFIG_X86_PAE | |
290 | pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); | |
291 | pmd += pmd_idx; | |
292 | #else | |
293 | pmd_idx = 0; | |
294 | #endif | |
295 | for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; | |
f3f20de8 | 296 | pmd++, pmd_idx++) { |
8550eb99 | 297 | unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; |
1da177e4 | 298 | |
8550eb99 IM |
299 | /* |
300 | * Map with big pages if possible, otherwise | |
301 | * create normal page tables: | |
302 | */ | |
a04ad82d | 303 | if (use_pse) { |
8550eb99 | 304 | unsigned int addr2; |
f3f20de8 | 305 | pgprot_t prot = PAGE_KERNEL_LARGE; |
a2699e47 SS |
306 | /* |
307 | * first pass will use the same initial | |
308 | * identity mapping attribute + _PAGE_PSE. | |
309 | */ | |
310 | pgprot_t init_prot = | |
311 | __pgprot(PTE_IDENT_ATTR | | |
312 | _PAGE_PSE); | |
f3f20de8 | 313 | |
8550eb99 | 314 | addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + |
f3f20de8 JF |
315 | PAGE_OFFSET + PAGE_SIZE-1; |
316 | ||
8550eb99 IM |
317 | if (is_kernel_text(addr) || |
318 | is_kernel_text(addr2)) | |
f3f20de8 JF |
319 | prot = PAGE_KERNEL_LARGE_EXEC; |
320 | ||
ce0c0e50 | 321 | pages_2m++; |
a2699e47 SS |
322 | if (mapping_iter == 1) |
323 | set_pmd(pmd, pfn_pmd(pfn, init_prot)); | |
324 | else | |
325 | set_pmd(pmd, pfn_pmd(pfn, prot)); | |
b239fb25 | 326 | |
1da177e4 | 327 | pfn += PTRS_PER_PTE; |
8550eb99 IM |
328 | continue; |
329 | } | |
330 | pte = one_page_table_init(pmd); | |
1da177e4 | 331 | |
a04ad82d YL |
332 | pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); |
333 | pte += pte_ofs; | |
334 | for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; | |
8550eb99 IM |
335 | pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { |
336 | pgprot_t prot = PAGE_KERNEL; | |
a2699e47 SS |
337 | /* |
338 | * first pass will use the same initial | |
339 | * identity mapping attribute. | |
340 | */ | |
341 | pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); | |
f3f20de8 | 342 | |
8550eb99 IM |
343 | if (is_kernel_text(addr)) |
344 | prot = PAGE_KERNEL_EXEC; | |
f3f20de8 | 345 | |
ce0c0e50 | 346 | pages_4k++; |
c1fd1b43 | 347 | if (mapping_iter == 1) { |
a2699e47 | 348 | set_pte(pte, pfn_pte(pfn, init_prot)); |
c1fd1b43 PE |
349 | last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE; |
350 | } else | |
a2699e47 | 351 | set_pte(pte, pfn_pte(pfn, prot)); |
1da177e4 LT |
352 | } |
353 | } | |
354 | } | |
a2699e47 SS |
355 | if (mapping_iter == 1) { |
356 | /* | |
357 | * update direct mapping page count only in the first | |
358 | * iteration. | |
359 | */ | |
360 | update_page_count(PG_LEVEL_2M, pages_2m); | |
361 | update_page_count(PG_LEVEL_4K, pages_4k); | |
362 | ||
363 | /* | |
364 | * local global flush tlb, which will flush the previous | |
365 | * mappings present in both small and large page TLB's. | |
366 | */ | |
367 | __flush_tlb_all(); | |
368 | ||
369 | /* | |
370 | * Second iteration will set the actual desired PTE attributes. | |
371 | */ | |
372 | mapping_iter = 2; | |
373 | goto repeat; | |
374 | } | |
c1fd1b43 | 375 | return last_map_addr; |
ae531c26 AV |
376 | } |
377 | ||
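kernel_physical_mapping_init() walks the range twice (see the two-pass comment above) and counts how much of it ends up in large pages versus 4 KiB pages (pages_2m/pages_4k). The stand-alone sketch below models only that accounting, under the simplifying assumption that a large page is used whenever a fully aligned PTRS_PER_PTE-sized run of pfns remains; it is an illustration, not the kernel's exact logic:

```c
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	1024	/* one PSE page covers 1024 small pages (4 MiB, non-PAE) */

/*
 * Rough model of the page-count accounting: a big page is used only when
 * PSE is available and a fully aligned PTRS_PER_PTE-sized run remains;
 * everything else falls back to 4 KiB mappings.
 */
static void count_mappings(unsigned long start_pfn, unsigned long end_pfn,
			   int use_pse, unsigned long *pages_big,
			   unsigned long *pages_4k)
{
	unsigned long pfn = start_pfn;

	*pages_big = *pages_4k = 0;
	while (pfn < end_pfn) {
		if (use_pse && (pfn % PTRS_PER_PTE) == 0 &&
		    pfn + PTRS_PER_PTE <= end_pfn) {
			(*pages_big)++;
			pfn += PTRS_PER_PTE;
		} else {
			(*pages_4k)++;
			pfn++;
		}
	}
}

int main(void)
{
	unsigned long big, small;

	/* Map pfns [0x100, 0x2300): unaligned head and tail stay 4 KiB. */
	count_mappings(0x100, 0x2300, 1, &big, &small);
	printf("big pages: %lu, 4K pages: %lu\n", big, small);
	return 0;
}
```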
1da177e4 LT |
378 | pte_t *kmap_pte; |
379 | pgprot_t kmap_prot; | |
380 | ||
8550eb99 IM |
381 | static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr) |
382 | { | |
383 | return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), | |
384 | vaddr), vaddr), vaddr); | |
385 | } | |
1da177e4 LT |
386 | |
387 | static void __init kmap_init(void) | |
388 | { | |
389 | unsigned long kmap_vstart; | |
390 | ||
8550eb99 IM |
391 | /* |
392 | * Cache the first kmap pte: | |
393 | */ | |
1da177e4 LT |
394 | kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); |
395 | kmap_pte = kmap_get_fixmap_pte(kmap_vstart); | |
396 | ||
397 | kmap_prot = PAGE_KERNEL; | |
398 | } | |
399 | ||
fd940934 | 400 | #ifdef CONFIG_HIGHMEM |
1da177e4 LT |
401 | static void __init permanent_kmaps_init(pgd_t *pgd_base) |
402 | { | |
8550eb99 | 403 | unsigned long vaddr; |
1da177e4 LT |
404 | pgd_t *pgd; |
405 | pud_t *pud; | |
406 | pmd_t *pmd; | |
407 | pte_t *pte; | |
1da177e4 LT |
408 | |
409 | vaddr = PKMAP_BASE; | |
410 | page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); | |
411 | ||
412 | pgd = swapper_pg_dir + pgd_index(vaddr); | |
413 | pud = pud_offset(pgd, vaddr); | |
414 | pmd = pmd_offset(pud, vaddr); | |
415 | pte = pte_offset_kernel(pmd, vaddr); | |
8550eb99 | 416 | pkmap_page_table = pte; |
1da177e4 LT |
417 | } |
418 | ||
b1258ac2 | 419 | static void __init add_one_highpage_init(struct page *page) |
1da177e4 | 420 | { |
cc9f7a0c YL |
421 | ClearPageReserved(page); |
422 | init_page_count(page); | |
423 | __free_page(page); | |
424 | totalhigh_pages++; | |
1da177e4 LT |
425 | } |
426 | ||
1d931264 YL |
427 | void __init add_highpages_with_active_regions(int nid, |
428 | unsigned long start_pfn, unsigned long end_pfn) | |
1da177e4 | 429 | { |
1d931264 YL |
430 | struct range *range; |
431 | int nr_range; | |
432 | int i; | |
8550eb99 | 433 | |
1d931264 | 434 | nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn); |
b5bc6c0e | 435 | |
1d931264 YL |
436 | for (i = 0; i < nr_range; i++) { |
437 | struct page *page; | |
438 | int node_pfn; | |
b5bc6c0e | 439 | |
1d931264 YL |
440 | for (node_pfn = range[i].start; node_pfn < range[i].end; |
441 | node_pfn++) { | |
442 | if (!pfn_valid(node_pfn)) | |
443 | continue; | |
444 | page = pfn_to_page(node_pfn); | |
445 | add_one_highpage_init(page); | |
446 | } | |
23be8c7d | 447 | } |
b5bc6c0e | 448 | } |
1da177e4 | 449 | #else |
e8e32326 IB |
450 | static inline void permanent_kmaps_init(pgd_t *pgd_base) |
451 | { | |
452 | } | |
1da177e4 LT |
453 | #endif /* CONFIG_HIGHMEM */ |
454 | ||
b239fb25 | 455 | void __init native_pagetable_setup_start(pgd_t *base) |
1da177e4 | 456 | { |
551889a6 IC |
457 | unsigned long pfn, va; |
458 | pgd_t *pgd; | |
459 | pud_t *pud; | |
460 | pmd_t *pmd; | |
461 | pte_t *pte; | |
b239fb25 JF |
462 | |
463 | /* | |
551889a6 IC |
464 | * Remove any mappings which extend past the end of physical |
465 | * memory from the boot time page table: | |
b239fb25 | 466 | */ |
551889a6 IC |
467 | for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { |
468 | va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); | |
469 | pgd = base + pgd_index(va); | |
470 | if (!pgd_present(*pgd)) | |
471 | break; | |
472 | ||
473 | pud = pud_offset(pgd, va); | |
474 | pmd = pmd_offset(pud, va); | |
475 | if (!pmd_present(*pmd)) | |
476 | break; | |
477 | ||
478 | pte = pte_offset_kernel(pmd, va); | |
479 | if (!pte_present(*pte)) | |
480 | break; | |
481 | ||
482 | pte_clear(NULL, va, pte); | |
483 | } | |
6944a9c8 | 484 | paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); |
b239fb25 JF |
485 | } |
486 | ||
487 | void __init native_pagetable_setup_done(pgd_t *base) | |
488 | { | |
b239fb25 JF |
489 | } |
490 | ||
491 | /* | |
492 | * Build a proper pagetable for the kernel mappings. Up until this | |
493 | * point, we've been running on some set of pagetables constructed by | |
494 | * the boot process. | |
495 | * | |
496 | * If we're booting on native hardware, this will be a pagetable | |
551889a6 IC |
497 | * constructed in arch/x86/kernel/head_32.S. The root of the |
498 | * pagetable will be swapper_pg_dir. | |
b239fb25 JF |
499 | * |
500 | * If we're booting paravirtualized under a hypervisor, then there are | |
501 | * more options: we may already be running PAE, and the pagetable may | |
502 | * or may not be based in swapper_pg_dir. In any case, | |
503 | * paravirt_pagetable_setup_start() will set up swapper_pg_dir | |
504 | * appropriately for the rest of the initialization to work. | |
505 | * | |
506 | * In general, pagetable_init() assumes that the pagetable may already | |
507 | * be partially populated, and so it avoids stomping on any existing | |
508 | * mappings. | |
509 | */ | |
f765090a | 510 | void __init early_ioremap_page_table_range_init(void) |
b239fb25 | 511 | { |
e7179853 | 512 | pgd_t *pgd_base = swapper_pg_dir; |
8550eb99 | 513 | unsigned long vaddr, end; |
b239fb25 | 514 | |
1da177e4 LT |
515 | /* |
516 | * Fixed mappings, only the page table structure has to be | |
517 | * created - mappings will be set by set_fixmap(): | |
518 | */ | |
519 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | |
b239fb25 JF |
520 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; |
521 | page_table_range_init(vaddr, end, pgd_base); | |
beacfaac | 522 | early_ioremap_reset(); |
e7b37895 YL |
523 | } |
524 | ||
525 | static void __init pagetable_init(void) | |
526 | { | |
527 | pgd_t *pgd_base = swapper_pg_dir; | |
528 | ||
1da177e4 | 529 | permanent_kmaps_init(pgd_base); |
1da177e4 LT |
530 | } |
531 | ||
be43d728 | 532 | pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); |
6fdc05d4 JF |
533 | EXPORT_SYMBOL_GPL(__supported_pte_mask); |
534 | ||
90d967e0 YL |
535 | /* user-defined highmem size */ |
536 | static unsigned int highmem_pages = -1; | |
537 | ||
538 | /* | |
539 | * highmem=size forces highmem to be exactly 'size' bytes. | |
540 | * This works even on boxes that have no highmem otherwise. | |
541 | * This also works to reduce highmem size on bigger boxes. | |
542 | */ | |
543 | static int __init parse_highmem(char *arg) | |
544 | { | |
545 | if (!arg) | |
546 | return -EINVAL; | |
547 | ||
548 | highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT; | |
549 | return 0; | |
550 | } | |
551 | early_param("highmem", parse_highmem); | |
552 | ||
4769843b IM |
553 | #define MSG_HIGHMEM_TOO_BIG \ |
554 | "highmem size (%luMB) is bigger than pages available (%luMB)!\n" | |
555 | ||
556 | #define MSG_LOWMEM_TOO_SMALL \ | |
557 | "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n" | |
90d967e0 | 558 | /* |
4769843b IM |
559 | * All of RAM fits into lowmem - but if user wants highmem |
560 | * artificially via the highmem=x boot parameter then create | |
561 | * it: | |
90d967e0 | 562 | */ |
4769843b | 563 | void __init lowmem_pfn_init(void) |
90d967e0 | 564 | { |
346cafec | 565 | /* max_low_pfn is 0, we already have early_res support */ |
90d967e0 | 566 | max_low_pfn = max_pfn; |
d88316c2 | 567 | |
4769843b IM |
568 | if (highmem_pages == -1) |
569 | highmem_pages = 0; | |
570 | #ifdef CONFIG_HIGHMEM | |
571 | if (highmem_pages >= max_pfn) { | |
572 | printk(KERN_ERR MSG_HIGHMEM_TOO_BIG, | |
573 | pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); | |
574 | highmem_pages = 0; | |
575 | } | |
576 | if (highmem_pages) { | |
577 | if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) { | |
578 | printk(KERN_ERR MSG_LOWMEM_TOO_SMALL, | |
90d967e0 YL |
579 | pages_to_mb(highmem_pages)); |
580 | highmem_pages = 0; | |
581 | } | |
4769843b IM |
582 | max_low_pfn -= highmem_pages; |
583 | } | |
584 | #else | |
585 | if (highmem_pages) | |
586 | printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); | |
587 | #endif | |
588 | } | |
589 | ||
590 | #define MSG_HIGHMEM_TOO_SMALL \ | |
591 | "only %luMB highmem pages available, ignoring highmem size of %luMB!\n" | |
592 | ||
593 | #define MSG_HIGHMEM_TRIMMED \ | |
594 | "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n" | |
595 | /* | |
596 | * We have more RAM than fits into lowmem - we try to put it into | |
597 | * highmem, also taking the highmem=x boot parameter into account: | |
598 | */ | |
599 | void __init highmem_pfn_init(void) | |
600 | { | |
d88316c2 IM |
601 | max_low_pfn = MAXMEM_PFN; |
602 | ||
4769843b IM |
603 | if (highmem_pages == -1) |
604 | highmem_pages = max_pfn - MAXMEM_PFN; | |
605 | ||
606 | if (highmem_pages + MAXMEM_PFN < max_pfn) | |
607 | max_pfn = MAXMEM_PFN + highmem_pages; | |
608 | ||
609 | if (highmem_pages + MAXMEM_PFN > max_pfn) { | |
610 | printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL, | |
611 | pages_to_mb(max_pfn - MAXMEM_PFN), | |
612 | pages_to_mb(highmem_pages)); | |
613 | highmem_pages = 0; | |
614 | } | |
90d967e0 | 615 | #ifndef CONFIG_HIGHMEM |
4769843b IM |
616 | /* Maximum memory usable is what is directly addressable */ |
617 | printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); | |
618 | if (max_pfn > MAX_NONPAE_PFN) | |
619 | printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n"); | |
620 | else | |
621 | printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); | |
622 | max_pfn = MAXMEM_PFN; | |
90d967e0 YL |
623 | #else /* !CONFIG_HIGHMEM */ |
624 | #ifndef CONFIG_HIGHMEM64G | |
4769843b IM |
625 | if (max_pfn > MAX_NONPAE_PFN) { |
626 | max_pfn = MAX_NONPAE_PFN; | |
627 | printk(KERN_WARNING MSG_HIGHMEM_TRIMMED); | |
628 | } | |
90d967e0 YL |
629 | #endif /* !CONFIG_HIGHMEM64G */ |
630 | #endif /* !CONFIG_HIGHMEM */ | |
4769843b IM |
631 | } |
632 | ||
633 | /* | |
634 | * Determine low and high memory ranges: | |
635 | */ | |
636 | void __init find_low_pfn_range(void) | |
637 | { | |
638 | /* it could update max_pfn */ | |
639 | ||
d88316c2 | 640 | if (max_pfn <= MAXMEM_PFN) |
4769843b | 641 | lowmem_pfn_init(); |
d88316c2 IM |
642 | else |
643 | highmem_pfn_init(); | |
90d967e0 YL |
644 | } |
645 | ||
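find_low_pfn_range() decides whether all of RAM fits below MAXMEM_PFN (lowmem only) or whether the remainder becomes highmem. A simplified stand-alone model of that split follows; the 896 MiB figure is the conventional default lowmem limit and is assumed here, not read from this file:

```c
#include <stdio.h>

#define PAGE_SHIFT	12
/* Assumed value: with the default 3G/1G split, lowmem tops out around 896 MiB. */
#define MAXMEM_PFN	(896UL * 1024 * 1024 >> PAGE_SHIFT)

/* Simplified mirror of the lowmem/highmem decision: everything below
 * MAXMEM_PFN is lowmem, the rest becomes highmem. */
static void split_pfns(unsigned long max_pfn,
		       unsigned long *max_low_pfn, unsigned long *high_pfns)
{
	if (max_pfn <= MAXMEM_PFN) {
		*max_low_pfn = max_pfn;	/* all of RAM fits into lowmem */
		*high_pfns = 0;
	} else {
		*max_low_pfn = MAXMEM_PFN;
		*high_pfns = max_pfn - MAXMEM_PFN;
	}
}

int main(void)
{
	unsigned long low, high;

	split_pfns(2UL * 1024 * 1024 * 1024 >> PAGE_SHIFT, &low, &high); /* 2 GiB box */
	printf("lowmem: %lu MiB, highmem: %lu MiB\n",
	       low >> (20 - PAGE_SHIFT), high >> (20 - PAGE_SHIFT));
	return 0;
}
```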
b2ac82a0 | 646 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
d8fc3afc | 647 | void __init initmem_init(void) |
b2ac82a0 | 648 | { |
b2ac82a0 YL |
649 | #ifdef CONFIG_HIGHMEM |
650 | highstart_pfn = highend_pfn = max_pfn; | |
651 | if (max_pfn > max_low_pfn) | |
652 | highstart_pfn = max_low_pfn; | |
b2ac82a0 YL |
653 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
654 | pages_to_mb(highend_pfn - highstart_pfn)); | |
655 | num_physpages = highend_pfn; | |
656 | high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; | |
657 | #else | |
b2ac82a0 YL |
658 | num_physpages = max_low_pfn; |
659 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; | |
660 | #endif | |
0608f70c TH |
661 | |
662 | memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); | |
663 | sparse_memory_present_with_active_regions(0); | |
664 | ||
b2ac82a0 YL |
665 | #ifdef CONFIG_FLATMEM |
666 | max_mapnr = num_physpages; | |
667 | #endif | |
dc16ecf7 JF |
668 | __vmalloc_start_set = true; |
669 | ||
b2ac82a0 YL |
670 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", |
671 | pages_to_mb(max_low_pfn)); | |
672 | ||
673 | setup_bootmem_allocator(); | |
b2ac82a0 | 674 | } |
cb95a13a | 675 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
b2ac82a0 | 676 | |
cb95a13a | 677 | static void __init zone_sizes_init(void) |
b2ac82a0 YL |
678 | { |
679 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | |
680 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | |
dc382fd5 | 681 | #ifdef CONFIG_ZONE_DMA |
b2ac82a0 YL |
682 | max_zone_pfns[ZONE_DMA] = |
683 | virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | |
dc382fd5 | 684 | #endif |
b2ac82a0 | 685 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
b2ac82a0 YL |
686 | #ifdef CONFIG_HIGHMEM |
687 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; | |
b2ac82a0 YL |
688 | #endif |
689 | ||
690 | free_area_init_nodes(max_zone_pfns); | |
691 | } | |
b2ac82a0 | 692 | |
b2ac82a0 YL |
693 | void __init setup_bootmem_allocator(void) |
694 | { | |
b2ac82a0 YL |
695 | printk(KERN_INFO " mapped low ram: 0 - %08lx\n", |
696 | max_pfn_mapped<<PAGE_SHIFT); | |
fc5efe39 | 697 | printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); |
7482b0e9 | 698 | |
c464573c | 699 | after_bootmem = 1; |
4e29684c YL |
700 | } |
701 | ||
1da177e4 LT |
702 | /* |
703 | * paging_init() sets up the page tables - note that the first 8MB are | |
704 | * already mapped by head.S. | |
705 | * | |
706 | * This routine also unmaps the page at virtual kernel address 0, so
707 | * that we can trap those pesky NULL-reference errors in the kernel. | |
708 | */ | |
709 | void __init paging_init(void) | |
710 | { | |
1da177e4 LT |
711 | pagetable_init(); |
712 | ||
1da177e4 LT |
713 | __flush_tlb_all(); |
714 | ||
715 | kmap_init(); | |
11cd0bc1 YL |
716 | |
717 | /* | |
718 | * NOTE: at this point the bootmem allocator is fully available. | |
719 | */ | |
c10d1e26 | 720 | olpc_dt_build_devicetree(); |
797390d8 | 721 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
11cd0bc1 YL |
722 | sparse_init(); |
723 | zone_sizes_init(); | |
1da177e4 LT |
724 | } |
725 | ||
726 | /* | |
727 | * Test if the WP bit works in supervisor mode. It isn't supported on 386's | |
f7f17a67 DV |
728 | * and also on some strange 486's. All 586+'s are OK. This used to involve |
729 | * black magic jumps to work around some nasty CPU bugs, but fortunately the | |
730 | * switch to using exceptions got rid of all that. | |
1da177e4 | 731 | */ |
1da177e4 LT |
732 | static void __init test_wp_bit(void) |
733 | { | |
d7d119d7 IM |
734 | printk(KERN_INFO |
735 | "Checking if this processor honours the WP bit even in supervisor mode..."); | |
1da177e4 LT |
736 | |
737 | /* Any page-aligned address will do, the test is non-destructive */ | |
738 | __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY); | |
739 | boot_cpu_data.wp_works_ok = do_test_wp_bit(); | |
740 | clear_fixmap(FIX_WP_TEST); | |
741 | ||
742 | if (!boot_cpu_data.wp_works_ok) { | |
d7d119d7 | 743 | printk(KERN_CONT "No.\n"); |
1da177e4 | 744 | #ifdef CONFIG_X86_WP_WORKS_OK |
d7d119d7 IM |
745 | panic( |
746 | "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!"); | |
1da177e4 LT |
747 | #endif |
748 | } else { | |
d7d119d7 | 749 | printk(KERN_CONT "Ok.\n"); |
1da177e4 LT |
750 | } |
751 | } | |
752 | ||
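test_wp_bit() above relies on taking the write fault and recovering via the exception table, rather than on CPU-model-specific tricks. The same "attempt the write, catch the fault, report" pattern can be shown in user space with mmap and SIGSEGV; note this only illustrates the fault-and-recover idea, not the kernel's CR0.WP semantics:

```c
#define _GNU_SOURCE		/* for MAP_ANONYMOUS */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static sigjmp_buf fault_jmp;

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(fault_jmp, 1);	/* recover from the faulting write */
}

int main(void)
{
	/* Map one page read-only; an enforced protection must fault on write. */
	char *page = mmap(NULL, 4096, PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct sigaction sa;

	if (page == MAP_FAILED)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = segv_handler;
	sigaction(SIGSEGV, &sa, NULL);

	if (sigsetjmp(fault_jmp, 1) == 0) {
		page[0] = 1;		/* attempt the forbidden write */
		puts("write protection NOT enforced");
	} else {
		puts("write protection enforced (fault taken and recovered)");
	}
	return 0;
}
```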
1da177e4 LT |
753 | void __init mem_init(void) |
754 | { | |
1da177e4 | 755 | int codesize, reservedpages, datasize, initsize; |
cc9f7a0c | 756 | int tmp; |
1da177e4 | 757 | |
cfb80c9e JF |
758 | pci_iommu_alloc(); |
759 | ||
05b79bdc | 760 | #ifdef CONFIG_FLATMEM |
8d8f3cbe | 761 | BUG_ON(!mem_map); |
1da177e4 | 762 | #endif |
1da177e4 LT |
763 | /* this will put all low memory onto the freelists */ |
764 | totalram_pages += free_all_bootmem(); | |
765 | ||
766 | reservedpages = 0; | |
767 | for (tmp = 0; tmp < max_low_pfn; tmp++) | |
768 | /* | |
8550eb99 | 769 | * Only count reserved RAM pages: |
1da177e4 LT |
770 | */ |
771 | if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) | |
772 | reservedpages++; | |
773 | ||
cc9f7a0c | 774 | set_highmem_pages_init(); |
1da177e4 LT |
775 | |
776 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | |
777 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | |
778 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | |
779 | ||
8550eb99 IM |
780 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " |
781 | "%dk reserved, %dk data, %dk init, %ldk highmem)\n", | |
cc013a88 | 782 | nr_free_pages() << (PAGE_SHIFT-10), |
1da177e4 LT |
783 | num_physpages << (PAGE_SHIFT-10), |
784 | codesize >> 10, | |
785 | reservedpages << (PAGE_SHIFT-10), | |
786 | datasize >> 10, | |
787 | initsize >> 10, | |
4b529401 | 788 | totalhigh_pages << (PAGE_SHIFT-10)); |
1da177e4 | 789 | |
d7d119d7 | 790 | printk(KERN_INFO "virtual kernel memory layout:\n" |
8550eb99 | 791 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
052e7994 | 792 | #ifdef CONFIG_HIGHMEM |
8550eb99 | 793 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
052e7994 | 794 | #endif |
8550eb99 IM |
795 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" |
796 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | |
797 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
798 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
799 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", | |
800 | FIXADDR_START, FIXADDR_TOP, | |
801 | (FIXADDR_TOP - FIXADDR_START) >> 10, | |
052e7994 JF |
802 | |
803 | #ifdef CONFIG_HIGHMEM | |
8550eb99 IM |
804 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, |
805 | (LAST_PKMAP*PAGE_SIZE) >> 10, | |
052e7994 JF |
806 | #endif |
807 | ||
8550eb99 IM |
808 | VMALLOC_START, VMALLOC_END, |
809 | (VMALLOC_END - VMALLOC_START) >> 20, | |
052e7994 | 810 | |
8550eb99 IM |
811 | (unsigned long)__va(0), (unsigned long)high_memory, |
812 | ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, | |
052e7994 | 813 | |
8550eb99 IM |
814 | (unsigned long)&__init_begin, (unsigned long)&__init_end, |
815 | ((unsigned long)&__init_end - | |
816 | (unsigned long)&__init_begin) >> 10, | |
052e7994 | 817 | |
8550eb99 IM |
818 | (unsigned long)&_etext, (unsigned long)&_edata, |
819 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, | |
052e7994 | 820 | |
8550eb99 IM |
821 | (unsigned long)&_text, (unsigned long)&_etext, |
822 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); | |
052e7994 | 823 | |
beeb4195 JB |
824 | /* |
825 | * Check boundaries twice: Some fundamental inconsistencies can | |
826 | * be detected at build time already. | |
827 | */ | |
828 | #define __FIXADDR_TOP (-PAGE_SIZE) | |
829 | #ifdef CONFIG_HIGHMEM | |
830 | BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); | |
831 | BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE); | |
832 | #endif | |
833 | #define high_memory (-128UL << 20) | |
834 | BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); | |
835 | #undef high_memory | |
836 | #undef __FIXADDR_TOP | |
837 | ||
052e7994 | 838 | #ifdef CONFIG_HIGHMEM |
8550eb99 IM |
839 | BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); |
840 | BUG_ON(VMALLOC_END > PKMAP_BASE); | |
052e7994 | 841 | #endif |
beeb4195 | 842 | BUG_ON(VMALLOC_START >= VMALLOC_END); |
8550eb99 | 843 | BUG_ON((unsigned long)high_memory > VMALLOC_START); |
052e7994 | 844 | |
1da177e4 LT |
845 | if (boot_cpu_data.wp_works_ok < 0) |
846 | test_wp_bit(); | |
1da177e4 LT |
847 | } |
848 | ||
ad8f5797 | 849 | #ifdef CONFIG_MEMORY_HOTPLUG |
bc02af93 | 850 | int arch_add_memory(int nid, u64 start, u64 size) |
05039b92 | 851 | { |
7c7e9425 | 852 | struct pglist_data *pgdata = NODE_DATA(nid); |
776ed98b | 853 | struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; |
05039b92 DH |
854 | unsigned long start_pfn = start >> PAGE_SHIFT; |
855 | unsigned long nr_pages = size >> PAGE_SHIFT; | |
856 | ||
c04fc586 | 857 | return __add_pages(nid, zone, start_pfn, nr_pages); |
05039b92 | 858 | } |
9d99aaa3 | 859 | #endif |
05039b92 | 860 | |
1da177e4 LT |
861 | /* |
862 | * This function cannot be __init, since exceptions don't work in that | |
863 | * section. Put this after the callers, so that it cannot be inlined. | |
864 | */ | |
8550eb99 | 865 | static noinline int do_test_wp_bit(void) |
1da177e4 LT |
866 | { |
867 | char tmp_reg; | |
868 | int flag; | |
869 | ||
870 | __asm__ __volatile__( | |
8550eb99 IM |
871 | " movb %0, %1 \n" |
872 | "1: movb %1, %0 \n" | |
873 | " xorl %2, %2 \n" | |
1da177e4 | 874 | "2: \n" |
f832ff18 | 875 | _ASM_EXTABLE(1b,2b) |
1da177e4 LT |
876 | :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)), |
877 | "=q" (tmp_reg), | |
878 | "=r" (flag) | |
879 | :"2" (1) | |
880 | :"memory"); | |
8550eb99 | 881 | |
1da177e4 LT |
882 | return flag; |
883 | } | |
884 | ||
63aaf308 | 885 | #ifdef CONFIG_DEBUG_RODATA |
edeed305 AV |
886 | const int rodata_test_data = 0xC3; |
887 | EXPORT_SYMBOL_GPL(rodata_test_data); | |
63aaf308 | 888 | |
502f6604 | 889 | int kernel_set_to_readonly __read_mostly; |
16239630 SR |
890 | |
891 | void set_kernel_text_rw(void) | |
892 | { | |
893 | unsigned long start = PFN_ALIGN(_text); | |
894 | unsigned long size = PFN_ALIGN(_etext) - start; | |
895 | ||
896 | if (!kernel_set_to_readonly) | |
897 | return; | |
898 | ||
899 | pr_debug("Set kernel text: %lx - %lx for read write\n", | |
900 | start, start+size); | |
901 | ||
902 | set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); | |
903 | } | |
904 | ||
905 | void set_kernel_text_ro(void) | |
906 | { | |
907 | unsigned long start = PFN_ALIGN(_text); | |
908 | unsigned long size = PFN_ALIGN(_etext) - start; | |
909 | ||
910 | if (!kernel_set_to_readonly) | |
911 | return; | |
912 | ||
913 | pr_debug("Set kernel text: %lx - %lx for read only\n", | |
914 | start, start+size); | |
915 | ||
916 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); | |
917 | } | |
918 | ||
5bd5a452 MC |
919 | static void mark_nxdata_nx(void) |
920 | { | |
921 | /* | |
922 | * When this is called, init has already been executed and released,
0d2eb44f | 923 | * so everything past _etext should be NX. |
5bd5a452 MC |
924 | */ |
925 | unsigned long start = PFN_ALIGN(_etext); | |
926 | /* | |
927 | * This comes from is_kernel_text upper limit. Also HPAGE where used: | |
928 | */ | |
929 | unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; | |
930 | ||
931 | if (__supported_pte_mask & _PAGE_NX) | |
932 | printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10); | |
933 | set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT); | |
934 | } | |
935 | ||
63aaf308 AV |
936 | void mark_rodata_ro(void) |
937 | { | |
6fb14755 JB |
938 | unsigned long start = PFN_ALIGN(_text); |
939 | unsigned long size = PFN_ALIGN(_etext) - start; | |
63aaf308 | 940 | |
4e4eee0e MD |
941 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
942 | printk(KERN_INFO "Write protecting the kernel text: %luk\n", | |
943 | size >> 10); | |
0c42f392 | 944 | |
16239630 SR |
945 | kernel_set_to_readonly = 1; |
946 | ||
0c42f392 | 947 | #ifdef CONFIG_CPA_DEBUG |
4e4eee0e MD |
948 | printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", |
949 | start, start+size); | |
950 | set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); | |
0c42f392 | 951 | |
4e4eee0e MD |
952 | printk(KERN_INFO "Testing CPA: write protecting again\n"); |
953 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); | |
602033ed | 954 | #endif |
8f0f996e | 955 | |
6fb14755 JB |
956 | start += size; |
957 | size = (unsigned long)__end_rodata - start; | |
6d238cc4 | 958 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
d7d119d7 IM |
959 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
960 | size >> 10); | |
edeed305 | 961 | rodata_test(); |
63aaf308 | 962 | |
0c42f392 | 963 | #ifdef CONFIG_CPA_DEBUG |
d7d119d7 | 964 | printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); |
6d238cc4 | 965 | set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); |
0c42f392 | 966 | |
d7d119d7 | 967 | printk(KERN_INFO "Testing CPA: write protecting again\n"); |
6d238cc4 | 968 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
0c42f392 | 969 | #endif |
5bd5a452 | 970 | mark_nxdata_nx(); |
63aaf308 AV |
971 | } |
972 | #endif | |
973 |