/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *   Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *   and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
static __ref void *early_alloc_pgtable(unsigned long size)
{
        void *pt;

        pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
        memset(pt, 0, size);

        return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page() is currently only called by __ioremap(): it adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
        } else {
#ifdef CONFIG_PPC_MMU_NOHASH
                pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
                if (pgd_none(*pgdp)) {
                        pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
                        BUG_ON(pudp == NULL);
                        pgd_populate(&init_mm, pgdp, pudp);
                }
#endif /* PUD_TABLE_SIZE */
                pudp = pud_offset(pgdp, ea);
                if (pud_none(*pudp)) {
                        pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
                        BUG_ON(pmdp == NULL);
                        pud_populate(&init_mm, pudp, pmdp);
                }
                pmdp = pmd_offset(pudp, ea);
                if (!pmd_present(*pmdp)) {
                        ptep = early_alloc_pgtable(PAGE_SIZE);
                        BUG_ON(ptep == NULL);
                        pmd_populate_kernel(&init_mm, pmdp, ptep);
                }
                ptep = pte_offset_kernel(pmdp, ea);
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * linux page table entry for this mapping.  Simply bolt an
                 * entry in the hardware page table.
                 */
                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        printk(KERN_ERR "Failed to do bolted mapping IO "
                               "memory at %016lx !\n", pa);
                        return -ENOMEM;
                }
#endif /* !CONFIG_PPC_MMU_NOHASH */
        }

#ifdef CONFIG_PPC_BOOK3E_64
        /*
         * With hardware tablewalk, a sync is needed to ensure that
         * subsequent accesses see the PTE we just wrote.  Unlike userspace
         * mappings, we can't tolerate spurious faults, so make sure
         * the new PTE will be seen the first time.
         */
        mb();
#else
        smp_wmb();
#endif
        return 0;
}

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
                            unsigned long flags)
{
        unsigned long i;

        /* Make sure we have the base flags */
        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);

        /* Non-cacheable page cannot be coherent */
        if (flags & _PAGE_NO_CACHE)
                flags &= ~_PAGE_COHERENT;

        /* We don't support the 4K PFN hack with ioremap */
        if (flags & _PAGE_4K_PFN)
                return NULL;

        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
                        return NULL;

        return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping.  This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        unmap_kernel_range((unsigned long)ea, size);
}
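
/*
 * Usage sketch (illustrative only, not part of this file): __ioremap_at()
 * and __iounmap_at() are the low-level pair used when the caller manages
 * the virtual range itself, e.g. the PCI code mapping a host bridge's IO
 * space.  The names phb_io_base_phys, phb_io_base_virt and phb_io_size
 * below are hypothetical placeholders:
 *
 *      void __iomem *io;
 *
 *      io = __ioremap_at(phb_io_base_phys, phb_io_base_virt, phb_io_size,
 *                        _PAGE_NO_CACHE | _PAGE_GUARDED);
 *      if (!io)
 *              return -ENOMEM;
 *      ...
 *      __iounmap_at(phb_io_base_virt, phb_io_size);
 */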

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
                                unsigned long flags, void *caller)
{
        phys_addr_t paligned;
        void __iomem *ret;

        /*
         * Choose an address to map it to.
         * Once the imalloc system is running, we use it.
         * Before that, we map using addresses going
         * up from ioremap_bot.  imalloc will use
         * the addresses from ioremap_bot through
         * IMALLOC_END
         */
        paligned = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - paligned;

        if ((size == 0) || (paligned == 0))
                return NULL;

        if (mem_init_done) {
                struct vm_struct *area;

                area = __get_vm_area_caller(size, VM_IOREMAP,
                                            ioremap_bot, IOREMAP_END,
                                            caller);
                if (area == NULL)
                        return NULL;

                area->phys_addr = paligned;
                ret = __ioremap_at(paligned, area->addr, size, flags);
                if (!ret)
                        vunmap(area->addr);
        } else {
                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
                if (ret)
                        ioremap_bot += size;
        }

        if (ret)
                ret += addr & ~PAGE_MASK;
        return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                         unsigned long flags)
{
        return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}
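
/*
 * Usage sketch (illustrative only, not part of this file): a driver maps a
 * device register window, accesses it with the MMIO accessors, and unmaps
 * it again.  DEV_REGS_PHYS, DEV_REGS_SIZE and CTRL_OFFSET are hypothetical
 * placeholders:
 *
 *      void __iomem *regs = ioremap(DEV_REGS_PHYS, DEV_REGS_SIZE);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + CTRL_OFFSET);
 *      iounmap(regs);
 */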

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = _PAGE_NO_CACHE;
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
                            unsigned long flags)
{
        void *caller = __builtin_return_address(0);

        /* writeable implies dirty for kernel addresses */
        if (flags & _PAGE_RW)
                flags |= _PAGE_DIRTY;

        /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
        flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
        /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
         * which means that we just cleared supervisor access... oops ;-) This
         * restores it
         */
        flags |= _PAGE_BAP_SR;
#endif

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!mem_init_done)
                return;

        addr = (void *) ((unsigned long __force)
                         PCI_FIX_ADDR(token) & PAGE_MASK);
        if ((unsigned long)addr < ioremap_bot) {
                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
                       " at 0x%p\n", addr);
                return;
        }
        vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
        if (ppc_md.iounmap)
                ppc_md.iounmap(token);
        else
                __iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
        if (pgd_huge(pgd))
                return pte_page(pgd_pte(pgd));
        return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
        if (pud_huge(pud))
                return pte_page(pud_pte(pud));
        return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd; the low PTE_RPN_SHIFT bits are
 * used for flags.  For a PTE page, the pmd holds a PTE_FRAG_SIZE (4K)
 * aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd) || pmd_huge(pmd))
                return pfn_to_page(pmd_pfn(pmd));
        return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
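/*
 * With 64K pages a full page is far larger than a single PTE table, so PTE
 * pages are handed out in PTE_FRAG_SIZE pieces.  mm->context.pte_frag caches
 * the next unused fragment of the current page, and the page's reference
 * count tracks how many fragments are still in use (PTE_FRAG_NR per page).
 */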
static pte_t *get_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pte_frag;
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have used up all the fragments, mark the PTE page NULL
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                mm->context.pte_frag = pte_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
                                       __GFP_REPEAT | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!kernel && !pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }

        ret = page_address(page);
        spin_lock(&mm->page_table_lock);
        /*
         * If we find pte_frag already set (e.g. another thread refilled it
         * meanwhile), return the allocated page with a single fragment
         * count.
         */
        if (likely(!mm->context.pte_frag)) {
                atomic_set(&page->_count, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
        pte_t *pte;

        pte = get_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_cache(mm, kernel);
}

void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
        struct page *page = virt_to_page(table);
        if (put_page_testzero(page)) {
                if (!kernel)
                        pgtable_page_dtor(page);
                free_hot_cold_page(page, 0);
        }
}

#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
        struct page *page = virt_to_page(table);
        if (put_page_testzero(page)) {
                pgtable_page_dtor(page);
                free_hot_cold_page(page, 0);
        }
}

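/*
 * Page table pages freed under a TLB gather are queued via tlb_remove_table()
 * so they are only reused after the TLB has been flushed.  The table's index
 * size ("shift") is packed into the low bits of the table address and
 * unpacked again in __tlb_remove_table(); PTE pages use shift == 0.
 */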
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        if (!shift)
                /* PTE page needs special handling */
                page_table_free_rcu(table);
        else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        if (!shift) {
                /* PTE page needs special handling */
                struct page *page = virt_to_page(table);
                if (put_page_testzero(page)) {
                        pgtable_page_dtor(page);
                        free_hot_cold_page(page, 0);
                }
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#endif
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage.  It's also called in
 * the page fault path when we don't hit any of the major fault cases, i.e.
 * a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc.  The generic code
 * will have handled those two for us; we additionally deal with missing
 * execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
                /*
                 * Since we are not supporting SW TLB systems, we don't
                 * have anything similar to flush_tlb_page_nohash()
                 */
        }
        return changed;
}

unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                  pmd_t *pmdp, unsigned long clr,
                                  unsigned long set)
{
        unsigned long old, tmp;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
#endif

#ifdef PTE_ATOMIC_UPDATES
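        /*
         * Atomic update: wait for _PAGE_BUSY to clear, then clear the bits
         * in 'clr' and set the bits in 'set' with a single ldarx/stdcx.
         * sequence, retrying if the store fails.
         */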
        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3\n\
                andi.   %1,%0,%6\n\
                bne-    1b \n\
                andc    %1,%0,%4 \n\
                or      %1,%1,%7\n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
        : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
        : "cc" );
#else
        old = pmd_val(*pmdp);
        *pmdp = __pmd((old & ~clr) | set);
#endif
        trace_hugepage_update(addr, old, clr, set);
        if (old & _PAGE_HASHPTE)
                hpte_do_hugepage_flush(mm, addr, pmdp, old);
        return old;
}

pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (pmd_trans_huge(*pmdp)) {
                pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
        } else {
                /*
                 * khugepaged calls this for a normal pmd
                 */
                pmd = *pmdp;
                pmd_clear(pmdp);
                /*
                 * Wait for all pending hash_page to finish.  This is needed
                 * in case of subpage collapse.  When we collapse normal pages
                 * to hugepage, we first clear the pmd, then invalidate all
                 * the PTE entries.  The assumption here is that any low level
                 * page fault will see a none pmd and take the slow path that
                 * will wait on mmap_sem.  But we could very well be in a
                 * hash_page with local ptep pointer value.  Such a hash page
                 * can result in adding new HPTE entries for normal subpages.
                 * That means we could be modifying the page content as we
                 * copy them to a huge page.  So wait for parallel hash_page
                 * to finish before invalidating HPTE entries.  We can do this
                 * by sending an IPI to all the cpus and executing a dummy
                 * function there.
                 */
                kick_all_cpus_sync();
                /*
                 * Now invalidate the hpte entries in the range
                 * covered by pmd.  This makes sure we take a
                 * fault and will find the pmd as none, which will
                 * result in a major fault which takes mmap_sem and
                 * hence waits for collapse to complete.  Without this
                 * the __collapse_huge_page_copy can result in copying
                 * the old content.
                 */
                flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
        }
        return pmd;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We mark the pmd splitting and invalidate all the hpte
 * entries for this hugepage.
 */
void pmdp_splitting_flush(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp)
{
        unsigned long old, tmp;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif

#ifdef PTE_ATOMIC_UPDATES

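        /*
         * Atomically set _PAGE_SPLITTING in the pmd, waiting for any
         * concurrent _PAGE_BUSY holder to finish first.
         */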
        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3\n\
                andi.   %1,%0,%6\n\
                bne-    1b \n\
                ori     %1,%0,%4 \n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
        : "r" (pmdp), "i" (_PAGE_SPLITTING), "m" (*pmdp), "i" (_PAGE_BUSY)
        : "cc" );
#else
        old = pmd_val(*pmdp);
        *pmdp = __pmd(old | _PAGE_SPLITTING);
#endif
        /*
         * If we didn't have the splitting flag set already, go and flush the
         * HPTE entries.
         */
        trace_hugepage_splitting(address, old);
        if (!(old & _PAGE_SPLITTING)) {
                /* We need to flush the hpte */
                if (old & _PAGE_HASHPTE)
                        hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
        }
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        kick_all_cpus_sync();
}

/*
 * We want to deposit the pgtable in the pmd and use it for tracking
 * the base page size hptes.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        pgtable_t *pgtable_slot;
        assert_spin_locked(&mm->page_table_lock);
        /*
         * we store the pgtable in the second half of PMD
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        *pgtable_slot = pgtable;
        /*
         * Expose the deposited pgtable to other cpus before we set the
         * hugepage PTE at the pmd level; the hash fault code looks at the
         * deposited pgtable to store hash index values.
         */
        smp_wmb();
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;
        pgtable_t *pgtable_slot;

        assert_spin_locked(&mm->page_table_lock);
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Once we withdraw, mark the entry NULL.
         */
        *pgtable_slot = NULL;
        /*
         * We store HPTE information in the deposited PTE fragment.
         * Zero out the content on withdraw.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        return pgtable;
}

/*
 * set a new huge pmd.  We should not be called for updating
 * an existing pmd entry.  That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
        assert_spin_locked(&mm->page_table_lock);
        WARN_ON(!pmd_trans_huge(pmd));
#endif
        trace_hugepage_set_pmd(addr, pmd);
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

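/*
 * Invalidate a huge pmd by clearing _PAGE_PRESENT.  pmd_hugepage_update()
 * also flushes the existing HPTE entries if _PAGE_HASHPTE was set.
 */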
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                            pmd_t *pmdp, unsigned long old_pmd)
{
        int ssize, local = 0;
        unsigned int psize;
        unsigned long vsid;
        const struct cpumask *tmp;

        /* Get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
        psize = get_slice_psize(mm, addr);
        BUG_ON(psize == MMU_PAGE_16M);
#endif
        if (old_pmd & _PAGE_COMBO)
                psize = MMU_PAGE_4K;
        else
                psize = MMU_PAGE_64K;

        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_vsid(mm->context.id, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }

        tmp = cpumask_of(smp_processor_id());
        if (cpumask_equal(mm_cpumask(mm), tmp))
                local = 1;

        return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, local);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        pmd_val(pmd) |= pgprot_val(pgprot);
        return pmd;
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        pmd_t pmd;
        /*
         * For a valid pte we always have _PAGE_PRESENT or _PAGE_FILE set;
         * we use that to check for a THP page at the pmd level.  For a leaf
         * pte of a huge page, the bottom two bits are != 00.
         */
        pmd_val(pmd) = pfn << PTE_RPN_SHIFT;
        pmd_val(pmd) |= _PAGE_THP_HUGE;
        pmd = pmd_set_protbits(pmd, pgprot);
        return pmd;
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmd_val(pmd) &= _HPAGE_CHG_MASK;
        pmd = pmd_set_protbits(pmd, newprot);
        return pmd;
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
        return;
}

pmd_t pmdp_get_and_clear(struct mm_struct *mm,
                         unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        pgtable_t pgtable;
        unsigned long old;
        pgtable_t *pgtable_slot;

        old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * We have pmd == none and we are holding page_table_lock.
         * So we can safely go and clear the pgtable hash
         * index info.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Zero out the old valid bit and hash index details;
         * the hash fault code looks at them.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        return old_pmd;
}

int has_transparent_hugepage(void)
{
        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return 0;
        /*
         * We support THP only if PMD_SIZE is 16MB.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
                return 0;
        /*
         * We need to make sure that we support 16MB hugepages in a segment
         * with base page size 64K or 4K.  We only enable THP with a PAGE_SIZE
         * of 64K.
         */
        /*
         * If we have 64K HPTE, we will be using that by default
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
            (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
                return 0;
        /*
         * Ok we only have 4K HPTE
         */
        if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
                return 0;

        return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */