/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <asm/mem-layout.h>
#include <asm/atomic.h>

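/*
 * No page-table caches are maintained on this architecture, so there is
 * nothing to prune here and check_pgt_cache() is a no-op.
 */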
#define check_pgt_cache() do {} while (0)

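/*
 * kmap_generation is bumped whenever the kernel portion of the page
 * tables changes; each mm records the generation it last copied, and
 * stale copies are refreshed at context-switch time (see the comment
 * above pmd_populate_kernel below).
 */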
extern unsigned long long kmap_generation;

/*
 * Page table creation interface
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/*
	 * There may be better ways to do this, but to ensure
	 * that new address spaces always contain the kernel
	 * base mapping, and to ensure that the user area is
	 * initially marked invalid, initialize the new map
	 * with a copy of the kernel's persistent map.
	 */

	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t *));
	mm->context.generation = kmap_generation;

	/* Physical version is what is passed to virtual machine on switch */
	mm->context.ptbase = __pa(pgd);

	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

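/*
 * Allocate a zeroed page to hold user-level PTEs and run the page-table
 * constructor so the struct page is set up for use as a page table.
 */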
static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);

	if (pte)
		pgtable_page_ctor(pte);

	return pte;
}

/* _kernel variant gets to use a different allocator */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
	return (pte_t *) __get_free_page(flags);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

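/*
 * Install a freshly allocated user PTE page into an L1 (pmd) entry.
 */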
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	/*
	 * Conveniently, zero in 3 LSB means indirect 4K page table.
	 * Not so convenient when you're trying to vary the page size.
	 */
	set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
		HEXAGON_L1_PTE_SIZE));
}

/*
 * Other architectures seem to have ways of making all processes
 * share the same pmd's for their kernel mappings, but the v0.3
 * Hexagon VM spec has a "monolithic" L1 table for user and kernel
 * segments.  We track "generations" of the kernel map to minimize
 * overhead, and update the "slave" copies of the kernel mappings
 * as part of switch_mm.  However, we still need to update the
 * kernel map of the active thread that is calling pmd_populate_kernel...
 */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	extern spinlock_t kmap_gen_lock;
	pmd_t *ppmd;
	int pmdindex;

	spin_lock(&kmap_gen_lock);
	kmap_generation++;
	mm->context.generation = kmap_generation;
	current->active_mm->context.generation = kmap_generation;
	spin_unlock(&kmap_gen_lock);

	set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));

	/*
	 * Now the "slave" copy of the current thread.
	 * This is pointer arithmetic, not byte addresses!
	 */
	pmdindex = (pgd_t *)pmd - mm->pgd;
	ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
	set_pmd(ppmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
	if (pmdindex > max_kernel_seg)
		max_kernel_seg = pmdindex;
}

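/*
 * Illustrative sketch, not part of this header: the generation counter
 * maintained above is intended to be consumed at context-switch time.
 * A consumer might look roughly like the following, re-copying the
 * kernel-segment entries when the incoming mm's copy is stale; the real
 * logic lives in the Hexagon switch_mm implementation, and the names
 * and loop bounds here are assumptions for illustration only.
 *
 *	if (next->context.generation < kmap_generation) {
 *		for (l1 = first_kernel_seg; l1 <= max_kernel_seg; l1++)
 *			next->pgd[l1] = init_mm.pgd[l1];
 *		next->context.generation = kmap_generation;
 *	}
 */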

#define __pte_free_tlb(tlb, pte, addr)		\
do {						\
	pgtable_page_dtor((pte));		\
	tlb_remove_page((tlb), (pte));		\
} while (0)

#endif