#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>

/*
 * Very stupidly, we used to get new pgd's and pmd's, init their contents
 * to point to the NULL versions of the next level page table, later on
 * completely re-init them the same way, then free them up.  This wasted
 * a lot of work and caused unnecessary memory traffic.  How broken...
 * We fix this by caching them.
 */
#define pgd_quicklist		(local_cpu_data->pgd_quick)
#define pmd_quicklist		(local_cpu_data->pmd_quick)
#define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)

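/*
 * Each quicklist is an intrusive, singly-linked list of free, zeroed
 * pages: the head pointer lives in the per-CPU data referenced above,
 * and word 0 of each cached page holds the address of the next page
 * (0 terminates the list).  Because the lists are per-CPU, preemption
 * must be disabled while they are manipulated.  The pattern, roughly
 * (illustrative sketch only, not part of this interface):
 *
 *	push:	*page = (unsigned long) head;  head = page;
 *	pop:	page = head;  head = (unsigned long *) *page;  *page = 0;
 *
 * Re-zeroing the link word on pop is all it takes to hand back a fully
 * zeroed page table, since the rest of a cached page is already zero.
 */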
static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgd_quicklist;
	if (likely(ret != NULL)) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}

	preempt_enable();

	return (pgd_t *) ret;
}

static inline pgd_t*
pgd_alloc (struct mm_struct *mm)
{
	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
	pgd_t *pgd = pgd_alloc_one_fast(mm);

	if (unlikely(pgd == NULL)) {
		pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	}
	return pgd;
}

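/*
 * Return a pgd to the per-CPU quicklist; word 0 of the freed page is
 * used to link to the previous list head.
 */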
static inline void
pgd_free (pgd_t *pgd)
{
	preempt_disable();
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	++pgtable_cache_size;
	preempt_enable();
}

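/*
 * Page-table entries hold physical, not virtual, addresses, hence the
 * __pa() when a pmd page is installed into a pud entry.
 */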
static inline void
pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

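/* Pop a zeroed pmd page off the per-CPU quicklist; NULL if the list is empty. */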
static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = (unsigned long *)pmd_quicklist;
	if (likely(ret != NULL)) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}

	preempt_enable();

	return (pmd_t *)ret;
}

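/*
 * Slow path: get a fresh zeroed page from the page allocator.
 * __GFP_REPEAT asks the allocator to retry, though it may still fail.
 */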
static inline pmd_t*
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);

	return pmd;
}

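/* Return a pmd to the per-CPU quicklist; same linking scheme as pgd_free(). */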
static inline void
pmd_free (pmd_t *pmd)
{
	preempt_disable();
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	++pgtable_cache_size;
	preempt_enable();
}

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

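/*
 * Install a pte page into a pmd entry.  User ptes are handed around as
 * struct page's (hence page_to_phys()), kernel ptes as direct-mapped
 * virtual addresses (hence __pa()).
 */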
static inline void
pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

static inline void
pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

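/*
 * pte pages are not cached on the quicklists; they always come straight
 * from, and go straight back to, the page allocator.
 */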
static inline struct page *
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);

	return pte;
}

static inline pte_t *
pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);

	return pte;
}

static inline void
pte_free (struct page *pte)
{
	__free_page(pte);
}

static inline void
pte_free_kernel (pte_t *pte)
{
	free_page((unsigned long) pte);
}

#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))

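/*
 * check_pgt_cache() lives in the arch mm code; it trims the quicklists
 * back down once pgtable_cache_size grows too large (see the counter
 * updates above).
 */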
extern void check_pgt_cache (void);

#endif /* _ASM_IA64_PGALLOC_H */