git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - include/asm-sparc64/pgalloc.h
Linux-2.6.12-rc2
[mirror_ubuntu-artful-kernel.git] / include / asm-sparc64 / pgalloc.h
1 /* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
2 #ifndef _SPARC64_PGALLOC_H
3 #define _SPARC64_PGALLOC_H
4
5 #include <linux/config.h>
6 #include <linux/kernel.h>
7 #include <linux/sched.h>
8 #include <linux/mm.h>
9
10 #include <asm/spitfire.h>
11 #include <asm/cpudata.h>
12 #include <asm/cacheflush.h>
13
/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
/* On SMP the quicklists live in the per-cpu data area. */
#define pgt_quicklists local_cpu_data()
#else
/* On UP a single global structure holds the page-table freelists. */
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;	/* head of the pgd-page freelist */
	unsigned long *pte_cache[2];	/* pte/pmd freelists, one per D-cache color */
	unsigned int pgcache_size;	/* number of pages currently cached */
} pgt_quicklists;
#endif
#define pgd_quicklist (pgt_quicklists.pgd_cache)
/* No dedicated pmd quicklist: pmd pages share the pte freelists below. */
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (pgt_quicklists.pte_cache)
#define pgtable_cache_size (pgt_quicklists.pgcache_size)
29
30 static __inline__ void free_pgd_fast(pgd_t *pgd)
31 {
32 preempt_disable();
33 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
34 pgd_quicklist = (unsigned long *) pgd;
35 pgtable_cache_size++;
36 preempt_enable();
37 }
38
39 static __inline__ pgd_t *get_pgd_fast(void)
40 {
41 unsigned long *ret;
42
43 preempt_disable();
44 if((ret = pgd_quicklist) != NULL) {
45 pgd_quicklist = (unsigned long *)(*ret);
46 ret[0] = 0;
47 pgtable_cache_size--;
48 preempt_enable();
49 } else {
50 preempt_enable();
51 ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
52 if(ret)
53 memset(ret, 0, PAGE_SIZE);
54 }
55 return (pgd_t *)ret;
56 }
57
/* Return a pgd page to the page allocator, bypassing the quicklist. */
static __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
62
#ifdef DCACHE_ALIASING_POSSIBLE
/* VPTE_COLOR: derive a freelist color (0/1) from a virtual pte-table
 * address; DCACHE_COLOR: color of a kernel address by page parity.
 * Used so freed pages are handed back with a matching D-cache color. */
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
/* No D-cache aliasing on this configuration: a single color suffices. */
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif
70
/* Install a pmd table into a pud entry; the MM argument is unused. */
#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
72
73 static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
74 {
75 unsigned long *ret;
76 int color = 0;
77
78 preempt_disable();
79 if (pte_quicklist[color] == NULL)
80 color = 1;
81
82 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
83 pte_quicklist[color] = (unsigned long *)(*ret);
84 ret[0] = 0;
85 pgtable_cache_size--;
86 }
87 preempt_enable();
88
89 return (pmd_t *)ret;
90 }
91
92 static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
93 {
94 pmd_t *pmd;
95
96 pmd = pmd_alloc_one_fast(mm, address);
97 if (!pmd) {
98 pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
99 if (pmd)
100 memset(pmd, 0, PAGE_SIZE);
101 }
102 return pmd;
103 }
104
105 static __inline__ void free_pmd_fast(pmd_t *pmd)
106 {
107 unsigned long color = DCACHE_COLOR((unsigned long)pmd);
108
109 preempt_disable();
110 *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
111 pte_quicklist[color] = (unsigned long *) pmd;
112 pgtable_cache_size++;
113 preempt_enable();
114 }
115
/* Return a pmd page to the page allocator, bypassing the quicklists. */
static __inline__ void free_pmd_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}
120
/* Install a pte table (given by kernel virtual address) into a pmd. */
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
/* Same, but the pte table is given as a struct page (user mappings). */
#define pmd_populate(MM,PMD,PTE_PAGE) \
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

/* Out-of-line pte allocator; handles quicklists and cache coloring. */
extern pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
126
/* Allocate a pte table for a kernel mapping; thin wrapper around the
 * out-of-line allocator declared above. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return __pte_alloc_one_kernel(mm, address);
}
131
132 static inline struct page *
133 pte_alloc_one(struct mm_struct *mm, unsigned long addr)
134 {
135 pte_t *pte = __pte_alloc_one_kernel(mm, addr);
136
137 if (pte)
138 return virt_to_page(pte);
139
140 return NULL;
141 }
142
143 static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
144 {
145 unsigned long color = VPTE_COLOR(address);
146 unsigned long *ret;
147
148 preempt_disable();
149 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
150 pte_quicklist[color] = (unsigned long *)(*ret);
151 ret[0] = 0;
152 pgtable_cache_size--;
153 }
154 preempt_enable();
155 return (pte_t *)ret;
156 }
157
158 static __inline__ void free_pte_fast(pte_t *pte)
159 {
160 unsigned long color = DCACHE_COLOR((unsigned long)pte);
161
162 preempt_disable();
163 *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
164 pte_quicklist[color] = (unsigned long *) pte;
165 pgtable_cache_size++;
166 preempt_enable();
167 }
168
/* Return a pte page to the page allocator, bypassing the quicklists. */
static __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}
173
/* Free a kernel pte table: push it back onto the color quicklist. */
static inline void pte_free_kernel(pte_t *pte)
{
	free_pte_fast(pte);
}
178
179 static inline void pte_free(struct page *ptepage)
180 {
181 free_pte_fast(page_address(ptepage));
182 }
183
/* Default free/alloc entry points go through the quicklists; the
 * *_slow variants above return pages to the buddy allocator instead. */
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
187
188 #endif /* _SPARC64_PGALLOC_H */