]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - include/asm-sparc64/pgalloc.h
Pull ar-k0-usage into release branch
[mirror_ubuntu-artful-kernel.git] / include / asm-sparc64 / pgalloc.h
1 /* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
2 #ifndef _SPARC64_PGALLOC_H
3 #define _SPARC64_PGALLOC_H
4
5 #include <linux/config.h>
6 #include <linux/kernel.h>
7 #include <linux/sched.h>
8 #include <linux/mm.h>
9
10 #include <asm/spitfire.h>
11 #include <asm/cpudata.h>
12 #include <asm/cacheflush.h>
13 #include <asm/page.h>
14
/* Page table allocation/freeing.
 *
 * Freed page-table pages are cached on "quicklists" so they can be
 * handed out again without a round trip through the page allocator.
 * The first word of each free page is reused as the freelist link.
 */
#ifdef CONFIG_SMP
/* Sliiiicck: on SMP the quicklists live directly in the per-cpu data. */
#define pgt_quicklists local_cpu_data()
#else
/* On UP a single global instance holds all the caches. */
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;	/* freelist of pgd pages */
	unsigned long *pte_cache[2];	/* pte/pmd freelists, one per cache color */
	unsigned int pgcache_size;	/* total pages cached across all lists */
} pgt_quicklists;
#endif
#define pgd_quicklist (pgt_quicklists.pgd_cache)
/* No pmd-specific list: pmd pages share pte_cache (see free_pmd_fast). */
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (pgt_quicklists.pte_cache)
#define pgtable_cache_size (pgt_quicklists.pgcache_size)
31 static __inline__ void free_pgd_fast(pgd_t *pgd)
32 {
33 preempt_disable();
34 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
35 pgd_quicklist = (unsigned long *) pgd;
36 pgtable_cache_size++;
37 preempt_enable();
38 }
39
40 static __inline__ pgd_t *get_pgd_fast(void)
41 {
42 unsigned long *ret;
43
44 preempt_disable();
45 if((ret = pgd_quicklist) != NULL) {
46 pgd_quicklist = (unsigned long *)(*ret);
47 ret[0] = 0;
48 pgtable_cache_size--;
49 preempt_enable();
50 } else {
51 preempt_enable();
52 ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
53 if(ret)
54 memset(ret, 0, PAGE_SIZE);
55 }
56 return (pgd_t *)ret;
57 }
58
59 static __inline__ void free_pgd_slow(pgd_t *pgd)
60 {
61 free_page((unsigned long)pgd);
62 }
63
#ifdef DCACHE_ALIASING_POSSIBLE
/* With a potentially aliasing D-cache, pages come in two colors (bit
 * selected below).  VPTE_COLOR derives the color from a vpte address,
 * DCACHE_COLOR from a kernel virtual address. */
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
/* No aliasing possible: everything is color 0. */
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif

/* Install a pmd table into a pud entry. */
#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
74 static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
75 {
76 unsigned long *ret;
77 int color = 0;
78
79 preempt_disable();
80 if (pte_quicklist[color] == NULL)
81 color = 1;
82
83 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
84 pte_quicklist[color] = (unsigned long *)(*ret);
85 ret[0] = 0;
86 pgtable_cache_size--;
87 }
88 preempt_enable();
89
90 return (pmd_t *)ret;
91 }
92
93 static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
94 {
95 pmd_t *pmd;
96
97 pmd = pmd_alloc_one_fast(mm, address);
98 if (!pmd) {
99 pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
100 if (pmd)
101 memset(pmd, 0, PAGE_SIZE);
102 }
103 return pmd;
104 }
105
106 static __inline__ void free_pmd_fast(pmd_t *pmd)
107 {
108 unsigned long color = DCACHE_COLOR((unsigned long)pmd);
109
110 preempt_disable();
111 *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
112 pte_quicklist[color] = (unsigned long *) pmd;
113 pgtable_cache_size++;
114 preempt_enable();
115 }
116
117 static __inline__ void free_pmd_slow(pmd_t *pmd)
118 {
119 free_page((unsigned long)pmd);
120 }
121
/* Install a pte table (kernel virtual pointer) into a pmd entry. */
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
/* Same, but taking the pte table as a struct page. */
#define pmd_populate(MM,PMD,PTE_PAGE) \
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

/* Slow-path pte allocator; defined in the arch mm code. */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
128 static inline struct page *
129 pte_alloc_one(struct mm_struct *mm, unsigned long addr)
130 {
131 pte_t *pte = pte_alloc_one_kernel(mm, addr);
132
133 if (pte)
134 return virt_to_page(pte);
135
136 return NULL;
137 }
138
139 static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
140 {
141 unsigned long color = VPTE_COLOR(address);
142 unsigned long *ret;
143
144 preempt_disable();
145 if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
146 pte_quicklist[color] = (unsigned long *)(*ret);
147 ret[0] = 0;
148 pgtable_cache_size--;
149 }
150 preempt_enable();
151 return (pte_t *)ret;
152 }
153
154 static __inline__ void free_pte_fast(pte_t *pte)
155 {
156 unsigned long color = DCACHE_COLOR((unsigned long)pte);
157
158 preempt_disable();
159 *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
160 pte_quicklist[color] = (unsigned long *) pte;
161 pgtable_cache_size++;
162 preempt_enable();
163 }
164
165 static __inline__ void free_pte_slow(pte_t *pte)
166 {
167 free_page((unsigned long)pte);
168 }
169
170 static inline void pte_free_kernel(pte_t *pte)
171 {
172 free_pte_fast(pte);
173 }
174
175 static inline void pte_free(struct page *ptepage)
176 {
177 free_pte_fast(page_address(ptepage));
178 }
179
/* Top-level alloc/free entry points map onto the quicklist helpers. */
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
183
184 #endif /* _SPARC64_PGALLOC_H */