/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case and the
 *  munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

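/*
 * Note on pages/local: the gather starts out batching page pointers
 * into the small on-stack local[] array (MMU_GATHER_BUNDLE entries);
 * __tlb_alloc_page() below tries to upgrade pages/max to a whole page
 * of pointers so larger batches can be freed in one go.
 */
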
/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

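/*
 * In terms of the three cases above: case 2 (fullmm) and case 3
 * (vma == NULL) take the whole-mm flush_tlb_mm() path; only case 1
 * accumulates a range and uses flush_tlb_range().
 */
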
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

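/*
 * Illustrative example for tlb_add_flush(), assuming 4KiB pages:
 * after calls for 0x8000 and then 0xb000, range_start = 0x8000 and
 * range_end = 0xc000, so a single flush_tlb_range() covers both pages.
 */
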
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

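/*
 * If the GFP_NOWAIT allocation fails, tlb->pages is left pointing at
 * the on-stack local[] bundle, so gathering still works; the batch is
 * simply drained every MMU_GATHER_BUNDLE pages instead of every
 * PAGE_SIZE / sizeof(struct page *) pages.
 */
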
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

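/*
 * Ordering matters above: the TLB must be flushed before
 * free_pages_and_swap_cache() hands the pages back, so that no CPU
 * can still reach a freed page through a stale TLB entry.
 */
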
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

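/*
 * fullmm encoding: a full-mm teardown (see exit_mmap()) passes
 * start = 0 and end = -1, so !(start | (end + 1)) evaluates to 1;
 * any real address range yields 0.
 */
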
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

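/*
 * Typical call sequence (a sketch, mirroring callers such as
 * zap_page_range()):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	...				(tlb_remove_tlb_entry() /
 *					 tlb_remove_page() per pte/page)
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */
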
/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For tlb vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

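/*
 * tlb_start_vma() primes an empty range (range_end = 0) and flushes
 * the caches for the vma; tlb_end_vma() then issues whatever TLB
 * range the intervening tlb_remove_tlb_entry() calls accumulated.
 */
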
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

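/*
 * __tlb_remove_page() returns the number of slots still free in the
 * gather; tlb_remove_page() drains the batch via tlb_flush_mmu() as
 * soon as that count hits zero.
 */
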
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_page(tlb, pte);
}

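/*
 * Worked example for the classic-MMU branch above, assuming 4KiB
 * pages: addr = 0x00234000 masks down to 0x00200000, and the two
 * tlb_add_flush() calls cover 0x002ff000 and 0x00300000, one page on
 * either side of the boundary between the two 1MB pmd entries.
 */
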
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}

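/*
 * Only LPAE has a real pmd level to free here; with the classic
 * 2-level tables the pmd is folded into the pgd, so this is
 * deliberately a no-op in that configuration.
 */
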
static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif