/*
 * arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

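/*
 * No MMU means no TLB to maintain; the cast to void merely consumes the
 * argument so the macro compiles without "unused" warnings.
 */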
#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

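/*
 * Number of page pointers gathered in the on-stack mmu_gather::local[]
 * array before __tlb_alloc_page() below tries to upgrade to a full page
 * of pointers.
 */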
#define MMU_GATHER_BUNDLE	8

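/*
 * With CONFIG_HAVE_RCU_TABLE_FREE, page-table pages are batched up and
 * freed only after an RCU grace period, so lockless walkers (e.g. fast
 * GUP) cannot trip over a table page that has already been reused.
 */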
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
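/*
 * pages/nr/max implement the page gather: pages points at local[] until
 * __tlb_alloc_page() secures a full page of pointers.  range_start and
 * range_end accumulate the virtual span that still needs a TLB flush.
 */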
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

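/* Grow the pending flush range to cover the page at addr. */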
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

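/*
 * Opportunistically (GFP_NOWAIT) replace the on-stack bundle with a
 * whole page of page pointers; failure is fine, we just flush more
 * often.
 */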
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

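/*
 * Flush before free: TLB entries must be invalidated before the
 * gathered pages are released, so no CPU can still reach a page that
 * is about to be reused.
 */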
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

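/*
 * A full-mm teardown is requested with start == 0 and end == -1, so
 * "!(start | (end + 1))" is non-zero exactly in that case.
 */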
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->range_start = start;
		tlb->range_end = end;
	}

	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

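/*
 * Rough sketch of how the generic mm code drives this, via the
 * tlb_gather_mmu()/tlb_finish_mmu() wrappers in mm/memory.c:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	...	// per pte: tlb_remove_tlb_entry() + tlb_remove_page()
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */
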
/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
/*
 * In the case of tlb vma handling, we can optimise these away when
 * we're doing a full MM flush.  When we're doing a munmap, the vmas
 * are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

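/*
 * Queue a page for freeing; returns true once the gather is full and
 * the caller must call tlb_flush_mmu() before gathering more.
 */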
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

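/* ARM's gather is page-size agnostic; the _size variants just forward. */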
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

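/*
 * Freeing a pte page: undo pgtable_page_ctor(), make sure the pending
 * flush range covers the pmd entries that pointed at it, then hand the
 * page to the gather via tlb_remove_entry().
 */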
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}

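/*
 * Only LPAE has a separately-allocated pmd level; with the classic
 * 2-level MMU the pmd is folded into the pgd and there is nothing to
 * free here.
 */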
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

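/*
 * Generic mm code calls this when the page size of removed entries
 * changes mid-gather; ARM's range tracking does not depend on page
 * size, so the hook is (presumably deliberately) a no-op.
 */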
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* CONFIG_MMU */
#endif