arch/um/include/asm/tlb.h
Fix TLB gather virtual address range invalidation corner cases
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/* Range tracking happens in the mmu_gather itself and the whole range is
 * invalidated in one pass at flush time, so the per-VMA hooks are no-ops. */
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	/* Grow the to-be-flushed range to cover [address, address + PAGE_SIZE). */
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

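/*
 * Illustrative sketch (not part of the original header): how the tracked
 * range evolves across calls, assuming 4 KiB pages and made-up addresses.
 * Starting from the "empty" sentinel set up by init_tlb_gather() below
 * (start = TASK_SIZE, end = 0), the first call pulls both bounds onto the
 * page, and later calls only ever widen the interval:
 *
 *	__tlb_remove_tlb_entry(&tlb, ptep, 0x3000);  start = 0x3000, end = 0x4000
 *	__tlb_remove_tlb_entry(&tlb, ptep, 0x1000);  start = 0x1000, end = 0x4000
 *	__tlb_remove_tlb_entry(&tlb, ptep, 0x7000);  start = 0x1000, end = 0x8000
 */
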
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	/* Start from an "empty" range: any address recorded by
	 * __tlb_remove_tlb_entry() will pull both bounds onto itself. */
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	/* (start == 0 && end == ~0UL) marks a full-mm teardown. */
	tlb->fullmm = !(start | (end + 1));

	init_tlb_gather(tlb);
}

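/*
 * Illustrative sketch (not in the original header): the fullmm test above
 * is a branch-free way of writing (start == 0 && end == ~0UL), the sentinel
 * range the generic mm code passes when an entire address space is being
 * torn down:
 *
 *	start = 0,      end = ~0UL:   0 | (~0UL + 1) == 0  ->  fullmm = 1
 *	start = 0x1000, end = 0x9000: nonzero              ->  fullmm = 0
 *
 * Note that the explicit start/end assignments in tlb_gather_mmu() are
 * immediately recomputed by init_tlb_gather(); only mm and fullmm survive.
 */
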
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	/* Re-arm the gather so it can batch another range. */
	init_tlb_gather(tlb);
}

/* tlb_finish_mmu
 * Called at the end of the shootdown operation to free up any resources
 * that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

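/*
 * Illustrative sketch (not in the original header) of how the generic mm
 * code drives this API when unmapping a range; the real call sites live in
 * mm/memory.c and differ in detail, so treat this as a shape, not a quote:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for each present pte in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, address);
 *		tlb_remove_page(&tlb, page);
 *	tlb_finish_mmu(&tlb, start, end);
 */
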
/* tlb_remove_page
 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 * while handling the additional races in SMP caused by other CPUs
 * caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	/* UML frees the page immediately rather than batching it, so the
	 * caller never needs to flush mid-gather; hence the return value. */
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

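/*
 * Illustrative sketch (not in the original header): ->need_flush is only set
 * when a live pte or page was actually removed, so a gather that found
 * nothing mapped makes tlb_flush_mmu() return early and no
 * flush_tlb_mm_range() call is issued. That is the optimisation the comment
 * above describes:
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);    need_flush == 0
 *	(no ptes were actually present)
 *	tlb_finish_mmu(&tlb, start, end);        tlb_flush_mmu() is a no-op
 */
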
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif