#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int need_flush; /* Really unmapped some ptes? */
	unsigned long start;
	unsigned long end;
	unsigned int fullmm; /* non-zero means full mm flush */
};

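/*
 * Usage sketch (illustration only, not part of this header): the generic
 * mm code drives a shootdown roughly as
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... unmap ptes, calling tlb_remove_tlb_entry() and
 *	    tlb_remove_page() along the way ...
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * so the helpers below only have to track the range that was really
 * touched and flush it once at the end.
 */
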
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

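/*
 * Worked example (illustration, assuming 4K pages): after init_tlb_gather()
 * for a partial flush the tracked range is the empty [TASK_SIZE, 0), so
 * the first __tlb_remove_tlb_entry() snaps it to a single page; unmapping
 * 0x2000 and then 0x5000 leaves tlb->start == 0x2000, tlb->end == 0x6000.
 */
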
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}

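/*
 * Note: !(start | (end + 1)) is non-zero only when start == 0 and
 * end == ~0UL, i.e. the "flush the whole mm" case (exit_mmap() passes
 * 0 and -1); init_tlb_gather() then widens the range to all of TASK_SIZE.
 */
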
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

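/*
 * Hypothetical caller (illustration only): a pte-zapping loop frees each
 * page through the gather rather than directly, e.g.
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *	tlb_remove_page(tlb, pte_page(pte));
 *
 * On UML the page really is freed right away (free_page_and_swap_cache()
 * above); only the ranged TLB flush itself is deferred.
 */
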
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

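/*
 * The *_page_size() variants ignore page_size here: the eventual flush is
 * driven purely by the tracked [start, end) range, so they can simply
 * forward to the plain helpers.
 */
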
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

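/*
 * Hypothetical use (illustration only):
 *
 *	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 *	tlb_remove_tlb_entry(tlb, ptep, addr);
 *
 * Callers only do this for ptes that were actually present, which is what
 * lets tlb_flush_mmu() skip the flush entirely when nothing was mapped.
 */
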
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif