/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling. This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
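/*
 * ->start/->end delimit the user address range whose translations have
 * been torn down since the last flush, and ->fullmm is set when the
 * entire address space is going away. init_tlb_gather() resets the
 * range to "empty" (start = TASK_SIZE, end = 0) so that the min/max
 * updates in tlb_remove_tlb_entry() below do the right thing.
 */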
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	unsigned long		start, end;
};

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
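
/*
 * The core mm passes start = 0, end = -1 when the whole address space
 * is being torn down (exit/execve); "!(start | (end+1))" matches
 * exactly that case.
 */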

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (tlb->fullmm || force)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
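
/*
 * Rough sketch of how the core mm drives these hooks on unmap
 * (abridged; see mm/memory.c for the real sequence):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for each present pte in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	// grow flush range
 *		tlb_remove_page(&tlb, page);		// freed immediately
 *	tlb_end_vma(&tlb, vma);		// flush the gathered [start, end)
 *	tlb_finish_mmu(&tlb, start, end);
 */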

static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->end) {
		flush_tlb_range(vma, tlb->start, tlb->end);
		init_tlb_gather(tlb);
	}
}

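/*
 * Unlike the generic mmu_gather, pages are freed as soon as they are
 * handed to __tlb_remove_page() below, so there is never a pending
 * batch to flush and these callbacks can be no-ops.
 */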
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

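/*
 * Page table pages are handed straight back to the allocator rather
 * than being queued on the gather.
 */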
#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

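/*
 * Wired ("locked-down") TLB entries stay resident across TLB flushes.
 * Only SH-4 and SH-5 (SUPERH64) implement them; the stubs BUG() so
 * that any misuse on other parts fails loudly.
 */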
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
				  unsigned long addr, pte_t pte)
{
	BUG();
}

static inline void tlb_unwire_entry(void)
{
	BUG();
}
#endif

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */