// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

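/* Flush every virtual address queued in this CPU's TLB batch:
 * update the TSB for each entry, issue the cross-CPU (or local)
 * TLB demap operations, then reset the batch to empty.
 */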
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

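/* Open a lazy MMU window: while tb->active is set,
 * tlb_batch_add_one() queues flushes into the per-CPU batch
 * instead of issuing them immediately.
 */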
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

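/* Close the lazy MMU window, draining anything still queued in
 * the batch before clearing tb->active.
 */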
void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

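/* Queue one virtual address for a deferred TLB flush.  The low bit
 * of the page-aligned address encodes whether the mapping was
 * executable.  When the batch is not active the flush is performed
 * immediately; otherwise the batch is drained whenever the mm or
 * the hugepage shift changes, or once TLB_BATCH_NR entries have
 * accumulated.
 */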
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

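/* Called from the pte teardown paths.  On pre-hypervisor (sun4u)
 * chips the D-cache is virtually indexed, so a dirty page may have
 * stale cache lines under a different color: if the kernel and user
 * addresses differ in bit 13 (the 8K page-size bit) the page is
 * flushed from the D-cache before the TLB entry is queued for
 * invalidation.
 */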
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
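/* Walk the base pages under a non-huge pmd and queue a flush for
 * each valid pte in the HPAGE_SIZE region.
 */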
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

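/* Install a pmd, maintaining the THP/huge-zero-page counters and
 * queueing TLB flushes for whatever mapping is being replaced.  An
 * HPAGE_SIZE THP is backed by two REAL_HPAGE_SIZE TSB entries,
 * hence the two tlb_batch_add_one() calls on the huge path.
 */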
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

/*
 * This routine is only called when splitting a THP
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;
}

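/* Stash a preallocated pte page on the pmd's deposit list so a
 * later THP split can reuse it.  Entries are chained through a
 * list_head overlaid on the pgtable memory itself.
 */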
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

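/* Take one pte page back off the pmd's deposit list, zeroing the
 * two pte slots the list_head overlaid before handing the page
 * back to the caller.
 */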
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */