arch/powerpc/mm/book3s32/tlb.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash) {
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
EXPORT_SYMBOL(flush_hash_entry);

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (!Hash) {
		/*
		 * 603 needs to flush the whole TLB here since
		 * it doesn't use a hash table.
		 */
		_tlbia();
	}
}

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * since the hardware hash table functions as an extension of the
 * tlb as far as the linux tables are concerned, flush it too.
 *    -- Cort
 */

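/*
 * Flush the TLB, and the hash-table entries backing it, for the given
 * range of addresses in mm, walking the page tables one PMD at a time.
 */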
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	start &= PAGE_MASK;
	if (!Hash) {
		if (end - start <= PAGE_SIZE)
			_tlbie(start);
		else
			_tlbia();
		return;
	}
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_off(mm, start);
	for (;;) {
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;

	if (!Hash) {
		_tlbia();
		return;
	}

	/*
	 * It is safe to go down the mm's list of vmas when called
	 * from dup_mmap, holding mmap_lock. It would also be safe from
	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
	 * but it seems dup_mmap is the only SMP case which gets here.
	 */
	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(flush_tlb_mm);

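/*
 * Flush the TLB entry (and any hash-table entry) for a single page.
 * Kernel addresses are looked up in init_mm rather than the vma's mm.
 */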
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (!Hash) {
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_off(mm, vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(flush_tlb_page);

/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_range);

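/*
 * No hash-MMU setup is needed this early on 32-bit Book3S; this is an
 * empty stub for the early_init_mmu() hook.
 */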
void __init early_init_mmu(void)
{
}