/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H
#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry

#include <asm-generic/tlb.h>

#include <linux/pagemap.h>
#include <linux/swap.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
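/*
 * RCU callback that finally frees a disconnected page-table page, once
 * no software walker (e.g. fast GUP) can still be traversing it.
 */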
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * There are three ways the TLB shootdown code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     Page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 */
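/*
 * A full-mm teardown can invalidate the whole context in one go;
 * otherwise only the gathered [start, end) range needs flushing,
 * after which the range is reset for the next batch.
 */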
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end > 0) {
		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
		flush_tlb_range(&vma, tlb->start, tlb->end);
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

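/*
 * Widen the pending flush range to cover the page at @addr; fullmm
 * gathers flush the entire address space, so no tracking is needed.
 */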
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		tlb->start = min(tlb->start, addr);
		tlb->end = max(tlb->end, addr + PAGE_SIZE);
	}
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For the tlb vma handling, we can optimise these away when we're
 * doing a full MM flush.  When we're doing a munmap, the vmas are
 * adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

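/*
 * Flush whatever was gathered for this vma before moving on to the
 * next one, so that a range never spans unrelated vmas.
 */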
static inline void tlb_end_vma(struct mmu_gather *tlb,
			       struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

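/*
 * Undo the pgtable_page_ctor() accounting before the pte page itself
 * is queued for freeing.
 */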
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, pte);
}

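/*
 * With more than two translation levels, pmd (and, beyond three
 * levels, pud) table pages are freed the same way; only pte pages
 * carry the extra pgtable_page_dtor() accounting.
 */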
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
}
#endif

#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pudp));
}
#endif

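/*
 * Removing a huge (pmd-level) entry memorizes the range for the flush
 * just like the pte variant above.
 */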
static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
					      unsigned long address)
{
	tlb_add_flush(tlb, address);
}

#endif