/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case and the
 *  munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* CONFIG_MMU */

#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;

        return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}
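
/*
 * Usage sketch (editorial note, not part of the original header): the
 * generic mm unmap paths are expected to drive this interface roughly
 * in the order below when tearing down mappings.  The sequence is
 * illustrative only, not a definitive caller:
 *
 *	tlb = tlb_gather_mmu(mm, full_mm_flush);
 *	  ... for each VMA: tlb_start_vma(), clear PTEs with
 *	      tlb_remove_tlb_entry()/tlb_remove_page(), tlb_end_vma() ...
 *	tlb_finish_mmu(tlb, start, end);
 *
 * With fullmm set, the single flush_tlb_mm() above does all the TLB
 * invalidation, matching the whole-TLB-invalidate observation in the
 * header comment.
 */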

#define tlb_remove_tlb_entry(tlb,ptep,address)  do { } while (0)

/*
 * For TLB vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
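
/*
 * Illustrative note (editorial, not from the original file): for a
 * partial unmap (munmap), tlb->fullmm is 0, so each VMA gets an
 * explicit flush_cache_range() before and flush_tlb_range() after its
 * PTEs are removed:
 *
 *	tlb_start_vma(tlb, vma);	// flush_cache_range()
 *	  ... clear the PTEs covered by vma ...
 *	tlb_end_vma(tlb, vma);		// flush_tlb_range()
 *
 * For a full-MM teardown (exit/exec), both calls become no-ops and the
 * single flush_tlb_mm() in tlb_finish_mmu() covers everything.
 */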

#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep)         pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp)         pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)          do { } while (0)

#endif /* CONFIG_MMU */
#endif /* __ASMARM_TLB_H */