/*
 * include/asm-ia64/tlbflush.h
 */
1 | #ifndef _ASM_IA64_TLBFLUSH_H |
2 | #define _ASM_IA64_TLBFLUSH_H | |
3 | ||
4 | /* | |
5 | * Copyright (C) 2002 Hewlett-Packard Co | |
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | |
7 | */ | |
8 | ||
9 | #include <linux/config.h> | |
10 | ||
11 | #include <linux/mm.h> | |
12 | ||
13 | #include <asm/intrinsics.h> | |
14 | #include <asm/mmu_context.h> | |
15 | #include <asm/page.h> | |
16 | ||
17 | /* | |
18 | * Now for some TLB flushing routines. This is the kind of stuff that | |
19 | * can be very expensive, so try to avoid them whenever possible. | |
20 | */ | |
21 | ||
22 | /* | |
23 | * Flush everything (kernel mapping may also have changed due to | |
24 | * vmalloc/vfree). | |
25 | */ | |
26 | extern void local_flush_tlb_all (void); | |
27 | ||
28 | #ifdef CONFIG_SMP | |
29 | extern void smp_flush_tlb_all (void); | |
30 | extern void smp_flush_tlb_mm (struct mm_struct *mm); | |
31 | # define flush_tlb_all() smp_flush_tlb_all() | |
32 | #else | |
33 | # define flush_tlb_all() local_flush_tlb_all() | |
34 | #endif | |
35 | ||
36 | static inline void | |
37 | local_finish_flush_tlb_mm (struct mm_struct *mm) | |
38 | { | |
39 | if (mm == current->active_mm) | |
40 | activate_context(mm); | |
41 | } | |
42 | ||
/*
 * Flush a specified user mapping. This is called, e.g., as a result of fork() and
 * exit(). fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 *
 * NOTE(review): the ordering below is deliberate — the mm's old context number
 * is marked dirty in ia64_ctx.flushmap *before* mm->context is cleared, so a
 * later context allocation cannot hand the stale number to another mm while
 * its TLB entries may still be live.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/* Record the old context number as needing a flush before reuse. */
	set_bit(mm->context, ia64_ctx.flushmap);
	/* Force allocation of a new context number on next activation. */
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	/* Other CPUs may hold this mm active: flush it everywhere. */
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}
66 | ||
67 | extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end); | |
68 | ||
69 | /* | |
70 | * Page-granular tlb flush. | |
71 | */ | |
72 | static inline void | |
73 | flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) | |
74 | { | |
75 | #ifdef CONFIG_SMP | |
76 | flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); | |
77 | #else | |
78 | if (vma->vm_mm == current->active_mm) | |
79 | ia64_ptcl(addr, (PAGE_SHIFT << 2)); | |
80 | else | |
81 | vma->vm_mm->context = 0; | |
82 | #endif | |
83 | } | |
84 | ||
/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 *
 * Intentionally a no-op: kept only so generic mm code that calls it
 * still compiles on ia64.
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated. The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h).
	 */
}
97 | ||
98 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ | |
99 | ||
100 | #endif /* _ASM_IA64_TLBFLUSH_H */ |