/*
 * PA-RISC TLB flushing routines (asm/tlbflush.h).
 */
1 | #ifndef _PARISC_TLBFLUSH_H |
2 | #define _PARISC_TLBFLUSH_H | |
3 | ||
4 | /* TLB flushing routines.... */ | |
5 | ||
6 | #include <linux/config.h> | |
7 | #include <linux/mm.h> | |
8 | #include <asm/mmu_context.h> | |
9 | ||
10 | extern void flush_tlb_all(void); | |
11 | ||
12 | /* | |
13 | * flush_tlb_mm() | |
14 | * | |
15 | * XXX This code is NOT valid for HP-UX compatibility processes, | |
16 | * (although it will probably work 99% of the time). HP-UX | |
17 | * processes are free to play with the space id's and save them | |
18 | * over long periods of time, etc. so we have to preserve the | |
19 | * space and just flush the entire tlb. We need to check the | |
20 | * personality in order to do that, but the personality is not | |
21 | * currently being set correctly. | |
22 | * | |
23 | * Of course, Linux processes could do the same thing, but | |
24 | * we don't support that (and the compilers, dynamic linker, | |
25 | * etc. do not do that). | |
26 | */ | |
27 | ||
28 | static inline void flush_tlb_mm(struct mm_struct *mm) | |
29 | { | |
30 | BUG_ON(mm == &init_mm); /* Should never happen */ | |
31 | ||
32 | #ifdef CONFIG_SMP | |
33 | flush_tlb_all(); | |
34 | #else | |
35 | if (mm) { | |
36 | if (mm->context != 0) | |
37 | free_sid(mm->context); | |
38 | mm->context = alloc_sid(); | |
39 | if (mm == current->active_mm) | |
40 | load_context(mm->context); | |
41 | } | |
42 | #endif | |
43 | } | |
44 | ||
/*
 * No-op: on PA-RISC nothing needs to be done when page-table pages are
 * freed, since the TLB caches translations, not page-table contents.
 *
 * Was "extern __inline__", which under GCC's extern-inline semantics can
 * emit (or demand) an out-of-line external definition and thus break when
 * this header is included from multiple translation units.  "static inline"
 * matches every other function in this header and is always safe in a header.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}
48 | ||
/*
 * Flush the TLB entries for a single user page.
 *
 * Purges both the data TLB (pdtlb) and the instruction TLB (pitlb)
 * unconditionally: for one page the branch on split_tlb (see
 * flush_tlb_range) is not worth it.
 *
 * Ordering matters: mb() before switching sr1 to the target space with
 * mtsp(), and the purge instructions bracketed by purge_tlb_start()/
 * purge_tlb_end() — NOTE(review): presumably these take a lock and/or
 * disable interrupts so the sr1/purge sequence is not interleaved;
 * confirm against their definitions.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

	mb();
	mtsp(vma->vm_mm->context,1);
	purge_tlb_start();
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end();
}
61 | ||
/*
 * Flush the TLB entries for a range of user addresses.
 *
 * For large ranges (>= 512 pages, i.e. 2MB with 4k pages — an arbitrary,
 * untuned cutoff) a full TLB flush is cheaper than purging page by page.
 * Otherwise each page in the range is purged individually; on machines
 * with a split TLB (split_tlb != 0) both the data and instruction TLBs
 * must be purged, otherwise a single pdtlb covers both.
 *
 * NOTE(review): unlike flush_tlb_page() there is no mb() before the
 * mtsp() here — looks like it may rely on purge_tlb_start() ordering,
 * or may be an oversight; confirm before relying on it.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	unsigned long npages;

	/* Round the (possibly unaligned) range up to whole pages. */
	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {

		mtsp(vma->vm_mm->context,1);
		purge_tlb_start();
		if (split_tlb) {
			/* Separate I and D TLBs: purge both per page. */
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			/* Unified TLB: one purge per page suffices. */
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end();
	}
}
89 | ||
90 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() | |
91 | ||
92 | #endif |