#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context numbers are
 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
 * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
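
/*
 * Illustrative example (added commentary, not part of the original source):
 * for an mm that was assigned context number 5, a user address in region 3
 * (i.e., one whose top three bits are 011, so addr >> 61 == 3) gets region
 * id (5 << 3) | 3 == 43.  Each of a task's five user regions thus derives
 * a distinct rid from the single per-mm context number.
 */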

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because an old
 * context number might have been reused.  This is signalled by the ia64_need_tlb_flush
 * per-CPU variable, which is checked in the routine below.  Called by activate_mm().
 * <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
				local_flush_tlb_all();
				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}
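
/*
 * Added commentary: the flag is tested once without the lock as a fast
 * path and then re-tested under ia64_ctx.lock.  Taking the same lock that
 * wrap_mmu_context() runs under serializes the flush and the clearing of
 * the flag against a concurrent context wrap on another CPU.
 */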

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (unlikely(!context)) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			/* re-check, now that we've got the lock: */
			context = mm->context;
			if (context == 0) {
				cpus_clear(mm->cpu_vm_mask);
				if (ia64_ctx.next >= ia64_ctx.limit)
					wrap_mmu_context(mm);
				mm->context = context = ia64_ctx.next++;
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
 * address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
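
/*
 * Added commentary: an IA-64 region register packs its fields as
 *
 *	bit      0: ve  (VHPT walker enable)
 *	bits  7..2: ps  (log2 of the preferred page size)
 *	bits 31..8: rid (region id)
 *
 * which is what the arithmetic above builds.  Worked example (illustrative
 * only): with context 1 and PAGE_SHIFT == 14 (16KB pages), rid = 1 << 3 = 8,
 * so rr0 = (8 << 8) | (14 << 2) | 1 = 0x839, and rr1..rr4 carry rids 9..12
 * via the rid_incr = 1 << 8 step into the rid field.
 */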

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}
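
/*
 * Added commentary: the retry loop above closes a race with a concurrent
 * context wrap on another CPU.  If mm->context was reassigned after
 * get_mmu_context() returned, the region registers were just loaded with a
 * stale rid, so the load is redone until the observed context is stable.
 */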

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt handlers cannot
	 * touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
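
/*
 * Added commentary: switch_mm() can simply alias activate_mm() here because
 * region ids make the previous task's stale translations harmless; nothing
 * from prev needs to be torn down, since its rids remain valid until the
 * context counter wraps, at which point wrap_mmu_context() forces a flush.
 */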

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */