/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context() to only handle live-mm.
 *   retiring-mm handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>

#include <asm-generic/mm_hooks.h>

/* ARC700 ASID Management
 *
 * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
 * with same vaddr (different tasks) to co-exist. This provides for
 * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASID is done using software tracker @asid_cache.
 * When it reaches max 255, the allocation cycle starts afresh by flushing
 * the entire TLB and wrapping ASID back to zero.
 *
 * For book-keeping, Linux uses a couple of data-structures:
 *  -mm_struct has an @asid field to keep a note of task's ASID (needed at
 *   the time of say switch_mm())
 *  -An array of mm structs @asid_mm_map[] for the reverse asid->mm mapping:
 *   given an ASID, find the mm struct associated with it.
 *
 * The round-robin allocation algorithm allows for ASID stealing.
 * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
 * already assigned to another (switched-out) task. Obviously the prev owner
 * is marked with an invalid ASID to make it request a new ASID when it
 * gets scheduled next time. However its TLB entries (with ASID "x") could
 * exist, which must be cleared before the same ASID is used by the new
 * owner. Flushing them would be a plausible but costly solution. Instead we
 * force an allocation policy quirk, which ensures that a stolen ASID won't
 * have any TLB entries associated with it, alleviating the need to flush:
 * an ASID allocated in a prev cycle is simply not allowed to be used past
 * a roll-over into the next cycle.
 * When this happens (i.e. task ASID > asid tracker), the task needs to
 * refresh its ASID, aligning it to the current value of the tracker. If the
 * task doesn't get scheduled past a roll-over, and hence its ASID is not
 * yet realigned with the tracker, such an ASID is still safely reusable:
 * it is guaranteed that TLB entries with that ASID won't exist, since the
 * roll-over flushed the entire TLB.
 */
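
/*
 * Illustrative trace of the above quirk (numbers are made up, purely for
 * exposition):
 *
 *	cycle N:   task A is allocated ASID 10 (asid_mm_map[10] = A)
 *	roll-over: asid_cache increments past 255, so flush_tlb_all() runs
 *	           and asid_cache restarts at 0 (cycle N+1 begins)
 *	cycle N+1: A's saved ASID (10) is now > asid_cache, so the next time
 *	           A is switched in it is given a fresh ASID.
 *	           If instead A never runs and asid_cache climbs back to 10,
 *	           slot 10 is stolen for some other task B. This is safe:
 *	           A's TLB entries (tagged ASID 10) were wiped by the
 *	           roll-over flush, and A is marked NO_ASID so it will
 *	           request a new one whenever it is scheduled again.
 */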

#define FIRST_ASID	0
#define MAX_ASID	255		/* 8 bit PID field in PID Aux reg */
#define NO_ASID		(MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
#define NUM_ASID	((MAX_ASID - FIRST_ASID) + 1)

/* ASID to mm struct mapping */
extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
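/*
 * Note the "+ 1": NO_ASID (== NUM_ASID) is itself used as an index into
 * this array. get_new_mmu_context() below clears asid_mm_map[] slots
 * unconditionally, so an mm whose asid is still NO_ASID harmlessly hits
 * this extra sentinel slot instead of overrunning the array.
 */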

extern int asid_cache;

/*
 * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
 * Also set the MMU PID register to existing/updated ASID
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	struct mm_struct *prev_owner;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to new ASID if it was not from current alloc-cycle/generation.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *	 generation, e.g. local_flush_tlb_mm() for the forking parent,
	 *	 first need to destroy the context, setting it to an invalid
	 *	 value.
	 */
	if (mm->context.asid <= asid_cache)
		goto set_hw;

	/*
	 * Relinquish the currently owned ASID (if any).
	 * Doing unconditionally saves a cmp-n-branch; for already unused
	 * ASID slot, the value was/remains NULL
	 */
	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;

	/* move to new ASID */
	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
		asid_cache = FIRST_ASID;
		flush_tlb_all();
	}

	/*
	 * Is next ASID already owned by some-one else (we are stealing it)?
	 * If so, let the orig owner be aware of this, so when it runs, it
	 * asks for a brand new ASID. This would only happen for a long-lived
	 * task with ASID from prev allocation cycle (before ASID roll-over).
	 *
	 * This might look wrong - if we are re-using some other task's ASID,
	 * won't we use its stale TLB entries too? Actually the algorithm
	 * takes care of such a case: it ensures that a task with an ASID from
	 * a prev alloc cycle, when scheduled, will refresh its ASID.
	 * The stealing scenario described here will only happen if that task
	 * didn't get a chance to refresh its ASID - implying stale entries
	 * won't exist.
	 */
	prev_owner = asid_mm_map[asid_cache];
	if (prev_owner)
		prev_owner->context.asid = NO_ASID;

	/* Assign new ASID to tsk */
	asid_mm_map[asid_cache] = mm;
	mm->context.asid = asid_cache;

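	/*
	 * Program the hardware with the (possibly updated) ASID. The 8-bit
	 * ASID apparently shares the PID aux register with the MMU enable
	 * bit - hence the OR with MMU_ENABLE, which keeps the MMU turned on
	 * across the update.
	 */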
set_hw:
	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);

	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.asid = NO_ASID;
	return 0;
}

/*
 * Prepare the MMU for task: setup PID reg with allocated ASID
 * If task doesn't have an ASID (never allocated or stolen), get a new ASID
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	get_new_mmu_context(next);
}

/*
 * Called at the time of execve() to get a new ASID
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always returns a new ASID, because mm has
 * an unallocated "initial" value, while in the latter it moves to a new
 * ASID only if it was unallocated
 */
#define activate_mm(prev, next)		switch_mm(prev, next, NULL)

static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);

	asid_mm_map[mm->context.asid] = NULL;
	mm->context.asid = NO_ASID;

	local_irq_restore(flags);
}

/*
 * It seemed that deactivate_mm() is a reasonable place to do book-keeping
 * for retiring-mm. However destroy_context() still needs to do that because
 * between mm_release() => deactivate_mm() and
 * mmput() => .. => __mmdrop() => destroy_context()
 * there is a good chance that task gets sched-out/in, making its ASID valid
 * again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)		do { } while (0)

#define enter_lazy_tlb(mm, tsk)

#endif	/* _ASM_ARC_MMU_CONTEXT_H */