/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>

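/* Nothing to do to enter lazy TLB mode on sparc64; the bookkeeping
 * that lazy TLB switching depends on is carried by mm->cpu_vm_mask
 * in switch_mm() below.
 */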
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

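/* Shared state of the context-ID allocator: ctx_alloc_lock guards
 * tlb_context_cache (the most recently handed-out context value) and
 * mmu_context_bmap (a bitmap of in-use context IDs, 64 per word, as
 * destroy_context() below relies on).
 */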
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

extern void get_new_mmu_context(struct mm_struct *mm);

/* Initialize a new mmu context. This is invoked when a new
 * address space instance (unique or shared) is instantiated.
 * This just needs to set mm->context to an invalid context.
 */
#define init_new_context(__tsk, __mm)	\
	(((__mm)->context.sparc64_ctx_val = 0UL), 0)

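/* The trailing ", 0" above makes the comma expression evaluate to 0,
 * the success value init_new_context() must return to the generic mm
 * code; the real context ID is then allocated lazily by
 * get_new_mmu_context() the first time the mm is switched to.
 */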
/* Destroy a dead context. This occurs when mmput drops the
 * mm_users count to zero, the mmaps have been released, and
 * all the page tables have been flushed. Our job is to destroy
 * any remaining processor-specific state, and in the sparc64
 * case this just means freeing up the mmu context ID held by
 * this task if valid.
 */
#define destroy_context(__mm)						\
do {	spin_lock(&ctx_alloc_lock);					\
	if (CTX_VALID((__mm)->context)) {				\
		unsigned long nr = CTX_NRBITS((__mm)->context);		\
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));		\
	}								\
	spin_unlock(&ctx_alloc_lock);					\
} while(0)

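/* Worked example of the bitmap math above: for context ID nr == 70,
 * nr >> 6 == 1 selects the second 64-bit word of mmu_context_bmap,
 * and ~(1UL << (nr & 63)) == ~(1UL << 6) clears bit 6 within it,
 * handing ID 70 back to the allocator.
 */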
/* Reload the two core values used by TLB miss handler
 * processing on sparc64. They are:
 * 1) The physical address of mm->pgd; when full page
 *    table walks are necessary, this is where the
 *    search begins.
 * 2) A "PGD cache". For 32-bit tasks only pgd[0] is
 *    ever used, since that maps the entire low 4GB
 *    completely. To speed up TLB miss processing we
 *    make this value available to the handlers. This
 *    decreases the amount of memory traffic incurred.
 */
#define reload_tlbmiss_state(__tsk, __mm) \
do { \
	register unsigned long paddr asm("o5"); \
	register unsigned long pgd_cache asm("o4"); \
	paddr = __pa((__mm)->pgd); \
	pgd_cache = 0UL; \
	if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
		pgd_cache = get_pgd_cache((__mm)->pgd); \
	__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
			     "mov %3, %%g4\n\t" \
			     "mov %0, %%g7\n\t" \
			     "stxa %1, [%%g4] %2\n\t" \
			     "membar #Sync\n\t" \
			     "wrpr %%g0, 0x096, %%pstate" \
			     : /* no outputs */ \
			     : "r" (paddr), "r" (pgd_cache), \
			       "i" (ASI_DMMU), "i" (TSB_REG)); \
} while(0)

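/* One reading of the asm above (the PSTATE values are written as raw
 * numbers here, so take this as interpretation rather than authority):
 * the first wrpr moves to a PSTATE with interrupts disabled and the
 * MMU-globals register set selected, so the %g4 and %g7 written here
 * are the copies the TLB miss handlers actually see. %g7 then caches
 * the physical address of the pgd, and the stxa stores pgd_cache into
 * the TSB_REG MMU register via ASI_DMMU; the final wrpr drops back to
 * the normal globals with interrupts on.
 */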
/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
	__asm__ __volatile__("stxa %0, [%1] %2\n\t" \
			     "flush %%g6" \
			     : /* No outputs */ \
			     : "r" (CTX_HWBITS((__mm)->context)), \
			       "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU))

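/* The stxa above writes the hardware context number into the MMU's
 * SECONDARY_CONTEXT register through ASI_DMMU. The "flush %g6" is,
 * as far as I can tell, just a barrier: %g6 supplies a known-valid
 * kernel address, and the flush forces the register update to take
 * effect before any subsequent translation.
 */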
extern void __flush_tlb_mm(unsigned long, unsigned long);

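/* __flush_tlb_mm(ctx, SECONDARY_CONTEXT) flushes every TLB entry
 * tagged with hardware context ctx; both call sites below pass the
 * secondary context register as the second argument.
 */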
/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid;
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);
	spin_unlock(&mm->page_table_lock);

	if (!ctx_valid || (old_mm != mm)) {
		load_secondary_context(mm);
		reload_tlbmiss_state(tsk, mm);
	}

	/* Even if (mm == old_mm) we _must_ check
	 * the cpu_vm_mask. If we do not we could
	 * corrupt the TLB state because of how
	 * smp_flush_tlb_{page,range,mm} on sparc64
	 * and lazy tlb switches work. -DaveM
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
}

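/* The cpu_vm_mask dance above matters because a CPU that is absent
 * from the mask is skipped by the cross-call TLB flushes, so it may
 * still hold stale entries for this context; flushing the whole
 * context on first switch-in brings that CPU back in sync.
 */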
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);
	spin_unlock(&mm->page_table_lock);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	reload_tlbmiss_state(current, mm);
}

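/* Unlike switch_mm(), activate_mm() loads the context and flushes
 * unconditionally: it installs a brand-new address space (the exec
 * path, for instance), so there is no previous state worth keeping.
 */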
#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */