#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/mm_types.h>

#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>

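/* sparc64 keeps no lazy-TLB state, so this hook is a no-op. */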
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

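/* Shared state for hardware context-ID allocation, guarded by
 * ctx_alloc_lock and consumed by get_new_mmu_context().
 */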
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

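/* Per-cpu record of the mm whose context is currently loaded in this
 * cpu's secondary context register; switch_mm() keeps it up to date.
 */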
DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

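/* Low-level TSB switch, implemented in assembly.  @pgd_pa and
 * @tsb_descr_pa are physical addresses, as the callers below show.
 */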
void __tsb_context_switch(unsigned long pgd_pa,
                          struct tsb_config *tsb_base,
                          struct tsb_config *tsb_huge,
                          unsigned long tsb_descr_pa,
                          unsigned long secondary_ctx);

static inline void tsb_context_switch_ctx(struct mm_struct *mm,
                                          unsigned long ctx)
{
        __tsb_context_switch(__pa(mm->pgd),
                             &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
                             (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
                              &mm->context.tsb_block[MM_TSB_HUGE] :
                              NULL)
#else
                             NULL
#endif
                             , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
                             ctx);
}

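/* Convenience wrapper: the second argument is handed straight through
 * to __tsb_context_switch() as secondary_ctx; this form passes zero.
 */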
#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)

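/* Resize the TSB selected by @tsb_index once @mm_rss outgrows the
 * current table.
 */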
void tsb_grow(struct mm_struct *mm,
              unsigned long tsb_index,
              unsigned long mm_rss);
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware.  The stxa at label 661 uses
 * the sun4u ASI_DMMU encoding; on sun4v the boot-time patcher rewrites
 * that single instruction (via .sun4v_1insn_patch) to use ASI_MMU.
 */
#define load_secondary_context(__mm) \
        __asm__ __volatile__( \
        "\n661: stxa            %0, [%1] %2\n" \
        "       .section        .sun4v_1insn_patch, \"ax\"\n" \
        "       .word           661b\n" \
        "       stxa            %0, [%1] %3\n" \
        "       .previous\n" \
        "       flush           %%g6\n" \
        : /* No outputs */ \
        : "r" (CTX_HWBITS((__mm)->context)), \
          "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
        unsigned long ctx_valid, flags;
        int cpu = smp_processor_id();

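        /* Record @mm as the owner of this cpu's secondary context.
         * Kernel threads running on init_mm have no user context to
         * load, so return without touching the MMU for them.
         */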
        per_cpu(per_cpu_secondary_mm, cpu) = mm;
        if (unlikely(mm == &init_mm))
                return;

        spin_lock_irqsave(&mm->context.lock, flags);
        ctx_valid = CTX_VALID(mm->context);
        if (!ctx_valid)
                get_new_mmu_context(mm);

        /* We have to be extremely careful here or else we will miss
         * a TSB grow if we switch back and forth between a kernel
         * thread and an address space which has its TSB size increased
         * on another processor.
         *
         * It is possible to play some games in order to optimize the
         * switch, but the safest thing to do is to unconditionally
         * perform the secondary context load and the TSB context switch.
         *
         * For reference the bad case is, for address space "A":
         *
         *              CPU 0                   CPU 1
         *      run address space A
         *      set cpu0's bits in cpu_vm_mask
         *      switch to kernel thread, borrow
         *      address space A via enter_lazy_tlb
         *                                      run address space A
         *                                      set cpu1's bit in cpu_vm_mask
         *                                      flush_tlb_pending()
         *                                      reset cpu_vm_mask to just cpu1
         *                                      TSB grow
         *      run address space A
         *      context was valid, so skip
         *      TSB context switch
         *
         * At that point cpu0 continues to use a stale TSB, the one from
         * before the TSB grow performed on cpu1.  cpu1 did not cross-call
         * cpu0 to update its TSB because at that point the cpu_vm_mask
         * only had cpu1 set in it.
         */
        tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));

        /* Any time a processor runs a context on an address space
         * for the first time, we must flush that context out of the
         * local TLB.
         */
        if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                cpumask_set_cpu(cpu, mm_cpumask(mm));
                __flush_tlb_mm(CTX_HWBITS(mm->context),
                               SECONDARY_CONTEXT);
        }
        spin_unlock_irqrestore(&mm->context.lock, flags);
}

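/* Nothing to do when deactivating an mm; activating one is simply a
 * switch_mm() with no task.
 */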
#define deactivate_mm(tsk,mm)   do { } while (0)
#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */