#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/mm_types.h>

#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>

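/* Nothing to do when entering lazy TLB mode on sparc64; the real
 * bookkeeping happens in switch_mm() below.
 */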
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

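/* Context-ID allocation state: tlb_context_cache holds the most recently
 * allocated context version+number, mmu_context_bmap[] is the bitmap of
 * context numbers currently in use, and ctx_alloc_lock serializes both.
 */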
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

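/* The mm currently installed in each CPU's secondary MMU context. */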
DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

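/* Low-level TSB install, implemented in assembler: pgd_pa is the physical
 * address of the page-table root, tsb_base/tsb_huge describe the base-page
 * and (optional) huge-page TSBs, and tsb_descr_pa is the physical address
 * of the TSB descriptor(s) used by the sun4v hypervisor interface.
 */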
void __tsb_context_switch(unsigned long pgd_pa,
			  struct tsb_config *tsb_base,
			  struct tsb_config *tsb_huge,
			  unsigned long tsb_descr_pa);

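/* Point the trap handlers at this mm's TSB(s). The huge-page TSB is
 * passed along only once it has actually been allocated for this mm.
 */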
static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd),
			     &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
			      &mm->context.tsb_block[MM_TSB_HUGE] :
			      NULL)
#else
			     NULL
#endif
			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
}

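/* Resize the TSB selected by tsb_index (MM_TSB_BASE or MM_TSB_HUGE) when
 * mm_rss has outgrown the capacity of the current table.
 */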
void tsb_grow(struct mm_struct *mm,
	      unsigned long tsb_index,
	      unsigned long mm_rss);
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
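/* The stxa at label 661 writes the secondary context register through
 * ASI_DMMU, which is correct for sun4u. On sun4v the boot-time patcher
 * walks the .sun4v_1insn_patch table and rewrites that one instruction
 * to use ASI_MMU instead.
 */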
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa	%0, [%1] %2\n" \
	"	.section .sun4v_1insn_patch, \"ax\"\n" \
	"	.word	661b\n" \
	"	stxa	%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush	%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu = smp_processor_id();

	per_cpu(per_cpu_secondary_mm, cpu) = mm;
	if (unlikely(mm == &init_mm))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* We have to be extremely careful here or else we will miss
	 * a TSB grow if we switch back and forth between a kernel
	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
	 * switch, but the safest thing to do is to unconditionally
	 * perform the secondary context load and the TSB context switch.
	 *
	 * For reference the bad case is, for address space "A":
	 *
	 *	CPU 0				CPU 1
	 *	run address space A
	 *	set cpu0's bits in cpu_vm_mask
	 *	switch to kernel thread, borrow
	 *	address space A via enter_lazy_tlb
	 *					run address space A
	 *					set cpu1's bit in cpu_vm_mask
	 *					flush_tlb_pending()
	 *					reset cpu_vm_mask to just cpu1
	 *					TSB grow
	 *	run address space A
	 *	context was valid, so skip
	 *	TSB context switch
	 *
	 * At that point cpu0 continues to use a stale TSB, the one from
	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
	 * cpu0 to update its TSB because at that point the cpu_vm_mask
	 * only had cpu1 set in it.
	 */
	load_secondary_context(mm);
	tsb_context_switch(mm);

	/* Any time a processor runs a context on an address space
	 * for the first time, we must flush that context out of the
	 * local TLB.
	 */
	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		cpumask_set_cpu(cpu, mm_cpumask(mm));
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

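/* Deactivating an mm needs no per-CPU work here; activating one is just
 * a full switch_mm() with no particular task attached.
 */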
#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */