/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>

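/* Nothing special has to happen on sparc64 when a processor enters
 * lazy TLB mode (a kernel thread keeps running on a borrowed mm),
 * so this hook is intentionally empty.
 */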
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

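/* Context-ID allocation state shared with the arch MM code:
 * ctx_alloc_lock protects the allocator, tlb_context_cache caches the
 * most recently allocated context value, mmu_context_bmap marks which
 * context IDs are in use, and get_new_mmu_context() hands a fresh one
 * to an mm.
 */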
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

extern void get_new_mmu_context(struct mm_struct *mm);

/* Initialize a new mmu context. This is invoked when a new
 * address space instance (unique or shared) is instantiated.
 * It sets mm->context to an invalid context and allocates the
 * zeroed page that will hold this mm's TSB.
 */
#define init_new_context(__tsk, __mm) \
({	unsigned long __pg = get_zeroed_page(GFP_KERNEL); \
	(__mm)->context.sparc64_ctx_val = 0UL; \
	(__mm)->context.sparc64_tsb = \
		(unsigned long *) __pg; \
	(__pg ? 0 : -ENOMEM); \
})

/* Destroy a dead context. This occurs when mmput drops the
 * mm_users count to zero, the mmaps have been released, and
 * all the page tables have been flushed. Our job is to destroy
 * any remaining processor-specific state, and in the sparc64
 * case this just means freeing up the mmu context ID held by
 * this task if valid.
 */
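/* mmu_context_bmap[] is an array of 64-bit words with one bit per
 * context ID: nr >> 6 selects the word and nr & 63 the bit that is
 * cleared below when the ID is released.
 */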
#define destroy_context(__mm) \
do {	free_page((unsigned long)(__mm)->context.sparc64_tsb); \
	spin_lock(&ctx_alloc_lock); \
	if (CTX_VALID((__mm)->context)) { \
		unsigned long nr = CTX_NRBITS((__mm)->context); \
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
	} \
	spin_unlock(&ctx_alloc_lock); \
} while(0)

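/* tsb_context_switch() hands the cpu the new address space's page
 * tables: it is passed the physical address of the mm's pgd and the
 * kernel address of its TSB (the page allocated in init_new_context
 * above).
 */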
extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);

/* Set MMU context in the actual hardware. */
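/* The stxa stores the mm's hardware context bits into the D-MMU's
 * SECONDARY_CONTEXT register (via ASI_DMMU); the flush of %g6 (any
 * valid address works) provides the synchronization required after
 * an internal-ASI store so the new context takes effect before
 * subsequent accesses.
 */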
#define load_secondary_context(__mm) \
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t" \
			     "flush	%%g6" \
			     : /* No outputs */ \
			     : "r" (CTX_HWBITS((__mm)->context)), \
			       "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU))

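/* __flush_tlb_mm() flushes all TLB entries for the given hardware
 * context; callers below pass CTX_HWBITS() of the mm and the
 * SECONDARY_CONTEXT register selector.
 */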
extern void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid;
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);
	spin_unlock(&mm->page_table_lock);

	if (!ctx_valid || (old_mm != mm)) {
		load_secondary_context(mm);
		tsb_context_switch(__pa(mm->pgd),
				   mm->context.sparc64_tsb);
	}

	/* Even if (mm == old_mm) we _must_ check
	 * the cpu_vm_mask. If we do not we could
	 * corrupt the TLB state because of how
	 * smp_flush_tlb_{page,range,mm} on sparc64
	 * and lazy tlb switches work. -DaveM
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
}

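/* Deactivating an mm requires no hardware work on sparc64. */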
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);
	spin_unlock(&mm->page_table_lock);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */