#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/mm_types.h>

#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>
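
/* sparc64 does all of its context tracking in switch_mm(), so entering
 * lazy TLB mode when a kernel thread borrows an address space needs no
 * work here.
 */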
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
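
/* Global MMU context allocator state: the context version/number cache
 * and the bitmap of context numbers in use, both protected by
 * ctx_alloc_lock.
 */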
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
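
/* Low-level TSB switch, implemented outside this header: takes the
 * physical address of the page table root, the base and (possibly NULL)
 * huge-page tsb_config blocks, and the physical address of the TSB
 * descriptor used to program the MMU.
 */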
void __tsb_context_switch(unsigned long pgd_pa,
			  struct tsb_config *tsb_base,
			  struct tsb_config *tsb_huge,
			  unsigned long tsb_descr_pa);

static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd),
			     &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
			     /* Pass the huge-page TSB only if one has
			      * actually been allocated for this mm.
			      */
			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
			      &mm->context.tsb_block[MM_TSB_HUGE] :
			      NULL)
#else
			     NULL
#endif
			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
}
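
/* Resize the TSB selected by tsb_index (base or huge) once the address
 * space's resident set outgrows the current table.
 */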
void tsb_grow(struct mm_struct *mm,
	      unsigned long tsb_index,
	      unsigned long mm_rss);
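
/* On SMP, smp_tsb_sync() cross-calls the CPUs running this address
 * space so they reload the new TSB state; on UP builds there is
 * nothing to synchronize.
 */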
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
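/* The 661: label and the .sun4v_1insn_patch entry let the boot-time
 * patching code replace the sun4u store (ASI_DMMU) with the sun4v one
 * (ASI_MMU), so a single kernel image handles both MMU types.
 */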
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa		%0, [%1] %2\n" \
	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
	"	.word		661b\n" \
	"	stxa		%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush		%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu = smp_processor_id();

	per_cpu(per_cpu_secondary_mm, cpu) = mm;
	if (unlikely(mm == &init_mm))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* We have to be extremely careful here or else we will miss
	 * a TSB grow if we switch back and forth between a kernel
	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
	 * switch, but the safest thing to do is to unconditionally
	 * perform the secondary context load and the TSB context switch.
	 *
	 * For reference the bad case is, for address space "A":
	 *
	 *		CPU 0			CPU 1
	 *	run address space A
	 *	set cpu0's bits in cpu_vm_mask
	 *	switch to kernel thread, borrow
	 *	address space A via enter_lazy_tlb
	 *					run address space A
	 *					set cpu1's bit in cpu_vm_mask
	 *					flush_tlb_pending()
	 *					reset cpu_vm_mask to just cpu1
	 *					TSB grow
	 *	run address space A
	 *	context was valid, so skip
	 *	TSB context switch
	 *
	 * At that point cpu0 continues to use a stale TSB, the one from
	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
	 * cpu0 to update its TSB because at that point the cpu_vm_mask
	 * only had cpu1 set in it.
	 */
	load_secondary_context(mm);
	tsb_context_switch(mm);

	/* Any time a processor runs a context on an address space
	 * for the first time, we must flush that context out of the
	 * local TLB.
	 */
	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		cpumask_set_cpu(cpu, mm_cpumask(mm));
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
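
/* activate_mm() is just a full switch_mm(); sparc64 needs no work at
 * deactivate time.
 */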
#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */