/*
 * arch/x86 <asm/mmu_context.h> — x86 mm-context switching helpers
 * (enter_lazy_tlb, switch_mm, activate_mm, deactivate_mm).
 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

/*
 * No-op stub: when CONFIG_PARAVIRT is off there is no hypervisor hook
 * to run when a new mm is activated (the paravirt build provides the
 * real implementation via <asm/paravirt.h>).
 */
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 * (Defined out of line; they manage mm->context.ldt — see the LDT
 * comparison in switch_mm() below.)
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
/*
 * Enter lazy-TLB mode on this CPU: if the per-CPU TLB state is
 * currently TLBSTATE_OK, downgrade it to TLBSTATE_LAZY.  While lazy,
 * remote TLB-flush logic may elect to stop sending flush IPIs to this
 * CPU (see the lazy-TLB recovery path in switch_mm()).  On UP builds
 * there are no cross-CPU flushes, so this is a no-op.
 *
 * NOTE(review): presumably called when the scheduler switches to a
 * task without a user mm (kernel thread) — confirm against callers.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
/*
 * Switch this CPU's address space from @prev to @next.
 *
 * Ordering in the prev != next path is deliberate:
 *   1. mark this CPU's TLB state OK and record @next as active_mm;
 *   2. set this CPU in next's cpumask *before* loading CR3, and only
 *      clear it from prev's cpumask *after* CR3 points at next — so a
 *      concurrent flusher never misses this CPU while either mm's page
 *      tables might still be live here (NOTE(review): rationale
 *      inferred from the comment on the clear below; do not reorder).
 *
 * The else path (prev == next) only exists on SMP: it handles a CPU
 * re-entering an mm it had been lazily attached to.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);

		/* stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		/* Same mm: leave lazy mode and sanity-check active_mm. */
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
/*
 * Activate @next as the current address space: run the paravirt hook,
 * then perform the real switch.  tsk is passed as NULL to switch_mm()
 * (no task is associated with this transition at the call sites).
 *
 * Fix: dropped the stray semicolon after "while (0)".  With it, the
 * macro expanded to TWO statements (the do/while plus an empty one),
 * which breaks any unbraced "if (x) activate_mm(a, b); else ..." use
 * and defeats the whole point of the do { } while (0) idiom.
 */
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
/*
 * Detach the task from its user address space at the segment level by
 * clearing the user segment registers a user mm can leak through:
 *  - 32-bit: clear %gs via the lazy helper (lazy_load_gs);
 *  - 64-bit: clear the %gs index via load_gs_index() (which must go
 *    through swapgs handling rather than a plain mov) and clear %fs.
 * @tsk and @mm are accepted for interface symmetry but unused here.
 */
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

#endif /* _ASM_X86_MMU_CONTEXT_H */