]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __X86_64_MMU_CONTEXT_H |
2 | #define __X86_64_MMU_CONTEXT_H | |
3 | ||
1da177e4 LT |
4 | #include <asm/desc.h> |
5 | #include <asm/atomic.h> | |
6 | #include <asm/pgalloc.h> | |
7 | #include <asm/pda.h> | |
8 | #include <asm/pgtable.h> | |
9 | #include <asm/tlbflush.h> | |
04662713 | 10 | #ifndef CONFIG_PARAVIRT |
d6dd61c8 | 11 | #include <asm-generic/mm_hooks.h> |
04662713 | 12 | #endif |
1da177e4 LT |
13 | |
/*
 * possibly do the LDT unload here?
 */
/*
 * Per-mm context lifecycle — out-of-line definitions live elsewhere in the
 * arch code (presumably ldt.c; the context holds LDT state — see the
 * context.ldt use in switch_mm below).
 *
 * init_new_context: set up @mm->context for a new address space created by
 * @tsk.  Returns 0 on success, negative errno on failure (kernel convention
 * — TODO confirm against the definition).
 *
 * destroy_context: release whatever init_new_context allocated for @mm.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
19 | ||
1da177e4 LT |
/*
 * Called when the scheduler hands the CPU to a task that will keep using
 * @mm without really owning it (lazy TLB mode).  On SMP we downgrade this
 * CPU's per-PDA mmu_state from TLBSTATE_OK to TLBSTATE_LAZY, which lets
 * the flush-IPI path treat us as a lazy user of @mm (see the
 * cpu_test_and_set re-entry handling in switch_mm below).  On UP this is
 * a no-op.  @tsk is unused.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/* Only downgrade if we currently hold an active TLB state. */
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
1da177e4 | 27 | |
c4fe760e | 28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
1da177e4 LT |
29 | struct task_struct *tsk) |
30 | { | |
31 | unsigned cpu = smp_processor_id(); | |
32 | if (likely(prev != next)) { | |
33 | /* stop flush ipis for the previous mm */ | |
3d1712c9 | 34 | cpu_clear(cpu, prev->cpu_vm_mask); |
1da177e4 LT |
35 | #ifdef CONFIG_SMP |
36 | write_pda(mmu_state, TLBSTATE_OK); | |
37 | write_pda(active_mm, next); | |
38 | #endif | |
3d1712c9 | 39 | cpu_set(cpu, next->cpu_vm_mask); |
1da177e4 LT |
40 | load_cr3(next->pgd); |
41 | ||
c4fe760e | 42 | if (unlikely(next->context.ldt != prev->context.ldt)) |
881c2975 | 43 | load_LDT_nolock(&next->context); |
1da177e4 LT |
44 | } |
45 | #ifdef CONFIG_SMP | |
46 | else { | |
47 | write_pda(mmu_state, TLBSTATE_OK); | |
48 | if (read_pda(active_mm) != next) | |
3abf024d | 49 | BUG(); |
3d1712c9 | 50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { |
c4fe760e | 51 | /* We were in lazy tlb mode and leave_mm disabled |
1da177e4 LT |
52 | * tlb flush IPI delivery. We must reload CR3 |
53 | * to make sure to use no freed page tables. | |
54 | */ | |
55 | load_cr3(next->pgd); | |
881c2975 | 56 | load_LDT_nolock(&next->context); |
1da177e4 LT |
57 | } |
58 | } | |
59 | #endif | |
60 | } | |
61 | ||
c4fe760e JP |
/*
 * Tear down the user segment state when @mm is deactivated: reset the
 * GS base via load_gs_index(0) and clear %fs with an explicit zero move.
 * Multi-statement macro, so it is wrapped in do { } while (0) to behave
 * as a single statement.  @tsk and @mm are unused by the x86-64 variant.
 */
#define deactivate_mm(tsk, mm) \
do { \
	load_gs_index(0); \
	asm volatile("movl %0,%%fs"::"r"(0)); \
} while (0)
1da177e4 | 67 | |
c4fe760e JP |
/*
 * Make @next the active address space (e.g. at exec time).  Delegates to
 * switch_mm; the task argument is NULL, which is safe because switch_mm
 * never references its @tsk parameter (see above).
 */
#define activate_mm(prev, next) \
	switch_mm((prev), (next), NULL)
1da177e4 LT |
70 | |
71 | ||
72 | #endif |