/*
 * arch/powerpc/include/asm/mmu_context.h
 * (recovered from a git-blame view of mirror_ubuntu-zesty-kernel; last
 * notable change: "cpumask: Use mm_cpumask() wrapper instead of
 * cpu_vm_mask")
 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern void mmu_context_init(void);
/* Allocate/initialize an MMU context for a new mm; nonzero on failure */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
/* Release the MMU context owned by @mm */
extern void destroy_context(struct mm_struct *mm);

/* Out-of-line HW context switch used by the non-STD_MMU_64 path below */
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
/* 64-bit hash-MMU segment switchers: STAB (older CPUs) vs SLB */
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
/* Install context @id / page directory @pgd into the MMU hardware */
extern void set_context(unsigned long id, pgd_t *pgd);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context has been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/* We must stop all altivec streams before changing the HW
	 * context ("dssall" drains every active data-stream prefetch)
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub architectures.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	/* 64-bit hash MMU: pick SLB or the older STAB by CPU feature */
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif
}
67
/* No work needed on mm deactivation on powerpc; generic code expects the hook */
#define deactivate_mm(tsk,mm)			do { } while (0)
70/*
71 * After we have set current->mm to a new value, this activates
72 * the context for the new mm so we see the new mappings.
73 */
74static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
75{
76 unsigned long flags;
77
78 local_irq_save(flags);
79 switch_mm(prev, next, current);
80 local_irq_restore(flags);
81}
82
/* We don't currently use enter_lazy_tlb() for anything; empty stub keeps
 * the generic mm code happy.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */