/*
 * NOTE(review): stray blame-export residue (table header / commit
 * annotations) removed; the real file header follows below.
 */
1 | /* |
2 | * linux/include/asm-arm/mmu_context.h | |
3 | * | |
4 | * Copyright (C) 1996 Russell King. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * Changelog: | |
11 | * 27-06-1996 RMK Created | |
12 | */ | |
13 | #ifndef __ASM_ARM_MMU_CONTEXT_H | |
14 | #define __ASM_ARM_MMU_CONTEXT_H | |
15 | ||
16 | #include <asm/proc-fns.h> | |
17 | ||
18 | #if __LINUX_ARM_ARCH__ >= 6 | |
19 | ||
/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_BITS	8
/*
 * Mask selecting the generation (process ID) field of a context ID.
 * The shifted operand must be unsigned: left-shifting the negative
 * value (~0) is undefined behaviour in C (C99 6.5.7).  The resulting
 * bit pattern (0xffffff00) is unchanged.
 */
#define ASID_MASK	((~0U) << ASID_BITS)
36 | ||
37 | extern unsigned int cpu_last_asid; | |
38 | ||
39 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); | |
40 | void __new_context(struct mm_struct *mm); | |
41 | ||
42 | static inline void check_context(struct mm_struct *mm) | |
43 | { | |
44 | if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) | |
45 | __new_context(mm); | |
46 | } | |
47 | ||
/*
 * init_new_context() must evaluate to 0 on success; __init_new_context()
 * returns void, so wrap it in a comma expression yielding 0.
 */
#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)

#else

/* Pre-ARMv6 CPUs have no ASIDs, so there is no context state to manage. */
#define check_context(mm)		do { } while (0)
#define init_new_context(tsk,mm)	0

#endif

/* No per-mm context state needs tearing down on any ARM variant. */
#define destroy_context(mm)		do { } while(0)
58 | ||
/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do on ARM: lazy TLB needs no per-CPU bookkeeping here. */
}
72 | ||
/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next) {
		/* Mark this CPU as a user of "next" before switching to it. */
		cpu_set(cpu, next->cpu_vm_mask);
		/* On ARMv6+, allocate a fresh ASID if the generation rolled over. */
		check_context(next);
		cpu_switch_mm(next->pgd, next);
		/*
		 * Only a VIVT cache lets this CPU drop out of prev's
		 * cpu_vm_mask here; on other cache types the bit is kept
		 * set.  NOTE(review): the clear is deliberately placed
		 * after cpu_switch_mm and gated on cache_is_vivt() —
		 * presumably so prev's mask stays accurate while its
		 * translations/cache entries may still be live; confirm
		 * against the TLB/cache flush code before reordering.
		 */
		if (cache_is_vivt())
			cpu_clear(cpu, prev->cpu_vm_mask);
	}
}
93 | ||
/* Nothing to do when an mm is deactivated on ARM. */
#define deactivate_mm(tsk,mm)	do { } while (0)
/* Activating an mm is just a switch from "prev" with no task context. */
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
96 | ||
97 | #endif |