/* mmu-context.c: MMU context allocation and management
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* total number of MMU context numbers the hardware supports */
#define NR_CXN 4096

/* bitmap of allocated context numbers; bit 0 is reserved for the kernel and
 * is never handed out by get_cxn() */
static unsigned long cxn_bitmap[NR_CXN / (sizeof(unsigned long) * 8)];

/* mm contexts that currently own a context number, in LRU order (the head is
 * the best candidate for stealing) */
static LIST_HEAD(cxn_owners_lru);

/* protects cxn_bitmap, cxn_owners_lru and (in most paths) cxn_pinned */
static DEFINE_SPINLOCK(cxn_owners_lock);

/* context number exempted from stealing by get_cxn(), or -1 if none pinned */
int __nongpreldata cxn_pinned = -1;
25 | ||
26 | /*****************************************************************************/ | |
27 | /* | |
28 | * initialise a new context | |
29 | */ | |
30 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |
31 | { | |
32 | memset(&mm->context, 0, sizeof(mm->context)); | |
33 | INIT_LIST_HEAD(&mm->context.id_link); | |
34 | mm->context.itlb_cached_pge = 0xffffffffUL; | |
35 | mm->context.dtlb_cached_pge = 0xffffffffUL; | |
36 | ||
37 | return 0; | |
38 | } /* end init_new_context() */ | |
39 | ||
/*****************************************************************************/
/*
 * make sure a kernel MMU context has a CPU context number
 * - call with cxn_owners_lock held
 * - returns the context number now owned by ctx, either one it already had,
 *   a freshly allocated one, or one stolen from the least-recently-used
 *   non-busy, non-pinned owner
 */
static unsigned get_cxn(mm_context_t *ctx)
{
	struct list_head *_p;
	mm_context_t *p;
	unsigned cxn;

	if (!list_empty(&ctx->id_link)) {
		/* already owns a context number - just refresh its position in
		 * the LRU list so it isn't an imminent steal victim */
		list_move_tail(&ctx->id_link, &cxn_owners_lru);
	}
	else {
		/* find the first unallocated context number
		 * - 0 is reserved for the kernel
		 */
		cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);
		if (cxn < NR_CXN) {
			set_bit(cxn, cxn_bitmap);
		}
		else {
			/* none remaining - need to steal someone else's cxn */
			p = NULL;
			list_for_each(_p, &cxn_owners_lru) {
				p = list_entry(_p, mm_context_t, id_link);
				if (!p->id_busy && p->id != cxn_pinned)
					break;
			}

			/* every owner was busy or pinned; nothing stealable */
			BUG_ON(_p == &cxn_owners_lru);

			/* evict the victim: purge its TLB entries since the
			 * context number is about to be reused, and leave its
			 * bitmap bit set for the new owner */
			cxn = p->id;
			p->id = 0;
			list_del_init(&p->id_link);
			__flush_tlb_mm(cxn);
		}

		ctx->id = cxn;
		list_add_tail(&ctx->id_link, &cxn_owners_lru);
	}

	return ctx->id;
} /* end get_cxn() */
85 | ||
/*****************************************************************************/
/*
 * restore the current TLB miss handler mapped page tables into the MMU context
 * and set up a mapping for the page directory
 * - saves the outgoing context's TLB-miss state (cached PGE pointers and PTD
 *   mappings, held in scr0/scr1 and dampr4/dampr5), assigns the incoming
 *   context a CXN, then loads its saved state and maps the new PGD
 */
void change_mm_context(mm_context_t *old, mm_context_t *ctx, pgd_t *pgd)
{
	unsigned long _pgd;

	_pgd = virt_to_phys(pgd);

	/* save the state of the outgoing MMU context */
	old->id_busy = 0;	/* outgoing context may now be stolen */

	asm volatile("movsg scr0,%0" : "=r"(old->itlb_cached_pge));
	asm volatile("movsg dampr4,%0" : "=r"(old->itlb_ptd_mapping));
	asm volatile("movsg scr1,%0" : "=r"(old->dtlb_cached_pge));
	asm volatile("movsg dampr5,%0" : "=r"(old->dtlb_ptd_mapping));

	/* select an MMU context number */
	spin_lock(&cxn_owners_lock);
	get_cxn(ctx);
	ctx->id_busy = 1;	/* protect it from being stolen while in use */
	spin_unlock(&cxn_owners_lock);

	asm volatile("movgs %0,cxnr" : : "r"(ctx->id));

	/* restore the state of the incoming MMU context */
	asm volatile("movgs %0,scr0" : : "r"(ctx->itlb_cached_pge));
	asm volatile("movgs %0,dampr4" : : "r"(ctx->itlb_ptd_mapping));
	asm volatile("movgs %0,scr1" : : "r"(ctx->dtlb_cached_pge));
	asm volatile("movgs %0,dampr5" : : "r"(ctx->dtlb_ptd_mapping));

	/* map the PGD into uncached virtual memory */
	asm volatile("movgs %0,ttbr" : : "r"(_pgd));
	asm volatile("movgs %0,dampr3"
		     :: "r"(_pgd | xAMPRx_L | xAMPRx_M | xAMPRx_SS_16Kb |
			    xAMPRx_S | xAMPRx_C | xAMPRx_V));

} /* end change_mm_context() */
126 | ||
/*****************************************************************************/
/*
 * finished with an MMU context number
 * - releases the context number (if any) back to the bitmap, unpins it if it
 *   was the pinned context, and purges its TLB entries
 */
void destroy_context(struct mm_struct *mm)
{
	mm_context_t *ctx = &mm->context;

	spin_lock(&cxn_owners_lock);

	/* an empty id_link means this context never got a CXN */
	if (!list_empty(&ctx->id_link)) {
		if (ctx->id == cxn_pinned)
			cxn_pinned = -1;

		list_del_init(&ctx->id_link);
		clear_bit(ctx->id, cxn_bitmap);
		/* the number may be reallocated, so drop its TLB entries now */
		__flush_tlb_mm(ctx->id);
		ctx->id = 0;
	}

	spin_unlock(&cxn_owners_lock);
} /* end destroy_context() */
149 | ||
/*****************************************************************************/
/*
 * display the MMU context a process is currently using
 * - appends a "CXNR: <id>" line to buffer and returns the advanced pointer
 */
#ifdef CONFIG_PROC_FS
char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer)
{
	int len;

	/* read context.id under the lock that guards CXN assignment */
	spin_lock(&cxn_owners_lock);
	len = sprintf(buffer, "CXNR: %u\n", mm->context.id);
	spin_unlock(&cxn_owners_lock);

	return buffer + len;
} /* end proc_pid_status_frv_cxnr() */
#endif
164 | ||
165 | /*****************************************************************************/ | |
166 | /* | |
167 | * (un)pin a process's mm_struct's MMU context ID | |
168 | */ | |
169 | int cxn_pin_by_pid(pid_t pid) | |
170 | { | |
171 | struct task_struct *tsk; | |
172 | struct mm_struct *mm = NULL; | |
173 | int ret; | |
174 | ||
175 | /* unpin if pid is zero */ | |
176 | if (pid == 0) { | |
177 | cxn_pinned = -1; | |
178 | return 0; | |
179 | } | |
180 | ||
181 | ret = -ESRCH; | |
182 | ||
183 | /* get a handle on the mm_struct */ | |
184 | read_lock(&tasklist_lock); | |
540e3102 | 185 | tsk = find_task_by_vpid(pid); |
1da177e4 LT |
186 | if (tsk) { |
187 | ret = -EINVAL; | |
188 | ||
189 | task_lock(tsk); | |
190 | if (tsk->mm) { | |
191 | mm = tsk->mm; | |
3fce371b | 192 | mmget(mm); |
1da177e4 LT |
193 | ret = 0; |
194 | } | |
195 | task_unlock(tsk); | |
196 | } | |
197 | read_unlock(&tasklist_lock); | |
198 | ||
199 | if (ret < 0) | |
200 | return ret; | |
201 | ||
202 | /* make sure it has a CXN and pin it */ | |
203 | spin_lock(&cxn_owners_lock); | |
204 | cxn_pinned = get_cxn(&mm->context); | |
205 | spin_unlock(&cxn_owners_lock); | |
206 | ||
207 | mmput(mm); | |
208 | return 0; | |
209 | } /* end cxn_pin_by_pid() */ |