/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function allows mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);

void mmu_notifier_synchronize(void)
{
	/* Wait for any running method to finish. */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

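/*
 * Query (without clearing) whether any secondary MMU still sees the page
 * at @address as recently accessed. The walk stops at the first notifier
 * that reports the page young, so the result is a boolean OR across all
 * registered notifiers.
 */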
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

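/*
 * Notify listeners that the pte at @address has been replaced with @pte
 * (for example after COW breaking or KSM page merging), so a secondary
 * MMU can update its mapping in place instead of waiting for a full
 * invalidation.
 */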
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

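/*
 * Single-page invalidation: tell every registered notifier that the
 * mapping of the page at @address is no longer valid, so any secondary
 * TLB or shadow page-table entry covering it must be dropped.
 */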
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

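/*
 * First half of a range invalidation: called before the primary page
 * tables covering [start, end) are torn down or changed. Every start
 * call is paired with a matching __mmu_notifier_invalidate_range_end()
 * once the update is complete.
 */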
void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

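/*
 * Second half of a range invalidation: called after the primary page
 * tables for [start, end) have been updated, so listeners can stop
 * blocking new faults on the range and re-establish their mappings.
 */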
void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);

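/*
 * Common implementation behind the two registration entry points below:
 * allocate the per-mm notifier state on first use, take all VM locks to
 * exclude every other notifier method, and link the new notifier into
 * mm->mmu_notifier_mm->list. take_mmap_sem says whether this function
 * must acquire mmap_sem itself; when it is zero the caller is expected
 * to already hold it for writing.
 */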
static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

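/*
 * Illustrative sketch only (the names below are hypothetical and not
 * part of this file): a typical user embeds a struct mmu_notifier in
 * its own per-mm context, points it at a static struct mmu_notifier_ops
 * carrying the callbacks it needs (->release,
 * ->invalidate_range_start/end, ...), and, while holding an mm_users
 * reference, does something like
 *
 *	my_ctx->mn.ops = &my_notifier_ops;
 *	ret = mmu_notifier_register(&my_ctx->mn, current->mm);
 *
 * pairing every successful registration with mmu_notifier_unregister()
 * (or mmu_notifier_unregister_no_release()) during teardown.
 */
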
/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

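/*
 * Initialize the global SRCU domain shared by all mms. This runs once
 * early in boot via subsys_initcall, which do_mmu_notifier_register()
 * double-checks with its BUG_ON(!srcu.per_cpu_ref).
 */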
static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}
subsys_initcall(mmu_notifier_init);