// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
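
/*
 * Illustrative sketch, not part of this file: a hypothetical driver
 * embedding its mmu_notifier in a larger object can use
 * mmu_notifier_call_srcu() from ->release to defer freeing that object
 * until all SRCU readers are done. "example_notifier" and the callbacks
 * below are assumptions for illustration only.
 */
#if 0
struct example_notifier {
	struct mmu_notifier mn;
	struct rcu_head rcu;
};

static void example_free_notifier(struct rcu_head *rcu)
{
	struct example_notifier *en =
		container_of(rcu, struct example_notifier, rcu);

	kfree(en);
}

static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct example_notifier *en =
		container_of(mn, struct example_notifier, mn);

	/* Tear down secondary-MMU state, then defer the actual free. */
	mmu_notifier_call_srcu(&en->rcu, example_free_notifier);
}
#endif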

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
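
/*
 * Illustrative sketch, not part of this file: a hypothetical secondary
 * MMU without a hardware young/accessed bit can implement
 * ->clear_flush_young exactly as the comment above describes, by
 * zapping the range and reporting whether a mapping existed.
 * example_zap_range() is an assumed driver helper that returns true if
 * it tore down at least one mapping.
 */
#if 0
static int example_clear_flush_young(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	/* No accessed bit: treat "was mapped" as "was young". */
	return example_zap_range(mn, start, end) ? 1 : 0;
}
#endif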

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret = mn->ops->invalidate_range_start(mn, range);

			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
					mn->ops->invalidate_range_start, _ret,
					!mmu_notifier_range_blockable(range) ? "non-" : "");
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range() here too, to avoid the need for
		 * subsystems to register an invalidate_range_end() callback
		 * when an invalidate_range() callback is already present.
		 * Usually a subsystem registers either
		 * invalidate_range_start()/end() or invalidate_range(), so
		 * this adds no overhead besides the pointer check.
		 *
		 * We skip the call to invalidate_range() when we know it is
		 * safe, i.e. when the call site used
		 * mmu_notifier_invalidate_range_only_end() because a call to
		 * invalidate_range() already happened under the page table
		 * lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, range->mm,
						  range->start,
						  range->end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, range);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
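
/*
 * Illustrative sketch, not part of this file: a hypothetical
 * ->invalidate_range_start implementation honouring the blockable flag
 * that __mmu_notifier_invalidate_range_start() above checks. In a
 * non-blockable context (e.g. the OOM reaper) the callback must not
 * sleep and should return -EAGAIN instead. example_lock(),
 * example_trylock(), example_unlock() and example_zap_range() are
 * assumed driver primitives.
 */
#if 0
static int example_invalidate_range_start(struct mmu_notifier *mn,
				const struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		example_lock(mn);
	else if (!example_trylock(mn))
		return -EAGAIN;	/* the caller will see this and cope */

	example_zap_range(mn, range->start, range->end);
	example_unlock(mn);
	return 0;
}
#endif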

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
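
/*
 * Illustrative sketch, not part of this file: typical registration
 * against current->mm, wiring up the hypothetical callbacks sketched
 * in the examples above. Per the comment above mmu_notifier_register(),
 * the caller runs with an implicit mm_users pin (current->mm) and
 * holds no VM locks.
 */
#if 0
static const struct mmu_notifier_ops example_ops = {
	.release		= example_release,
	.clear_flush_young	= example_clear_flush_young,
	.invalidate_range_start	= example_invalidate_range_start,
};

static int example_attach(struct example_notifier *en)
{
	en->mn.ops = &example_ops;
	return mmu_notifier_register(&en->mn, current->mm);
}
#endif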

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

/*
 * Same as mmu_notifier_unregister but no callback and no SRCU
 * synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
	if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
		return false;
	/* Return true if the vma still has the read flag set. */
	return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);