/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu_notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging to the accesses to the page through the
	 * secondary MMUs and not only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when the pte mapping a page is changed:
	 * for example, when ksm remaps the pte to point to a new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 */
	void (*invalidate_range_start)(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in depth
	 * discussion on this see Documentation/vm/mmu_notifier.txt
	 *
	 * The invalidate_range() function is called under the ptl
	 * spin-lock and is not allowed to sleep.
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};


/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};
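
#if 0	/* illustrative sketch only, not part of this API */
/*
 * Example: a user of this interface typically embeds the struct
 * mmu_notifier in its own per-mm state and recovers that state with
 * container_of() inside each callback.  The names "my_notifier",
 * "my_mmu_release" and "my_notifier_ops" below are hypothetical; this
 * is a minimal sketch assuming the driver only needs ->release.
 */
struct my_notifier {
	struct mmu_notifier mn;
	/* driver-private secondary-MMU state would live here */
};

static void my_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_notifier *p = container_of(mn, struct my_notifier, mn);

	/* tear down all secondary-MMU mappings backed by this mm */
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.release = my_mmu_release,
};
#endif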

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						  unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						unsigned long start, unsigned long end,
						bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start, unsigned long end);

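#if 0	/* illustrative sketch only, not part of this API */
/*
 * Example registration, continuing the hypothetical "my_notifier"
 * sketch above.  mmu_notifier_register() takes mmap_sem internally and
 * may sleep; __mmu_notifier_register() is the variant for callers that
 * already hold mmap_sem for write.  Once this returns 0, callbacks can
 * fire at any time until mmu_notifier_unregister() is called.
 */
static int my_attach_to_mm(struct my_notifier *p, struct mm_struct *mm)
{
	p->mn.ops = &my_notifier_ops;
	return mmu_notifier_register(&p->mn, mm);
}
#endif
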
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end);
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
							  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}
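
#if 0	/* illustrative sketch only, not part of this API */
/*
 * Example of the required call pattern when tearing down primary-MMU
 * mappings: bracket the page-table modifications with _start()/_end()
 * so that no secondary mapping can be re-established in the range
 * while it is being invalidated.  "zap_one_range" is a hypothetical
 * helper standing in for real callers such as unmap_vmas().
 */
static void zap_one_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* ... clear, flush and free the primary-MMU page table entries ... */
	mmu_notifier_invalidate_range_end(mm, start, end);
}
#endif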

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
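
#if 0	/* illustrative sketch only, not part of this API */
/*
 * Example use of the aging helpers above: reclaim tests and clears the
 * accessed bit in the primary pte and in every registered secondary
 * MMU, so a page referenced only through a secondary MMU still counts
 * as young.  "was_referenced" is a hypothetical wrapper; compare
 * page_referenced_one() in mm/rmap.c for a real caller.
 */
static bool was_referenced(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/* ORs the primary young bit with every notifier's result */
	return ptep_clear_flush_young_notify(vma, address, ptep) != 0;
}
#endif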

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to update the secondary MMUs first because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() is invoked. Updating the secondary MMUs
 * first is required when we change both the protection of the mapping
 * from read-only to read-write and the pfn (like during copy on write
 * page faults). Otherwise the old page would remain mapped readonly in
 * the secondary MMUs after the new page is already writable by some CPU
 * through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
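
#if 0	/* illustrative sketch only, not part of this API */
/*
 * Example of the replacement sequence described above, modelled on the
 * copy-on-write fault path (see wp_page_copy() in mm/memory.c): the
 * old pte is cleared and flushed first, then set_pte_at_notify() lets
 * the secondary MMUs switch to the new page before the new writable
 * pte becomes visible through the primary MMU.  This is a simplified
 * sketch; page-table locking and rmap handling are omitted.
 */
static void cow_replace_pte(struct vm_area_struct *vma,
			    unsigned long address, pte_t *ptep,
			    pte_t newpte)
{
	ptep_clear_flush_notify(vma, address, ptep);
	set_pte_at_notify(vma->vm_mm, address, ptep, newpte);
}
#endif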

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
extern void mmu_notifier_synchronize(void);

#else /* CONFIG_MMU_NOTIFIER */

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
							  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */