// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <linux/damon.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/highmem.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
/* Get a random number in [l, r) */
#define damon_rand(l, r) (l + prandom_u32_max(r - l))
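/*
 * Note: prandom_u32_max(r - l) returns a value in [0, r - l), so the result
 * of damon_rand() always falls in the half-open interval [l, r).
 */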
/*
 * 't->id' should be the pointer to the relevant 'struct pid' having reference
 * count.  Caller must put the returned task, unless it is NULL.
 */
#define damon_get_task_struct(t) \
	(get_pid_task((struct pid *)t->id, PIDTYPE_PID))
/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}
/*
 * Functions for the initial monitoring target regions construction
 */
/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}
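/*
 * Illustrative example: assuming DAMON_MIN_REGION is PAGE_SIZE (4096), a
 * ten-page region [0, 40960) split with nr_pieces == 3 gets sz_piece ==
 * ALIGN_DOWN(40960 / 3, 4096) == 12288.  The original region is trimmed to
 * [0, 12288), the loop adds [12288, 24576) and [24576, 36864), and the last
 * added piece is then extended to the original end, becoming [24576, 40960).
 */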
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}
static void swap_ranges(struct damon_addr_range *r1,
			struct damon_addr_range *r2)
{
	struct damon_addr_range tmp;

	tmp = *r1;
	*r1 = *r2;
	*r2 = tmp;
}
/*
 * Find three regions separated by two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges that results will be saved
 *
 * This function receives an address space and finds three regions in it that
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of '__damon_va_init_regions()' below to know why this
 * is necessary.
 *
 * Returns 0 if success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				       struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap_ranges(&gap, &second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap_ranges(&second_gap, &first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap_ranges(&first_gap, &second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}
/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);
	mmput(mm);

	return rc;
}
/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a number of small portions of the entire address space
 * is actually mapped to the memory and accessed, monitoring the unmapped
 * regions is wasteful.  That said, because we can deal with small noises,
 * tracking every mapping is not strictly required but could even incur a high
 * overhead if the mapping frequently changes or the number of mappings is
 * high.  The adaptive regions adjustment mechanism will further help to deal
 * with the noise by simply identifying the unmapped areas as a region that
 * has no access.  Moreover, applying the real mappings that would have many
 * unmapped areas inside will make the adaptive mechanism quite complex.  That
 * said, too huge unmapped areas inside the monitoring target should be removed
 * to not take the time for the adaptive mechanism.
 *
 * For the reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space.  Also the two gaps
 * between the three regions are the two biggest unmapped areas in the given
 * address space.  In detail, this function first identifies the start and the
 * end of the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As usual memory map of processes is as below, the gap between the heap and
 * the uppermost mmap()-ed region, and the gap between the lowermost mmap()-ed
 * region and the stack will be two biggest unmapped regions.  Because these
 * gaps are exceptionally huge areas in usual address space, excluding these
 * two biggest unmapped regions will be sufficient to make a trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				     struct damon_target *t)
{
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i;

	if (damon_va_three_regions(t, regions)) {
		pr_err("Failed to get three regions of target %lu\n", t->id);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}
/* Initialize '->regions_list' of every target (task) */
void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}
/*
 * Functions for the dynamic monitoring target regions update
 */
/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r, struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}
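/* For example, a region covering [10, 20) intersects [15, 30) but not [20, 40). */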
/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit with the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersects with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}
/*
 * Update regions for current memory mappings
 */
void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}
/*
 * Get an online page for a pfn if it's in the LRU list.  Otherwise, returns
 * NULL.
 *
 * The body of this function is stolen from the 'page_idle_get_page()'.  We
 * steal rather than reuse it because the code is quite simple.
 */
static struct page *damon_get_page(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);

	if (!page || !PageLRU(page) || !get_page_unless_zero(page))
		return NULL;

	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	return page;
}
static void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm,
			     unsigned long addr)
{
	bool referenced = false;
	struct page *page = damon_get_page(pte_pfn(*pte));

	if (!page)
		return;

	if (pte_young(*pte)) {
		referenced = true;
		*pte = pte_mkold(*pte);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}
static void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm,
			     unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bool referenced = false;
	struct page *page = damon_get_page(pmd_pfn(*pmd));

	if (!page)
		return;

	if (pmd_young(*pmd)) {
		referenced = true;
		*pmd = pmd_mkold(*pmd);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				addr + ((1UL) << HPAGE_PMD_SHIFT)))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
static struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
};
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}
/*
 * Functions for the access checking of the regions
 */
static void damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}
void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;

		damon_for_each_region(r, t)
			damon_va_prepare_access_check(ctx, mm, r);

		mmput(mm);
	}
}
struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
static struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
};
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}
/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void damon_va_check_access(struct damon_ctx *ctx,
			       struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}
unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;

		damon_for_each_region(r, t) {
			damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}

		mmput(mm);
	}

	return max_nr_accesses;
}
/*
 * Functions for the target validity check and cleanup
 */
bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}
void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
}
#include "vaddr-test.h"