// SPDX-License-Identifier: GPL-2.0
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>

#include <asm/tlbflush.h>
void task_mem(struct seq_file *m, struct mm_struct *mm)
        unsigned long text, lib, swap, anon, file, shmem;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        anon = get_mm_counter(mm, MM_ANONPAGES);
        file = get_mm_counter(mm, MM_FILEPAGES);
        shmem = get_mm_counter(mm, MM_SHMEMPAGES);

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss. Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher. Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = anon + file + shmem;
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        swap = get_mm_counter(mm, MM_SWAPENTS);
62 "RssShmem:\t%8lu kB\n"
69 hiwater_vm
<< (PAGE_SHIFT
-10),
70 total_vm
<< (PAGE_SHIFT
-10),
71 mm
->locked_vm
<< (PAGE_SHIFT
-10),
72 mm
->pinned_vm
<< (PAGE_SHIFT
-10),
73 hiwater_rss
<< (PAGE_SHIFT
-10),
74 total_rss
<< (PAGE_SHIFT
-10),
75 anon
<< (PAGE_SHIFT
-10),
76 file
<< (PAGE_SHIFT
-10),
77 shmem
<< (PAGE_SHIFT
-10),
78 mm
->data_vm
<< (PAGE_SHIFT
-10),
79 mm
->stack_vm
<< (PAGE_SHIFT
-10), text
, lib
,
80 mm_pgtables_bytes(mm
) >> 10,
81 swap
<< (PAGE_SHIFT
-10));
82 hugetlb_report_usage(m
, mm
);
85 unsigned long task_vsize(struct mm_struct
*mm
)
87 return PAGE_SIZE
* mm
->total_vm
;
unsigned long task_statm(struct mm_struct *mm,
                        unsigned long *shared, unsigned long *text,
                        unsigned long *data, unsigned long *resident)
        *shared = get_mm_counter(mm, MM_FILEPAGES) +
                        get_mm_counter(mm, MM_SHMEMPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
        *data = mm->data_vm + mm->stack_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);

/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
        struct task_struct *task = priv->task;

        priv->task_mempolicy = get_task_policy(task);
        mpol_get(priv->task_mempolicy);

static void release_task_mempolicy(struct proc_maps_private *priv)
        mpol_put(priv->task_mempolicy);

static void hold_task_mempolicy(struct proc_maps_private *priv)

static void release_task_mempolicy(struct proc_maps_private *priv)

static void vma_stop(struct proc_maps_private *priv)
        struct mm_struct *mm = priv->mm;

        release_task_mempolicy(priv);
        up_read(&mm->mmap_sem);
static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
        if (vma == priv->tail_vma)
        return vma->vm_next ?: priv->tail_vma;

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
        if (m->count < m->size) /* vma is copied successfully */
                m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;

static void *m_start(struct seq_file *m, loff_t *ppos)
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned int pos = *ppos;

        /* See m_cache_vma(). Zero at the start or after lseek. */
        if (last_addr == -1UL)

        priv->task = get_proc_task(priv->inode);
                return ERR_PTR(-ESRCH);

        if (!mm || !mmget_not_zero(mm))

        down_read(&mm->mmap_sem);
        hold_task_mempolicy(priv);
        priv->tail_vma = get_gate_vma(mm);

        vma = find_vma(mm, last_addr - 1);
        if (vma && vma->vm_start <= last_addr)
                vma = m_next_vma(priv, vma);

        if (pos < mm->map_count) {
                for (vma = mm->mmap; pos; pos--) {
                        m->version = vma->vm_start;

        /* we do not bother to update m->version in this case */
        if (pos == mm->map_count && priv->tail_vma)
                return priv->tail_vma;
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *next;

        next = m_next_vma(priv, v);

static void m_stop(struct seq_file *m, void *v)
        struct proc_maps_private *priv = m->private;

        if (!IS_ERR_OR_NULL(v))
        put_task_struct(priv->task);

static int proc_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops, int psize)
        struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                int err = PTR_ERR(priv->mm);

                seq_release_private(inode, file);

static int proc_map_release(struct inode *inode, struct file *file)
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        return seq_release_private(inode, file);

static int do_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
        return proc_maps_open(inode, file, ops,
                                sizeof(struct proc_maps_private));
/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct vm_area_struct *vma)
        /*
         * We make no effort to guess what a given thread considers to be
         * its "stack". It's not even well-defined for programs written
         */
        return vma->vm_start <= vma->vm_mm->start_stack &&
                vma->vm_end >= vma->vm_mm->start_stack;

static void show_vma_header_prefix(struct seq_file *m,
                                unsigned long start, unsigned long end,
                                vm_flags_t flags, unsigned long long pgoff,
                                dev_t dev, unsigned long ino)
        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        MAJOR(dev), MINOR(dev), ino);
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        vm_flags_t flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
        unsigned long start, end;
        const char *name = NULL;

                struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;

        start = vma->vm_start;
        show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
                seq_file_path(m, file, "\n");

        if (vma->vm_ops && vma->vm_ops->name) {
                name = vma->vm_ops->name(vma);

        name = arch_vma_name(vma);

                if (vma->vm_start <= mm->brk &&
                    vma->vm_end >= mm->start_brk) {
static int show_map(struct seq_file *m, void *v, int is_pid)
        show_map_vma(m, v, is_pid);

static int show_pid_map(struct seq_file *m, void *v)
        return show_map(m, v, 1);

static int show_tid_map(struct seq_file *m, void *v)
        return show_map(m, v, 0);

static const struct seq_operations proc_pid_maps_op = {

static const struct seq_operations proc_tid_maps_op = {

static int pid_maps_open(struct inode *inode, struct file *file)
        return do_maps_open(inode, file, &proc_pid_maps_op);

static int tid_maps_open(struct inode *inode, struct file *file)
        return do_maps_open(inode, file, &proc_tid_maps_op);

const struct file_operations proc_pid_maps_operations = {
        .open           = pid_maps_open,
        .release        = proc_map_release,

const struct file_operations proc_tid_maps_operations = {
        .open           = tid_maps_open,
        .release        = proc_map_release,
/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *      - 1M 3-user-pages add up to 8KB errors;
 *      - supports mapcount up to 2^24, or 16M;
 *      - supports PSS up to 2^52 bytes, or 4PB.
 */
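/*
 * Illustrative arithmetic (not from the original file), assuming
 * PSS_SHIFT == 12 and one 4 KiB page shared by three processes: each
 * mapper accumulates (4096 << 12) / 3 = 5592405 fixed-point units,
 * which is ~1365 bytes after shifting back down by PSS_SHIFT, so the
 * three shares sum to ~4095 bytes and the per-page rounding error
 * stays within a few bytes.
 */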
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
        unsigned long lazyfree;
        unsigned long anonymous_thp;
        unsigned long shmem_thp;
        unsigned long shared_hugetlb;
        unsigned long private_hugetlb;
        unsigned long first_vma_start;
        bool check_shmem_swap;

static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty)
        int i, nr = compound ? 1 << compound_order(page) : 1;
        unsigned long size = nr * PAGE_SIZE;

        if (PageAnon(page)) {
                mss->anonymous += size;
                if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
                        mss->lazyfree += size;

        mss->resident += size;
        /* Accumulate the size in pages that have been accessed. */
        if (young || page_is_young(page) || PageReferenced(page))
                mss->referenced += size;

        /*
         * page_count(page) == 1 guarantees the page is mapped exactly once.
         * If any subpage of the compound page mapped with PTE it would elevate
         */
        if (page_count(page) == 1) {
                if (dirty || PageDirty(page))
                        mss->private_dirty += size;
                        mss->private_clean += size;
                mss->pss += (u64)size << PSS_SHIFT;

        for (i = 0; i < nr; i++, page++) {
                int mapcount = page_mapcount(page);

                        if (dirty || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                        if (dirty || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                                mss->private_clean += PAGE_SIZE;
                        mss->pss += PAGE_SIZE << PSS_SHIFT;
static int smaps_pte_hole(unsigned long addr, unsigned long end,
                struct mm_walk *walk)
        struct mem_size_stats *mss = walk->private;

        mss->swap += shmem_partial_swap_usage(
                        walk->vma->vm_file->f_mapping, addr, end);

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                struct mm_walk *walk)
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (!non_swap_entry(swpent)) {
                        mss->swap += PAGE_SIZE;
                        mapcount = swp_swapcount(swpent);
                                u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

                                do_div(pss_delta, mapcount);
                                mss->swap_pss += pss_delta;
                                mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
                } else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
                else if (is_device_private_entry(swpent))
                        page = device_private_entry_to_page(swpent);
        } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
                                && pte_none(*pte))) {
                page = find_get_entry(vma->vm_file->f_mapping,
                                linear_page_index(vma, addr));

                if (radix_tree_exceptional_entry(page))
                        mss->swap += PAGE_SIZE;

        smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                struct mm_walk *walk)
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;

        /* FOLL_DUMP will return -EFAULT on huge zero page */
        page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
        if (IS_ERR_OR_NULL(page))
                mss->anonymous_thp += HPAGE_PMD_SIZE;
        else if (PageSwapBacked(page))
                mss->shmem_thp += HPAGE_PMD_SIZE;
        else if (is_zone_device_page(page))
                VM_BUG_ON_PAGE(1, page);
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));

static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                struct mm_walk *walk)

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                struct mm_walk *walk)
        struct vm_area_struct *vma = walk->vma;

        ptl = pmd_trans_huge_lock(pmd, vma);
                if (pmd_present(*pmd))
                        smaps_pmd_entry(pmd, addr, walk);

        if (pmd_trans_unstable(pmd))
        /*
         * The mmap_sem held all the way back in m_start() is what
         * keeps khugepaged out of here and from collapsing things
         */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                smaps_pte_entry(pte, addr, walk);
        pte_unmap_unlock(pte - 1, ptl);
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
        /*
         * Don't forget to update Documentation/ on changes.
         */
        static const char mnemonics[BITS_PER_LONG][2] = {
                /*
                 * In case we meet a flag we don't know about.
                 */
                [0 ... (BITS_PER_LONG-1)] = "??",

                [ilog2(VM_READ)]        = "rd",
                [ilog2(VM_WRITE)]       = "wr",
                [ilog2(VM_EXEC)]        = "ex",
                [ilog2(VM_SHARED)]      = "sh",
                [ilog2(VM_MAYREAD)]     = "mr",
                [ilog2(VM_MAYWRITE)]    = "mw",
                [ilog2(VM_MAYEXEC)]     = "me",
                [ilog2(VM_MAYSHARE)]    = "ms",
                [ilog2(VM_GROWSDOWN)]   = "gd",
                [ilog2(VM_PFNMAP)]      = "pf",
                [ilog2(VM_DENYWRITE)]   = "dw",
#ifdef CONFIG_X86_INTEL_MPX
                [ilog2(VM_MPX)]         = "mp",
                [ilog2(VM_LOCKED)]      = "lo",
                [ilog2(VM_IO)]          = "io",
                [ilog2(VM_SEQ_READ)]    = "sr",
                [ilog2(VM_RAND_READ)]   = "rr",
                [ilog2(VM_DONTCOPY)]    = "dc",
                [ilog2(VM_DONTEXPAND)]  = "de",
                [ilog2(VM_ACCOUNT)]     = "ac",
                [ilog2(VM_NORESERVE)]   = "nr",
                [ilog2(VM_HUGETLB)]     = "ht",
                [ilog2(VM_SYNC)]        = "sf",
                [ilog2(VM_ARCH_1)]      = "ar",
                [ilog2(VM_WIPEONFORK)]  = "wf",
                [ilog2(VM_DONTDUMP)]    = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
                [ilog2(VM_SOFTDIRTY)]   = "sd",
                [ilog2(VM_MIXEDMAP)]    = "mm",
                [ilog2(VM_HUGEPAGE)]    = "hg",
                [ilog2(VM_NOHUGEPAGE)]  = "nh",
                [ilog2(VM_MERGEABLE)]   = "mg",
                [ilog2(VM_UFFD_MISSING)]= "um",
                [ilog2(VM_UFFD_WP)]     = "uw",
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
                /* These come out via ProtectionKey: */
                [ilog2(VM_PKEY_BIT0)]   = "",
                [ilog2(VM_PKEY_BIT1)]   = "",
                [ilog2(VM_PKEY_BIT2)]   = "",
                [ilog2(VM_PKEY_BIT3)]   = "",

        seq_puts(m, "VmFlags: ");
        for (i = 0; i < BITS_PER_LONG; i++) {
                if (!mnemonics[i][0])
                if (vma->vm_flags & (1UL << i)) {
                        seq_printf(m, "%c%c ",
                                        mnemonics[i][0], mnemonics[i][1]);
#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end,
                struct mm_walk *walk)
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
                else if (is_device_private_entry(swpent))
                        page = device_private_entry_to_page(swpent);

                int mapcount = page_mapcount(page);

                        mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
                        mss->private_hugetlb += huge_page_size(hstate_vma(vma));
#endif /* HUGETLB_PAGE */

void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
static int show_smap(struct seq_file *m, void *v, int is_pid)
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss_stack;
        struct mem_size_stats *mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
                .hugetlb_entry = smaps_hugetlb_range,

                        mss->first_vma_start = vma->vm_start;

                last_vma = !m_next_vma(priv, vma);

                memset(&mss_stack, 0, sizeof(mss_stack));

        smaps_walk.private = mss;

        if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
                /*
                 * For shared or readonly shmem mappings we know that all
                 * swapped out pages belong to the shmem object, and we can
                 * obtain the swap value much more efficiently. For private
                 * writable mappings, we might have COW pages that are
                 * not affected by the parent swapped out pages of the shmem
                 * object, so we have to distinguish them during the page walk.
                 * Unless we know that the shmem object (or the part mapped by
                 * our VMA) has no swapped out pages at all.
                 */
                unsigned long shmem_swapped = shmem_swap_usage(vma);

                if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
                                !(vma->vm_flags & VM_WRITE)) {
                        mss->swap = shmem_swapped;
                        mss->check_shmem_swap = true;
                        smaps_walk.pte_hole = smaps_pte_hole;

        /* mmap_sem is held in m_start */
        walk_page_vma(vma, &smaps_walk);
        if (vma->vm_flags & VM_LOCKED)
                mss->pss_locked += mss->pss;

                show_map_vma(m, vma, is_pid);
        } else if (last_vma) {
                show_vma_header_prefix(m, mss->first_vma_start,
                                vma->vm_end, 0, 0, 0, 0);
                seq_puts(m, "[rollup]\n");
810 "KernelPageSize: %8lu kB\n"
811 "MMUPageSize: %8lu kB\n",
812 (vma
->vm_end
- vma
->vm_start
) >> 10,
813 vma_kernel_pagesize(vma
) >> 10,
814 vma_mmu_pagesize(vma
) >> 10);
817 if (!rollup_mode
|| last_vma
)
821 "Shared_Clean: %8lu kB\n"
822 "Shared_Dirty: %8lu kB\n"
823 "Private_Clean: %8lu kB\n"
824 "Private_Dirty: %8lu kB\n"
825 "Referenced: %8lu kB\n"
826 "Anonymous: %8lu kB\n"
827 "LazyFree: %8lu kB\n"
828 "AnonHugePages: %8lu kB\n"
829 "ShmemPmdMapped: %8lu kB\n"
830 "Shared_Hugetlb: %8lu kB\n"
831 "Private_Hugetlb: %7lu kB\n"
836 (unsigned long)(mss
->pss
>> (10 + PSS_SHIFT
)),
837 mss
->shared_clean
>> 10,
838 mss
->shared_dirty
>> 10,
839 mss
->private_clean
>> 10,
840 mss
->private_dirty
>> 10,
841 mss
->referenced
>> 10,
842 mss
->anonymous
>> 10,
844 mss
->anonymous_thp
>> 10,
845 mss
->shmem_thp
>> 10,
846 mss
->shared_hugetlb
>> 10,
847 mss
->private_hugetlb
>> 10,
                        (unsigned long)(mss->swap_pss >> (10 + PSS_SHIFT)),
                        (unsigned long)(mss->pss_locked >> (10 + PSS_SHIFT)));
        arch_show_smap(m, vma);
        show_smap_vma_flags(m, vma);

static int show_pid_smap(struct seq_file *m, void *v)
        return show_smap(m, v, 1);

static int show_tid_smap(struct seq_file *m, void *v)
        return show_smap(m, v, 0);

static const struct seq_operations proc_pid_smaps_op = {
        .show   = show_pid_smap

static const struct seq_operations proc_tid_smaps_op = {
        .show   = show_tid_smap

static int pid_smaps_open(struct inode *inode, struct file *file)
        return do_maps_open(inode, file, &proc_pid_smaps_op);

static int pid_smaps_rollup_open(struct inode *inode, struct file *file)
        struct seq_file *seq;
        struct proc_maps_private *priv;
        int ret = do_maps_open(inode, file, &proc_pid_smaps_op);

        seq = file->private_data;
        priv->rollup = kzalloc(sizeof(*priv->rollup), GFP_KERNEL);
                proc_map_release(inode, file);
        priv->rollup->first = true;

static int tid_smaps_open(struct inode *inode, struct file *file)
        return do_maps_open(inode, file, &proc_tid_smaps_op);

const struct file_operations proc_pid_smaps_operations = {
        .open           = pid_smaps_open,
        .release        = proc_map_release,

const struct file_operations proc_pid_smaps_rollup_operations = {
        .open           = pid_smaps_rollup_open,
        .release        = proc_map_release,

const struct file_operations proc_tid_smaps_operations = {
        .open           = tid_smaps_open,
        .release        = proc_map_release,
enum clear_refs_types {
        CLEAR_REFS_SOFT_DIRTY,
        CLEAR_REFS_MM_HIWATER_RSS,

struct clear_refs_private {
        enum clear_refs_types type;

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
        /*
         * The soft-dirty tracker uses #PF-s to catch writes
         * to pages, so write-protect the pte as well. See the
         * Documentation/vm/soft-dirty.txt for full description
         * of how soft-dirty works.
         */
        if (pte_present(ptent)) {
                ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
                ptent = pte_wrprotect(ptent);
                ptent = pte_clear_soft_dirty(ptent);
                ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
                set_pte_at(vma->vm_mm, addr, pte, ptent);

static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
        if (pmd_present(pmd)) {
                /* See comment in change_huge_pmd() */
                pmdp_invalidate(vma, addr, pmdp);
                if (pmd_dirty(*pmdp))
                        pmd = pmd_mkdirty(pmd);
                if (pmd_young(*pmdp))
                        pmd = pmd_mkyoung(pmd);

                pmd = pmd_wrprotect(pmd);
                pmd = pmd_clear_soft_dirty(pmd);

                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
        } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
                pmd = pmd_swp_clear_soft_dirty(pmd);
                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;

        ptl = pmd_trans_huge_lock(pmd, vma);
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty_pmd(vma, addr, pmd);

                if (!pmd_present(*pmd))

                page = pmd_page(*pmd);

                /* Clear accessed and referenced bits. */
                pmdp_test_and_clear_young(vma, addr, pmd);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);

        if (pmd_trans_unstable(pmd))

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {

                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty(vma, addr, pte);

                if (!pte_present(ptent))

                page = vm_normal_page(vma, addr, ptent);

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);
        pte_unmap_unlock(pte - 1, ptl);
static int clear_refs_test_walk(unsigned long start, unsigned long end,
                struct mm_walk *walk)
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;

        if (vma->vm_flags & VM_PFNMAP)

        /*
         * Writing 1 to /proc/pid/clear_refs affects all pages.
         * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
         * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
         * Writing 4 to /proc/pid/clear_refs affects all pages.
         */
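        /*
         * Illustrative user-space sketch (not part of this file), assuming
         * the interface documented above and a hypothetical pid 1234: clear
         * the soft-dirty bits, let the task run, then rescan its pagemap
         * (bit 55, PM_SOFT_DIRTY) to find pages written since.
         *
         *      int fd = open("/proc/1234/clear_refs", O_WRONLY);
         *      if (fd >= 0) {
         *              write(fd, "4", 1);      (4 == CLEAR_REFS_SOFT_DIRTY)
         *              close(fd);
         *      }
         */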
        if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
        if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
        struct mmu_gather tlb;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
        rv = kstrtoint(strstrip(buffer), 10, &itype);
        type = (enum clear_refs_types)itype;
        if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)

        task = get_proc_task(file_inode(file));
        mm = get_task_mm(task);
                struct clear_refs_private cp = {
                struct mm_walk clear_refs_walk = {
                        .pmd_entry = clear_refs_pte_range,
                        .test_walk = clear_refs_test_walk,

                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
                        if (down_write_killable(&mm->mmap_sem)) {

                        /*
                         * Writing 5 to /proc/pid/clear_refs resets the peak
                         * resident set size to this mm's current rss value.
                         */
                        reset_mm_hiwater_rss(mm);
                        up_write(&mm->mmap_sem);

                down_read(&mm->mmap_sem);
                tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                up_read(&mm->mmap_sem);
                                if (down_write_killable(&mm->mmap_sem)) {
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
                                downgrade_write(&mm->mmap_sem);

                        mmu_notifier_invalidate_range_start(mm, 0, -1);
                walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
                tlb_finish_mmu(&tlb, 0, -1);
                up_read(&mm->mmap_sem);

        put_task_struct(task);

const struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
        .llseek         = noop_llseek,
struct pagemapread {
        int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
        pagemap_entry_t *buffer;

#define PAGEMAP_WALK_SIZE       (PMD_SIZE)
#define PAGEMAP_WALK_MASK       (PMD_MASK)

#define PM_ENTRY_BYTES          sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS          55
#define PM_PFRAME_MASK          GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY           BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE       BIT_ULL(56)
#define PM_FILE                 BIT_ULL(61)
#define PM_SWAP                 BIT_ULL(62)
#define PM_PRESENT              BIT_ULL(63)

#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
        return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
                struct pagemapread *pm)
        pm->buffer[pm->pos++] = *pme;
        if (pm->pos >= pm->len)
                return PM_END_OF_BUFFER;

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                struct mm_walk *walk)
        struct pagemapread *pm = walk->private;
        unsigned long addr = start;

        while (addr < end) {
                struct vm_area_struct *vma = find_vma(walk->mm, addr);
                pagemap_entry_t pme = make_pme(0, 0);
                /* End of address space hole, which we mark as non-present. */
                unsigned long hole_end;

                        hole_end = min(end, vma->vm_start);

                for (; addr < hole_end; addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);

                /* Addresses in the VMA. */
                if (vma->vm_flags & VM_SOFTDIRTY)
                        pme = make_pme(0, PM_SOFT_DIRTY);
                for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                struct vm_area_struct *vma, unsigned long addr, pte_t pte)
        u64 frame = 0, flags = 0;
        struct page *page = NULL;

        if (pte_present(pte)) {
                        frame = pte_pfn(pte);
                flags |= PM_PRESENT;
                page = _vm_normal_page(vma, addr, pte, true);
                if (pte_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
                if (pte_swp_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
                entry = pte_to_swp_entry(pte);
                frame = swp_type(entry) |
                        (swp_offset(entry) << MAX_SWAPFILES_SHIFT);

                if (is_migration_entry(entry))
                        page = migration_entry_to_page(entry);

                if (is_device_private_entry(entry))
                        page = device_private_entry_to_page(entry);

        if (page && !PageAnon(page))
        if (page && page_mapcount(page) == 1)
                flags |= PM_MMAP_EXCLUSIVE;
        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        return make_pme(frame, flags);
static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                struct mm_walk *walk)
        struct vm_area_struct *vma = walk->vma;
        struct pagemapread *pm = walk->private;
        pte_t *pte, *orig_pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        ptl = pmd_trans_huge_lock(pmdp, vma);
                u64 flags = 0, frame = 0;
                struct page *page = NULL;

                if (vma->vm_flags & VM_SOFTDIRTY)
                        flags |= PM_SOFT_DIRTY;

                if (pmd_present(pmd)) {
                        page = pmd_page(pmd);

                        flags |= PM_PRESENT;
                        if (pmd_soft_dirty(pmd))
                                flags |= PM_SOFT_DIRTY;
                                frame = pmd_pfn(pmd) +
                                        ((addr & ~PMD_MASK) >> PAGE_SHIFT);
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
                else if (is_swap_pmd(pmd)) {
                        swp_entry_t entry = pmd_to_swp_entry(pmd);

                        frame = swp_type(entry) |
                                (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                        if (pmd_swp_soft_dirty(pmd))
                                flags |= PM_SOFT_DIRTY;
                        VM_BUG_ON(!is_pmd_migration_entry(pmd));
                        page = migration_entry_to_page(entry);

                if (page && page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;

                for (; addr != end; addr += PAGE_SIZE) {
                        pagemap_entry_t pme = make_pme(frame, flags);

                        err = add_to_pagemap(addr, &pme, pm);
                        if (pm->show_pfn && (flags & PM_PRESENT))

        if (pmd_trans_unstable(pmdp))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

        /*
         * We can assume that @vma always points to a valid one and @end never
         * goes beyond vma->vm_end.
         */
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
        for (; addr < end; pte++, addr += PAGE_SIZE) {
                pagemap_entry_t pme;

                pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
                err = add_to_pagemap(addr, &pme, pm);
        pte_unmap_unlock(orig_pte, ptl);
#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
                unsigned long addr, unsigned long end,
                struct mm_walk *walk)
        struct pagemapread *pm = walk->private;
        struct vm_area_struct *vma = walk->vma;
        u64 flags = 0, frame = 0;

        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        pte = huge_ptep_get(ptep);
        if (pte_present(pte)) {
                struct page *page = pte_page(pte);

                if (!PageAnon(page))

                if (page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;

                flags |= PM_PRESENT;
                        frame = pte_pfn(pte) +
                                ((addr & ~hmask) >> PAGE_SHIFT);

        for (; addr != end; addr += PAGE_SIZE) {
                pagemap_entry_t pme = make_pme(frame, flags);

                err = add_to_pagemap(addr, &pme, pm);
                if (pm->show_pfn && (flags & PM_PRESENT))
#endif /* HUGETLB_PAGE */
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
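/*
 * Illustrative user-space sketch (not part of this file), relying only on
 * the bit layout documented above: look up the pagemap entry for a single
 * virtual address of the calling process and decode the present/swap/
 * soft-dirty bits. Note that the PFN field reads back as zero unless the
 * reader has CAP_SYS_ADMIN (see pagemap_read() below).
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      static int dump_pagemap_entry(const void *vaddr)
 *      {
 *              uint64_t ent;
 *              long psize = sysconf(_SC_PAGESIZE);
 *              off_t off = ((uintptr_t)vaddr / psize) * sizeof(ent);
 *              int fd = open("/proc/self/pagemap", O_RDONLY);
 *
 *              if (fd < 0)
 *                      return -1;
 *              if (pread(fd, &ent, sizeof(ent), off) != sizeof(ent)) {
 *                      close(fd);
 *                      return -1;
 *              }
 *              close(fd);
 *              printf("present=%d swapped=%d soft-dirty=%d pfn=0x%llx\n",
 *                     (int)((ent >> 63) & 1), (int)((ent >> 62) & 1),
 *                     (int)((ent >> 55) & 1),
 *                     (unsigned long long)(ent & ((1ULL << 55) - 1)));
 *              return 0;
 *      }
 */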
static ssize_t pagemap_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos)
        struct mm_struct *mm = file->private_data;
        struct pagemapread pm;
        struct mm_walk pagemap_walk = {};
        unsigned long svpfn;
        unsigned long start_vaddr;
        unsigned long end_vaddr;
        int ret = 0, copied = 0;

        if (!mm || !mmget_not_zero(mm))

        /* file position must be aligned */
        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))

        /* do not disclose physical addresses: attack vector */
        pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

        pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
        pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_KERNEL);

        pagemap_walk.pmd_entry = pagemap_pmd_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
        pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
        pagemap_walk.mm = mm;
        pagemap_walk.private = &pm;

        svpfn = src / PM_ENTRY_BYTES;
        start_vaddr = svpfn << PAGE_SHIFT;
        end_vaddr = mm->task_size;

        /* watch out for wraparound */
        if (svpfn > mm->task_size >> PAGE_SHIFT)
                start_vaddr = end_vaddr;

        /*
         * The odds are that this will stop walking way
         * before end_vaddr, because the length of the
         * user buffer is tracked in "pm", and the walk
         * will stop when we hit the end of the buffer.
         */
        while (count && (start_vaddr < end_vaddr)) {

                end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
                if (end < start_vaddr || end > end_vaddr)
                down_read(&mm->mmap_sem);
                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
                up_read(&mm->mmap_sem);

                len = min(count, PM_ENTRY_BYTES * pm.pos);
                if (copy_to_user(buf, pm.buffer, len)) {

        if (!ret || ret == PM_END_OF_BUFFER)

static int pagemap_open(struct inode *inode, struct file *file)
        struct mm_struct *mm;

        mm = proc_mem_open(inode, PTRACE_MODE_READ);
        file->private_data = mm;

static int pagemap_release(struct inode *inode, struct file *file)
        struct mm_struct *mm = file->private_data;

const struct file_operations proc_pagemap_operations = {
        .llseek         = mem_lseek, /* borrow this */
        .read           = pagemap_read,
        .open           = pagemap_open,
        .release        = pagemap_release,
#endif /* CONFIG_PROC_PAGE_MONITOR */
        unsigned long pages;
        unsigned long active;
        unsigned long writeback;
        unsigned long mapcount_max;
        unsigned long dirty;
        unsigned long swapcache;
        unsigned long node[MAX_NUMNODES];

struct numa_maps_private {
        struct proc_maps_private proc_maps;
        struct numa_maps md;

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
                unsigned long nr_pages)
        int count = page_mapcount(page);

        md->pages += nr_pages;
        if (pte_dirty || PageDirty(page))
                md->dirty += nr_pages;

        if (PageSwapCache(page))
                md->swapcache += nr_pages;

        if (PageActive(page) || PageUnevictable(page))
                md->active += nr_pages;

        if (PageWriteback(page))
                md->writeback += nr_pages;

                md->anon += nr_pages;

        if (count > md->mapcount_max)
                md->mapcount_max = count;

        md->node[page_to_nid(page)] += nr_pages;
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
        if (!pte_present(pte))

        page = vm_normal_page(vma, addr, pte);

        if (PageReserved(page))

        nid = page_to_nid(page);
        if (!node_isset(nid, node_states[N_MEMORY]))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
                struct vm_area_struct *vma,
        if (!pmd_present(pmd))

        page = vm_normal_page_pmd(vma, addr, pmd);

        if (PageReserved(page))

        nid = page_to_nid(page);
        if (!node_isset(nid, node_states[N_MEMORY]))

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
        struct numa_maps *md = walk->private;
        struct vm_area_struct *vma = walk->vma;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        ptl = pmd_trans_huge_lock(pmd, vma);

                page = can_gather_numa_stats_pmd(*pmd, vma, addr);
                        gather_stats(page, md, pmd_dirty(*pmd),
                                        HPAGE_PMD_SIZE/PAGE_SIZE);

        if (pmd_trans_unstable(pmd))
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
                struct page *page = can_gather_numa_stats(*pte, vma, addr);
                gather_stats(page, md, pte_dirty(*pte), 1);
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
        pte_t huge_pte = huge_ptep_get(pte);
        struct numa_maps *md;

        if (!pte_present(huge_pte))

        page = pte_page(huge_pte);

        gather_stats(page, md, pte_dirty(huge_pte), 1);

static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
/*
 * Display pages allocated per node and memory policy via /proc.
 */
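/*
 * For illustration only (all values hypothetical): a line emitted by
 * show_numa_map() below might look like
 *
 *      7f1a2b400000 default file=/usr/lib/libfoo.so mapped=12 mapmax=3 active=10 N0=8 N1=4 kernelpagesize_kB=4
 *
 * i.e. the VMA start address, the memory policy string, an optional
 * file/heap/stack/huge tag, whichever per-node counters are non-zero,
 * and the kernel page size of the mapping.
 */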
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
        struct numa_maps_private *numa_priv = m->private;
        struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
        struct vm_area_struct *vma = v;
        struct numa_maps *md = &numa_priv->md;
        struct file *file = vma->vm_file;
        struct mm_struct *mm = vma->vm_mm;
        struct mm_walk walk = {
                .hugetlb_entry = gather_hugetlb_stats,
                .pmd_entry = gather_pte_stats,
        struct mempolicy *pol;

        /* Ensure we start with an empty set of numa_maps statistics. */
        memset(md, 0, sizeof(*md));

        pol = __get_vma_policy(vma, vma->vm_start);
                mpol_to_str(buffer, sizeof(buffer), pol);
                mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);

        seq_printf(m, "%08lx %s", vma->vm_start, buffer);

                seq_puts(m, " file=");
                seq_file_path(m, file, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
        } else if (is_stack(vma)) {
                seq_puts(m, " stack");

        if (is_vm_hugetlb_page(vma))
                seq_puts(m, " huge");

        /* mmap_sem is held by m_start */
        walk_page_vma(vma, &walk);

                seq_printf(m, " anon=%lu", md->anon);

                seq_printf(m, " dirty=%lu", md->dirty);

        if (md->pages != md->anon && md->pages != md->dirty)
                seq_printf(m, " mapped=%lu", md->pages);

        if (md->mapcount_max > 1)
                seq_printf(m, " mapmax=%lu", md->mapcount_max);

                seq_printf(m, " swapcache=%lu", md->swapcache);

        if (md->active < md->pages && !is_vm_hugetlb_page(vma))
                seq_printf(m, " active=%lu", md->active);

                seq_printf(m, " writeback=%lu", md->writeback);

        for_each_node_state(nid, N_MEMORY)
                seq_printf(m, " N%d=%lu", nid, md->node[nid]);

        seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);

        m_cache_vma(m, vma);
static int show_pid_numa_map(struct seq_file *m, void *v)
        return show_numa_map(m, v, 1);

static int show_tid_numa_map(struct seq_file *m, void *v)
        return show_numa_map(m, v, 0);

static const struct seq_operations proc_pid_numa_maps_op = {
        .show   = show_pid_numa_map,

static const struct seq_operations proc_tid_numa_maps_op = {
        .show   = show_tid_numa_map,

static int numa_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
        return proc_maps_open(inode, file, ops,
                                sizeof(struct numa_maps_private));

static int pid_numa_maps_open(struct inode *inode, struct file *file)
        return numa_maps_open(inode, file, &proc_pid_numa_maps_op);

static int tid_numa_maps_open(struct inode *inode, struct file *file)
        return numa_maps_open(inode, file, &proc_tid_numa_maps_op);

const struct file_operations proc_pid_numa_maps_operations = {
        .open           = pid_numa_maps_open,
        .llseek         = seq_lseek,
        .release        = proc_map_release,

const struct file_operations proc_tid_numa_maps_operations = {
        .open           = tid_numa_maps_open,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
#endif /* CONFIG_NUMA */