#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss. Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher. Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
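/*
 * Illustrative note, not part of the original file: the "<< (PAGE_SHIFT-10)"
 * idiom used above converts a page count into kilobytes.  With 4K pages
 * (PAGE_SHIFT == 12) a count of 300 pages becomes 300 << 2 == 1200 kB,
 * i.e. 300 * 4 kB.
 */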
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}
static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}
static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};
static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}
const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
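/*
 * Illustrative sketch, not part of the original file: with
 * PSS_SHIFT == 12 and 4K pages, a page shared by three processes is
 * accumulated in smaps_pte_entry() below as
 *
 *	mss->pss += (4096UL << 12) / 3;		// adds 5592405
 *
 * and the final report shifts the sum back down to kilobytes with
 *
 *	(unsigned long)(mss.pss >> (10 + PSS_SHIFT));
 *
 * so the three-way shared page contributes roughly 1365 bytes (about
 * 1.33 kB) to this process's Pss.
 */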
#ifdef CONFIG_PROC_PAGE_MONITOR

struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
};
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;
	int mapcount;

	if (is_swap_pte(ptent)) {
		mss->swap += ptent_size;
		return;
	}

	if (!pte_present(ptent))
		return;

	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}
static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}
static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};
static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}
const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		if (PageReserved(page))
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
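/*
 * Illustrative userspace sketch, not part of the original file: these
 * constants correspond to the single digit a task writes into
 * /proc/<pid>/clear_refs, e.g.
 *
 *	int fd = open("/proc/1234/clear_refs", O_WRONLY);
 *	write(fd, "1", 1);		// 1 == CLEAR_REFS_ALL
 *	close(fd);
 *
 * Writing 2 (CLEAR_REFS_ANON) or 3 (CLEAR_REFS_MAPPED) restricts the
 * clearing to anonymous or file-backed mappings respectively, as the
 * per-vma checks in clear_refs_write() below show.
 */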
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}
const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;
	pagemap_entry_t *buffer;
};
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
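/*
 * Illustrative sketch, not part of the original file: for a present 4K
 * page at pfn 0x1234, the entry built by pte_to_pagemap_entry() below
 * works out to
 *
 *	PM_PFRAME(0x1234) | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT
 *	    == 0x1234 | ((u64)12 << 55) | (4LL << 61)
 *	    == 0x8600000000001234
 *
 * assuming PAGE_SHIFT == 12: bit 63 marks the page present, bits 55-60
 * carry the page shift, and the low bits carry the pfn.
 */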
static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}
static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
{
	if (is_swap_pte(pte))
		*pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
	else if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	if (pmd_trans_unstable(pmd))
		return 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	spin_lock(&walk->mm->page_table_lock);
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
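/*
 * Illustrative userspace sketch, not part of the original file: reading
 * and decoding one entry for a virtual address "vaddr".  The helper is
 * made up for the example; only the bit layout documented above is
 * taken from this file.
 *
 *	int dump_entry(int fd, unsigned long vaddr, long page_size)
 *	{
 *		uint64_t ent;
 *		off_t off = (vaddr / page_size) * sizeof(ent);
 *
 *		if (pread(fd, &ent, sizeof(ent), off) != (ssize_t)sizeof(ent))
 *			return -1;
 *		if (ent & (1ULL << 63))			// present
 *			printf("pfn %llu\n",
 *			       (unsigned long long)(ent & ((1ULL << 55) - 1)));
 *		else if (ent & (1ULL << 62))		// swapped
 *			printf("swap type %llu\n",
 *			       (unsigned long long)(ent & 0x1f));
 *		return 0;
 *	}
 *
 * where fd is an open file descriptor for /proc/<pid>/pagemap.
 */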
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_for_maps(task);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}
const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
		return NULL;

	return page;
}
*pmd
, unsigned long addr
,
1061 unsigned long end
, struct mm_walk
*walk
)
1063 struct numa_maps
*md
;
1070 if (pmd_trans_huge_lock(pmd
, md
->vma
) == 1) {
1071 pte_t huge_pte
= *(pte_t
*)pmd
;
1074 page
= can_gather_numa_stats(huge_pte
, md
->vma
, addr
);
1076 gather_stats(page
, md
, pte_dirty(huge_pte
),
1077 HPAGE_PMD_SIZE
/PAGE_SIZE
);
1078 spin_unlock(&walk
->mm
->page_table_lock
);
1082 if (pmd_trans_unstable(pmd
))
1084 orig_pte
= pte
= pte_offset_map_lock(walk
->mm
, pmd
, addr
, &ptl
);
1086 struct page
*page
= can_gather_numa_stats(*pte
, md
->vma
, addr
);
1089 gather_stats(page
, md
, pte_dirty(*pte
), 1);
1091 } while (pte
++, addr
+= PAGE_SIZE
, addr
!= end
);
1092 pte_unmap_unlock(orig_pte
, ptl
);
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}
static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};
static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}
const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */