#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (vma->vm_flags & VM_GROWSDOWN)
		if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
			start += PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}
static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}
static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
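
/*
 * Worked example (illustrative, not part of the original source): one
 * 4K page mapped by three processes contributes
 *	(4096 << PSS_SHIFT) / 3 = 16777216 / 3 = 5592405
 * to each sharer's pss counter, which reads back as
 *	5592405 >> PSS_SHIFT = 1365 bytes
 * against an exact share of 4096/3 = 1365.33 bytes, so the truncation
 * error per page stays below one byte even before accumulation.
 */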
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long swap;
	u64 pss;
};
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;
	int mapcount;

	if (is_swap_pte(ptent)) {
		mss->swap += PAGE_SIZE;
		return;
	}

	if (!pte_present(ptent))
		return;

	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += PAGE_SIZE;

	mss->resident += PAGE_SIZE;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += PAGE_SIZE;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += PAGE_SIZE;
		else
			mss->shared_clean += PAGE_SIZE;
		mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += PAGE_SIZE;
		else
			mss->private_clean += PAGE_SIZE;
		mss->pss += (PAGE_SIZE << PSS_SHIFT);
	}
}
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}
static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
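
/*
 * Usage sketch (illustrative): the intended sequence from userspace is
 *
 *	echo 1 > /proc/<pid>/clear_refs		# all pages
 *	echo 2 > /proc/<pid>/clear_refs		# anonymous pages only
 *	echo 3 > /proc/<pid>/clear_refs		# file-mapped pages only
 *
 * followed, after a measurement interval, by reading the Referenced:
 * lines in /proc/<pid>/smaps to estimate the working set touched since
 * the write.
 */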
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	long type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (strict_strtol(strstrip(buffer), 10, &type))
		return -EINVAL;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}
const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

struct pagemapread {
	int pos, len;
	u64 *buffer;
};
#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
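
/*
 * Decoding sketch (illustrative, derived from the macros above): for a
 * 64-bit entry read from /proc/pid/pagemap,
 *
 *	present = (entry & PM_PRESENT) != 0;		// bit 63
 *	swapped = (entry & PM_SWAP) != 0;		// bit 62
 *	pshift  = (entry & PM_PSHIFT_MASK) >> PM_PSHIFT_OFFSET;
 *	pfn     = entry & PM_PFRAME_MASK;		// bits 0-54
 *
 * e.g. a present 4K page at pfn 0x1234 is encoded as
 * PM_PFRAME(0x1234) | PM_PSHIFT(12) | PM_PRESENT.
 */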
static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;

	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	split_huge_page_pmd(walk->mm, pmd);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;

	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
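
/*
 * Userspace sketch (illustrative, not part of the kernel source): to look
 * up the entry for a single virtual address with plain pread(2):
 *
 *	uint64_t entry;
 *	off_t offset = (vaddr / sysconf(_SC_PAGESIZE)) * sizeof(entry);
 *	if (pread(fd, &entry, sizeof(entry), offset) == sizeof(entry) &&
 *	    (entry >> 63))				// page present
 *		pfn = entry & ((1ULL << 55) - 1);	// bits 0-54
 *
 * Consulting /proc/pid/maps first avoids reading entries for unmapped
 * regions, as noted above.
 */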
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}
const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */