/* fs/proc/task_mmu.c */
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

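/*
 * Report the memory-size lines ("VmPeak", "VmSize", ...) shown in
 * /proc/PID/status.  The mm counters below are kept in pages;
 * "<< (PAGE_SHIFT-10)" converts a page count to kB.
 */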
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"RssAnon:\t%8lu kB\n"
		"RssFile:\t%8lu kB\n"
		"RssShmem:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		anon << (PAGE_SHIFT-10),
		file << (PAGE_SHIFT-10),
		shmem << (PAGE_SHIFT-10),
		mm->data_vm << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
	hugetlb_report_usage(m, mm);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}

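/*
 * The seq_file read loop calls m_start()/m_next()/m_stop() repeatedly.
 * m->version (cached by m_cache_vma() above) remembers the end address
 * of the last VMA that was fully copied out, so a later m_start() can
 * resume with find_vma() instead of rewalking the VMA list from the
 * head; -1UL marks the end of the address space.
 */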
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr - 1);
		if (vma && vma->vm_start <= last_addr)
			vma = m_next_vma(priv, vma);
		if (vma)
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct proc_maps_private *priv,
		    struct vm_area_struct *vma)
{
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= vma->vm_mm->start_stack &&
		vma->vm_end >= vma->vm_mm->start_stack;
}

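/*
 * Emit one /proc/PID/maps line: address range, permissions, file
 * offset, device, inode, and name, e.g. (illustrative only):
 *
 *	00400000-0040c000 r-xp 00000000 08:01 1181771	/bin/cat
 */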
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		if (is_stack(priv, vma))
			name = "[stack]";
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

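/*
 * Example of the fixed-point arithmetic (a sketch, assuming 4K pages):
 * a page shared by three processes adds (4096 << PSS_SHIFT) / 3 to each
 * of their pss counters; show_smap() converts back to kB for display
 * with "pss >> (10 + PSS_SHIFT)".
 */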
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 swap_pss;
	bool check_shmem_swap;
};

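/*
 * Fold one mapped page (or, with "compound" set, one THP) into *mss.
 * Compound pages are accounted subpage by subpage, so a THP that is
 * partly shared splits correctly between the shared_* and private_*
 * buckets; page_count() == 1 is the cheap fast path for pages mapped
 * exactly once.
 */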
static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty)
{
	int i, nr = compound ? 1 << compound_order(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	if (PageAnon(page)) {
		mss->anonymous += size;
		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
			mss->lazyfree += size;
	}

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page is mapped with PTE it would
	 * elevate page_count().
	 */
	if (page_count(page) == 1) {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
		return;
	}

	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2) {
			if (dirty || PageDirty(page))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (dirty || PageDirty(page))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += PAGE_SIZE << PSS_SHIFT;
		}
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	mss->swap += shmem_partial_swap_usage(
			walk->vma->vm_file->f_mapping, addr, end);

	return 0;
}
#endif

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
							&& pte_none(*pte))) {
		page = find_get_entry(vma->vm_file->f_mapping,
						linear_page_index(vma, addr));
		if (!page)
			return;

		if (radix_tree_exceptional_entry(page))
			mss->swap += PAGE_SIZE;
		else
			put_page(page);

		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (is_zone_device_page(page))
		/* pass */;
	else
		VM_BUG_ON_PAGE(1, page);
	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

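/*
 * Emit the two-letter mnemonic for every VM_* flag set on the VMA,
 * e.g. "VmFlags: rd ex mr mw me de" (illustrative only).
 */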
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
		[ilog2(VM_PKEY_BIT3)]	= "",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#endif /* HUGETLB_PAGE */

void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
{
}

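/*
 * One /proc/PID/smaps entry is the maps line for the VMA followed by
 * the size breakdown gathered by the page walk below, e.g.
 * (illustrative values only):
 *
 *	Size:               2044 kB
 *	Rss:                 604 kB
 *	Pss:                 201 kB
 *	...
 */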
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
		.hugetlb_entry = smaps_hugetlb_range,
#endif
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);

#ifdef CONFIG_SHMEM
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE)) {
			mss.swap = shmem_swapped;
		} else {
			mss.check_shmem_swap = true;
			smaps_walk.pte_hole = smaps_pte_hole;
		}
	}
#endif

	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "LazyFree:       %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "ShmemPmdMapped: %8lu kB\n"
		   "Shared_Hugetlb: %8lu kB\n"
		   "Private_Hugetlb: %7lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean >> 10,
		   mss.shared_dirty >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.lazyfree >> 10,
		   mss.anonymous_thp >> 10,
		   mss.shmem_thp >> 10,
		   mss.shared_hugetlb >> 10,
		   mss.private_hugetlb >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	arch_show_smap(m, vma);
	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	/* See comment in change_huge_pmd() */
	pmdp_invalidate(vma, addr, pmdp);
	if (pmd_dirty(*pmdp))
		pmd = pmd_mkdirty(pmd);
	if (pmd_young(*pmdp))
		pmd = pmd_mkyoung(pmd);

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_soft_dirty(pmd);

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

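/*
 * Typical soft-dirty cycle driven from userspace (a sketch, not kernel
 * code; see Documentation/vm/soft-dirty.txt):
 *
 *	// clear soft-dirty bits for every VMA of the task
 *	write(clear_refs_fd, "4", 1);
 *	// ... let the task run ...
 *	// then read /proc/PID/pagemap and test PM_SOFT_DIRTY (bit 55)
 *	// to find the pages written in the meantime.
 */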
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	struct mmu_gather tlb;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			if (down_write_killable(&mm->mmap_sem)) {
				count = -EINTR;
				goto out_mm;
			}

			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		tlb_gather_mmu(&tlb, mm, 0, -1);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				if (down_write_killable(&mm->mmap_sem)) {
					count = -EINTR;
					goto out_mm;
				}
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		tlb_finish_mmu(&tlb, 0, -1);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
			flags |= PM_SOFT_DIRTY;

		/*
		 * Currently pmd for thp is always present because thp
		 * can not be swapped-out, migrated, or HWPOISONed
		 * (split in such cases instead.)
		 * This if-check is just to prepare for future implementation.
		 */
		if (pmd_present(pmd)) {
			struct page *page = pmd_page(pmd);

			if (page_mapcount(page) == 1)
				flags |= PM_MMAP_EXCLUSIVE;

			flags |= PM_PRESENT;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn && (flags & PM_PRESENT))
				frame++;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
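/*
 * Userspace sketch (not kernel code; assumes 4K pages): since each
 * entry is PM_ENTRY_BYTES (8 bytes), the entry for virtual address
 * vaddr lives at file offset (vaddr / PAGE_SIZE) * 8:
 *
 *	uint64_t ent;
 *	pread(pagemap_fd, &ent, sizeof(ent), (vaddr / 4096) * 8);
 *	if (ent & (1ULL << 63))				// PM_PRESENT
 *		pfn = ent & ((1ULL << 55) - 1);		// PM_PFRAME_MASK
 */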
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !mmget_not_zero(mm))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pmd_present(pmd))
		return NULL;

	page = vm_normal_page_pmd(vma, addr, pmd);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#endif

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		struct page *page;

		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte = huge_ptep_get(pte);
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(huge_pte))
		return 0;

	page = pte_page(huge_pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
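/*
 * An example /proc/PID/numa_maps line (illustrative only):
 *
 *	7f51138b0000 default file=/usr/lib/libc.so.6 anon=2 dirty=2 N0=4 kernelpagesize_kB=4
 */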
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else if (is_stack(proc_priv, vma)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */