#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap, ptes, pmds;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss. Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher. Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
}
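
/*
 * Illustrative only (values are made up): with the format strings above,
 * the task_mem() fields appear in /proc/PID/status along the lines of
 *
 *	VmPeak:	   10184 kB
 *	VmSize:	   10148 kB
 *	VmLck:	       0 kB
 *	VmPin:	       0 kB
 *	VmHWM:	     584 kB
 *	VmRSS:	     584 kB
 *	...
 *	VmSwap:	       0 kB
 */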

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size) /* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}
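
/*
 * m_cache_vma() and m_start() cooperate through m->version: once a vma's
 * record has been copied out in full, m->version caches the next vma's
 * start address so a subsequent read() can restart via find_vma() instead
 * of rewalking the list from mm->mmap; -1UL marks the end of the walk.
 */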

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

static pid_t pid_of_stack(struct proc_maps_private *priv,
				struct vm_area_struct *vma, bool is_pid)
{
	struct inode *inode = priv->inode;
	struct task_struct *task;
	pid_t ret = 0;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		task = task_of_stack(task, vma, is_pid);
		if (task)
			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
	}
	rcu_read_unlock();

	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
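
/*
 * Illustrative only (addresses, device and inode numbers are made up):
 * the format string above produces /proc/PID/maps lines such as
 *
 *	00400000-0040b000 r-xp 00000000 08:01 786435   /bin/cat
 *	7ffd4e2f1000-7ffd4e312000 rw-p 00000000 00:00 0  [stack]
 */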

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 swap_pss;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		unsigned long size, bool young, bool dirty)
{
	int mapcount;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		u64 pss_delta;

		if (dirty || PageDirty(page))
			mss->shared_dirty += size;
		else
			mss->shared_clean += size;
		pss_delta = (u64)size << PSS_SHIFT;
		do_div(pss_delta, mapcount);
		mss->pss += pss_delta;
	} else {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
	}
}
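
/*
 * Worked example of the fixed-point PSS accounting above (4K pages):
 * a page with mapcount 3 contributes pss_delta = (4096 << 12) / 3 =
 * 5592405 to each sharer, i.e. 5592405 >> 12 = 1365 bytes instead of
 * the exact 4096 / 3 = 1365.33, so the rounding error stays well under
 * one byte per page per sharer.
 */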

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}

	if (!page)
		return;
	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	mss->anonymous_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, HPAGE_PMD_SIZE,
			pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#endif /* HUGETLB_PAGE */

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
		.hugetlb_entry = smaps_hugetlb_range,
#endif
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Shared_Hugetlb: %8lu kB\n"
		   "Private_Hugetlb: %7lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.shared_hugetlb >> 10,
		   mss.private_hugetlb >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}
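
/*
 * Illustrative only (numbers are made up): one /proc/PID/smaps entry as
 * emitted by show_smap() looks like
 *
 *	00400000-0040b000 r-xp 00000000 08:01 786435   /bin/cat
 *	Size:                 44 kB
 *	Rss:                  20 kB
 *	Pss:                  12 kB
 *	...
 *	Locked:                0 kB
 *	VmFlags: rd ex mr mw me dd
 */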

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See
	 * Documentation/vm/soft-dirty.txt for a full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_soft_dirty(ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_soft_dirty(pmd);

	if (vma->vm_flags & VM_SOFTDIRTY)
		vma->vm_flags &= ~VM_SOFTDIRTY;

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}

#else

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, ~0UL, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
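
/*
 * Typical usage from userspace, e.g. to start a new soft-dirty tracking
 * interval before sampling the soft-dirty bit via /proc/PID/pagemap:
 *
 *	echo 4 > /proc/$pid/clear_refs
 *
 * (the values 1-5 select the clear_refs_types documented above.)
 */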

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
			flags |= PM_SOFT_DIRTY;

		/*
		 * Currently pmd for thp is always present because thp
		 * can not be swapped-out, migrated, or HWPOISONed
		 * (split in such cases instead.)
		 * This if-check is just to prepare for future implementation.
		 */
		if (pmd_present(pmd)) {
			struct page *page = pmd_page(pmd);

			if (page_mapcount(page) == 1)
				flags |= PM_MMAP_EXCLUSIVE;

			flags |= PM_PRESENT;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn && (flags & PM_PRESENT))
				frame++;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
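
/*
 * A minimal userspace sketch (not part of the kernel) of how one entry
 * is fetched and decoded under the layout above, assuming 4 KiB pages
 * and an open descriptor "pagemap_fd" for /proc/PID/pagemap:
 *
 *	uint64_t ent;
 *	off_t off = (vaddr / 4096) * 8;		// one u64 per page
 *	pread(pagemap_fd, &ent, sizeof(ent), off);
 *	int present = (ent >> 63) & 1;
 *	int swapped = (ent >> 62) & 1;
 *	uint64_t pfn = present ? (ent & ((1ULL << 55) - 1)) : 0;
 */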
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else {
		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_puts(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}
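
/*
 * Illustrative only (values are made up): a /proc/PID/numa_maps line as
 * assembled above might read
 *
 *	00400000 default file=/bin/cat mapped=11 mapmax=2 N0=11 kernelpagesize_kB=4
 */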

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */