/* include/linux/mm_types_task.h */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
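/*
 * Illustrative sketch, not part of this header: what the macros above
 * encode.  USE_SPLIT_PTE_PTLOCKS switches from one mm-wide page-table
 * lock to one lock per page-table page once NR_CPUS reaches
 * CONFIG_SPLIT_PTLOCK_CPUS, and ALLOC_SPLIT_PTLOCKS becomes true when
 * spinlock_t (e.g. grown by lockdep) no longer fits in the word-sized
 * slot of struct page, so the lock has to be allocated out of line
 * (see ptlock_alloc() in mm/memory.c).
 */
#if USE_SPLIT_PTE_PTLOCKS
# if ALLOC_SPLIT_PTLOCKS
   /* per page-table-page lock, allocated separately and referenced
    * from struct page */
# else
   /* per page-table-page lock, embedded directly in struct page */
# endif
#else
   /* every page table in the mm is serialized by mm->page_table_lock */
#endif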
/*
 * The per task VMA cache array:
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

struct vmacache {
	u32 seqnum;
	struct vm_area_struct *vmas[VMACACHE_SIZE];
};
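/*
 * Illustrative sketch, not part of this header: how the per-task VMA
 * cache is consumed.  A lookup hashes the address into one of the
 * VMACACHE_SIZE slots and only trusts the cached VMA if the cache's
 * seqnum still matches the mm's sequence count.  The real code lives
 * in mm/vmacache.c; the helper below is a simplified, hypothetical
 * rendition of it and assumes the full struct vm_area_struct
 * definition from <linux/mm_types.h> is visible.
 */
static inline struct vm_area_struct *
vmacache_lookup_sketch(struct vmacache *cache, u32 mm_seqnum,
		       unsigned long addr)
{
	unsigned int idx = (addr >> PAGE_SHIFT) & VMACACHE_MASK;
	struct vm_area_struct *vma = cache->vmas[idx];

	/* A seqnum mismatch means the mm has changed under us. */
	if (cache->seqnum != mm_seqnum)
		return NULL;
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;
	return NULL;
}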
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
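/*
 * Illustrative sketch, not part of this header: the idea behind
 * SPLIT_RSS_COUNTING.  Each thread batches RSS updates in its plain-int
 * task_rss_stat and only folds them into the shared atomic mm_rss_stat
 * once enough events have accumulated, saving an atomic op per page
 * fault.  The threshold and helper below are hypothetical
 * simplifications of the real logic in mm/memory.c.
 */
#ifdef SPLIT_RSS_COUNTING
#define RSS_SYNC_THRESH_SKETCH	64	/* hypothetical batching threshold */

static inline void rss_stat_add_sketch(struct task_rss_stat *local,
				       struct mm_rss_stat *shared,
				       int member, int val)
{
	local->count[member] += val;
	if (++local->events < RSS_SYNC_THRESH_SKETCH)
		return;

	/* Fold the whole local batch into the shared atomic counters. */
	local->events = 0;
	for (member = 0; member < NR_MM_COUNTERS; member++) {
		if (local->count[member]) {
			atomic_long_add(local->count[member],
					&shared->count[member]);
			local->count[member] = 0;
		}
	}
}
#endif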
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
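/*
 * Illustrative sketch, not part of this header: how a page_frag is
 * used.  Callers such as the networking code keep one page and carve
 * successive fragments out of it, advancing 'offset' until 'size' is
 * exhausted and a fresh page must be attached.  The helper below is a
 * hypothetical condensation of that pattern.
 */
static inline bool page_frag_carve_sketch(struct page_frag *pfrag,
					  unsigned int len,
					  unsigned int *frag_offset)
{
	/* No page attached, or not enough room left for this fragment. */
	if (!pfrag->page || pfrag->offset + len > pfrag->size)
		return false;

	*frag_offset = pfrag->offset;
	pfrag->offset += len;	/* the next fragment starts after this one */
	return true;
}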
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
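/*
 * Illustrative sketch, not part of this header: the contract spelled
 * out in the comment above.  Unmap-time code queues each mm whose PTEs
 * it cleared, then issues a single batched flush before the pages can
 * be reused; the helpers below are a hypothetical condensation of the
 * real users in mm/rmap.c.
 */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
struct mm_struct;	/* forward declaration for the sketch */

static inline void tlb_batch_queue_sketch(struct tlbflush_unmap_batch *batch,
					  struct mm_struct *mm,
					  bool pte_was_dirty)
{
	arch_tlbbatch_add_mm(&batch->arch, mm);	/* arch supplies barriers */
	batch->flush_required = true;
	/* Dirty PTEs must be flushed before I/O is started on the page. */
	if (pte_was_dirty)
		batch->writable = true;
}

static inline void tlb_batch_flush_sketch(struct tlbflush_unmap_batch *batch)
{
	if (!batch->flush_required)
		return;
	arch_tlbbatch_flush(&batch->arch);	/* flush entries on all CPUs */
	batch->flush_required = false;
	batch->writable = false;
}
#endif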
#endif /* _LINUX_MM_TYPES_TASK_H */