1 #ifndef _LINUX_MM_TYPES_TASK_H
2 #define _LINUX_MM_TYPES_TASK_H
/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */
10 #include <linux/types.h>
11 #include <linux/threads.h>
12 #include <linux/atomic.h>
13 #include <linux/cpumask.h>
/* Arch-provided TLB-flush batching state; only present when the arch opts in. */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif
/*
 * Split page-table locks: enabled when the system has enough CPUs that
 * per-page-table locks pay off over a single mm-wide lock.  PMD-level
 * split locks additionally require arch support.  ALLOC_SPLIT_PTLOCKS
 * is true when a spinlock no longer fits in the page struct's slot and
 * must be allocated separately.
 */
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
 * The per task VMA cache array: 2^VMACACHE_BITS slots; VMACACHE_MASK
 * maps an arbitrary index/hash onto a slot.
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
35 struct vm_area_struct
*vmas
[VMACACHE_SIZE
];
/*
 * Per-mm RSS counter indices.  NR_MM_COUNTERS terminates the enum and
 * sizes the count[] arrays below (it is referenced by task_rss_stat and
 * mm_rss_stat); the 'enum {' opener and terminator were lost in
 * extraction and are restored here.
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information, */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];	/* cached per-counter values */
};	/* closing brace restored; it was lost in extraction */
#endif /* USE_SPLIT_PTE_PTLOCKS */
56 atomic_long_t count
[NR_MM_COUNTERS
];
61 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;	/* NOTE(review): member restored from upstream */

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writeable;		/* NOTE(review): member restored from upstream */
#endif
};
94 #endif /* _LINUX_MM_TYPES_TASK_H */