/*
 * include/linux/mm_types_task.h
 * Source: mirror_ubuntu-artful-kernel.git (git.proxmox.com mirror);
 * last touched by commit "arm,arm64,drivers: move externs in a new header file".
 */
1 #ifndef _LINUX_MM_TYPES_TASK_H
2 #define _LINUX_MM_TYPES_TASK_H
3
4 /*
5 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
6 *
7 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
8 */
9
10 #include <linux/types.h>
11 #include <linux/threads.h>
12 #include <linux/atomic.h>
13 #include <linux/cpumask.h>
14
15 #include <asm/page.h>
16
/*
 * Split page-table locking is enabled once the machine is large enough
 * (NR_CPUS reaches CONFIG_SPLIT_PTLOCK_CPUS); PMD-level split locking
 * additionally requires explicit architecture support.
 */
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
/*
 * BITS_PER_LONG/8 is sizeof(long) in bytes: if the spinlock is larger
 * than an unsigned long it cannot be embedded and must be allocated
 * separately (NOTE(review): embedding target presumably struct page's
 * ptl field — confirm against mm_types.h).
 */
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
21
/*
 * The per task VMA cache array:
 * VMACACHE_BITS picks the cache size (2 bits -> 4 slots); VMACACHE_MASK
 * is the corresponding slot-index mask.
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
28
/*
 * Small per-task cache of recently used VMAs (VMACACHE_SIZE slots).
 * NOTE(review): seqnum is presumably compared against an mm-wide
 * sequence number to invalidate all slots at once after a VMA change —
 * confirm against mm/vmacache.c.
 */
struct vmacache {
	u32 seqnum;				/* cache generation/validity tag */
	struct vm_area_struct *vmas[VMACACHE_SIZE];
};
33
/*
 * Indices into the RSS counter arrays below (struct mm_rss_stat and,
 * when SPLIT_RSS_COUNTING, struct task_rss_stat).
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS	/* number of counters above, not a counter itself */
};
41
#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/*
 * per-thread cached information:
 * signed per-counter deltas accumulated locally. NOTE(review):
 * presumably flushed into the mm-wide struct mm_rss_stat once 'events'
 * crosses a synchronization threshold — confirm against mm/memory.c.
 */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];	/* indexed by MM_* enum above */
};
#endif /* USE_SPLIT_PTE_PTLOCKS */
50
/*
 * mm-wide RSS statistics, indexed by the MM_* enum above.
 * atomic_long_t allows updates from concurrent threads sharing the mm
 * without a dedicated lock.
 */
struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
54
/*
 * A (offset, size) fragment within a page.
 * 16-bit fields can address at most 65535 bytes, so offset/size are
 * widened to 32 bits when PAGE_SIZE is 64KiB or larger; 64-bit builds
 * (BITS_PER_LONG > 32) also take the wide variant, keeping the struct
 * padded to the same overall size either way.
 */
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
65
/*
 * Track pages that require TLB flushes.
 * Empty (zero-size contents) unless the architecture opts in to
 * batched unmap TLB flushing.
 */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
86
87 #endif /* _LINUX_MM_TYPES_TASK_H */