#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
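/*
 * Worked example (illustrative): each auxv entry is an (a_type, a_un)
 * pair, hence the factor of two, and the +1 leaves room for the AT_NULL
 * terminator.  Assuming the common AT_VECTOR_SIZE_BASE of 20 and an
 * architecture contributing no extra entries, saved_auxv below holds
 * 2 * (0 + 20 + 1) = 42 unsigned longs.
 */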
23 | ||
24 | struct address_space; | |
25 | ||
26 | #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) | |
27 | #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ | |
28 | IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) | |
29 | ||
30 | /* | |
31 | * Each physical page in the system has a struct page associated with | |
32 | * it to keep track of whatever it is we are using the page for at the | |
33 | * moment. Note that we have no way to track which tasks are using | |
34 | * a page, though if it is a pagecache page, rmap structures can tell us | |
35 | * who is mapping it. | |
36 | * | |
37 | * The objects in struct page are organized in double word blocks in | |
38 | * order to allows us to use atomic double word operations on portions | |
39 | * of struct page. That is currently only used by slub but the arrangement | |
40 | * allows the use of atomic double word operations on the flags/mapping | |
41 | * and lru list pointers also. | |
42 | */ | |
43 | struct page { | |
44 | /* First double word block */ | |
45 | unsigned long flags; /* Atomic flags, some possibly | |
46 | * updated asynchronously */ | |
47 | struct address_space *mapping; /* If low bit clear, points to | |
48 | * inode address_space, or NULL. | |
49 | * If page mapped as anonymous | |
50 | * memory, low bit is set, and | |
51 | * it points to anon_vma object: | |
52 | * see PAGE_MAPPING_ANON below. | |
53 | */ | |
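	/*
	 * Illustrative sketch (hedged): the anon test in <linux/mm.h> checks
	 * that low bit, essentially:
	 *
	 *	static inline int PageAnon(struct page *page)
	 *	{
	 *		return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
	 *	}
	 *
	 * The exact form may differ by kernel version.
	 */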
	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* slub/slob first free object */
			bool pfmemalloc;	/* If set by the page allocator,
						 * ALLOC_NO_WATERMARKS was set
						 * and the low watermark was not
						 * met implying that the system
						 * is under some pressure. The
						 * caller should try to ensure
						 * this page is only used to
						 * free other pages.
						 */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
		};

		union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
			/* Used for cmpxchg_double in slub */
			unsigned long counters;
#else
			/*
			 * Keep _count separate from slub cmpxchg_double data:
			 * the rest of the double word is protected by
			 * slab_lock, but _count is not.
			 */
			unsigned counters;
#endif

			struct {

				union {
					/*
					 * Count of ptes mapped in
					 * mms, to show when page is
					 * mapped & limit reverse map
					 * searches.
					 *
					 * Used also for tail pages
					 * refcounting instead of
					 * _count. Tail pages cannot
					 * be mapped and keeping the
					 * tail page _count zero at
					 * all times guarantees
					 * get_page_unless_zero() will
					 * never succeed on tail
					 * pages.
					 */
					atomic_t _mapcount;

					struct { /* SLUB */
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
					int units;	/* SLOB */
				};
				atomic_t _count;	/* Usage count, see below. */
			};
		};
	};
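	/*
	 * Illustrative sketch (hedged): get_page_unless_zero() in
	 * <linux/mm.h> is essentially atomic_inc_not_zero(&page->_count),
	 * which is why pinning a tail page's _count at zero (see the
	 * _mapcount comment above) guarantees it can never succeed on a
	 * tail page.
	 */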
119 | ||
120 | /* Third double word block */ | |
121 | union { | |
122 | struct list_head lru; /* Pageout list, eg. active_list | |
123 | * protected by zone->lru_lock ! | |
124 | */ | |
125 | struct { /* slub per cpu partial pages */ | |
126 | struct page *next; /* Next partial slab */ | |
127 | #ifdef CONFIG_64BIT | |
128 | int pages; /* Nr of partial slabs left */ | |
129 | int pobjects; /* Approximate # of objects */ | |
130 | #else | |
131 | short int pages; | |
132 | short int pobjects; | |
133 | #endif | |
134 | }; | |
135 | ||
136 | struct list_head list; /* slobs list of pages */ | |
137 | struct slab *slab_page; /* slab fields */ | |
138 | }; | |
139 | ||
140 | /* Remainder is not double word aligned */ | |
141 | union { | |
142 | unsigned long private; /* Mapping-private opaque data: | |
143 | * usually used for buffer_heads | |
144 | * if PagePrivate set; used for | |
145 | * swp_entry_t if PageSwapCache; | |
146 | * indicates order in the buddy | |
147 | * system if PG_buddy is set. | |
148 | */ | |
149 | #if USE_SPLIT_PTE_PTLOCKS | |
150 | #if BLOATED_SPINLOCKS | |
151 | spinlock_t *ptl; | |
152 | #else | |
153 | spinlock_t ptl; | |
154 | #endif | |
155 | #endif | |
156 | struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ | |
157 | struct page *first_page; /* Compound tail pages */ | |
158 | }; | |
159 | ||
160 | /* | |
161 | * On machines where all RAM is mapped into kernel address space, | |
162 | * we can simply calculate the virtual address. On machines with | |
163 | * highmem some memory is mapped into kernel virtual memory | |
164 | * dynamically, so we need a place to store that address. | |
165 | * Note that this field could be 16 bits on x86 ... ;) | |
166 | * | |
167 | * Architectures with slow multiplication can define | |
168 | * WANT_PAGE_VIRTUAL in asm/page.h | |
169 | */ | |
170 | #if defined(WANT_PAGE_VIRTUAL) | |
171 | void *virtual; /* Kernel virtual address (NULL if | |
172 | not kmapped, ie. highmem) */ | |
173 | #endif /* WANT_PAGE_VIRTUAL */ | |
174 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | |
175 | unsigned long debug_flags; /* Use atomic bitops on this */ | |
176 | #endif | |
177 | ||
178 | #ifdef CONFIG_KMEMCHECK | |
179 | /* | |
180 | * kmemcheck wants to track the status of each byte in a page; this | |
181 | * is a pointer to such a status block. NULL if not tracked. | |
182 | */ | |
183 | void *shadow; | |
184 | #endif | |
185 | ||
186 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS | |
187 | int _last_cpupid; | |
188 | #endif | |
189 | } | |
190 | /* | |
191 | * The struct page can be forced to be double word aligned so that atomic ops | |
192 | * on double words work. The SLUB allocator can make use of such a feature. | |
193 | */ | |
194 | #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE | |
195 | __aligned(2 * sizeof(unsigned long)) | |
196 | #endif | |
197 | ; | |
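/*
 * Illustrative sketch (hedged, not part of this header): the double word
 * alignment above is what lets SLUB swap the freelist pointer and the
 * counters word in one atomic step on CONFIG_HAVE_CMPXCHG_DOUBLE
 * configurations, roughly:
 *
 *	cmpxchg_double(&page->freelist, &page->counters,
 *		       freelist_old, counters_old,
 *		       freelist_new, counters_new);
 *
 * See __cmpxchg_double_slab() in mm/slub.c for the real version, which
 * also provides a slab_lock()-based fallback.
 */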
198 | ||
199 | struct page_frag { | |
200 | struct page *page; | |
201 | #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) | |
202 | __u32 offset; | |
203 | __u32 size; | |
204 | #else | |
205 | __u16 offset; | |
206 | __u16 size; | |
207 | #endif | |
208 | }; | |
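/*
 * A plausible reading of the #if above (hedged): a __u16 can index at most
 * 65535 bytes, so once PAGE_SIZE reaches 64 KiB the wider __u32 fields are
 * required; on 64-bit machines structure padding makes the wider fields
 * effectively free anyway.
 */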
209 | ||
210 | typedef unsigned long __nocast vm_flags_t; | |
211 | ||
212 | /* | |
213 | * A region containing a mapping of a non-memory backed file under NOMMU | |
214 | * conditions. These are held in a global tree and are pinned by the VMAs that | |
215 | * map parts of them. | |
216 | */ | |
217 | struct vm_region { | |
218 | struct rb_node vm_rb; /* link in global region tree */ | |
219 | vm_flags_t vm_flags; /* VMA vm_flags */ | |
220 | unsigned long vm_start; /* start address of region */ | |
221 | unsigned long vm_end; /* region initialised to here */ | |
222 | unsigned long vm_top; /* region allocated to here */ | |
223 | unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ | |
224 | struct file *vm_file; /* the backing file or NULL */ | |
225 | ||
226 | int vm_usage; /* region usage count (access under nommu_region_sem) */ | |
227 | bool vm_icache_flushed : 1; /* true if the icache has been flushed for | |
228 | * this region */ | |
229 | }; | |
230 | ||
231 | /* | |
232 | * This struct defines a memory VMM memory area. There is one of these | |
233 | * per VM-area/task. A VM area is any part of the process virtual memory | |
234 | * space that has a special rule for the page-fault handlers (ie a shared | |
235 | * library, the executable area etc). | |
236 | */ | |
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;
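	/*
	 * Illustrative sketch (hedged): the gap for a single VMA is
	 * conceptually
	 *
	 *	gap = vma->vm_start - vma->vm_prev->vm_end;
	 *
	 * and rb_subtree_gap caches the maximum of that value over the
	 * subtree rooted here, so get_unmapped_area() can skip whole
	 * subtrees whose largest gap is too small.  See
	 * vma_compute_subtree_gap() in mm/mmap.c for the real code.
	 */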
256 | ||
257 | /* Second cache line starts here. */ | |
258 | ||
259 | struct mm_struct *vm_mm; /* The address space we belong to. */ | |
260 | pgprot_t vm_page_prot; /* Access permissions of this VMA. */ | |
261 | unsigned long vm_flags; /* Flags, see mm.h. */ | |
262 | ||
263 | /* | |
264 | * For areas with an address space and backing store, | |
265 | * linkage into the address_space->i_mmap interval tree, or | |
266 | * linkage of vma in the address_space->i_mmap_nonlinear list. | |
267 | */ | |
268 | union { | |
269 | struct { | |
270 | struct rb_node rb; | |
271 | unsigned long rb_subtree_last; | |
272 | } linear; | |
273 | struct list_head nonlinear; | |
274 | } shared; | |
275 | ||
276 | /* | |
277 | * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma | |
278 | * list, after a COW of one of the file pages. A MAP_SHARED vma | |
279 | * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack | |
280 | * or brk vma (with NULL file) can only be in an anon_vma list. | |
281 | */ | |
282 | struct list_head anon_vma_chain; /* Serialized by mmap_sem & | |
283 | * page_table_lock */ | |
284 | struct anon_vma *anon_vma; /* Serialized by page_table_lock */ | |
285 | ||
286 | /* Function pointers to deal with this struct. */ | |
287 | const struct vm_operations_struct *vm_ops; | |
288 | ||
289 | /* Information about our backing store: */ | |
290 | unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE | |
291 | units, *not* PAGE_CACHE_SIZE */ | |
292 | struct file * vm_file; /* File we map to (can be NULL). */ | |
293 | void * vm_private_data; /* was vm_pte (shared mem) */ | |
294 | ||
295 | #ifndef CONFIG_MMU | |
296 | struct vm_region *vm_region; /* NOMMU mapping region */ | |
297 | #endif | |
298 | #ifdef CONFIG_NUMA | |
299 | struct mempolicy *vm_policy; /* NUMA policy for the VMA */ | |
300 | #endif | |
301 | }; | |
302 | ||
303 | struct core_thread { | |
304 | struct task_struct *task; | |
305 | struct core_thread *next; | |
306 | }; | |
307 | ||
308 | struct core_state { | |
309 | atomic_t nr_threads; | |
310 | struct core_thread dumper; | |
311 | struct completion startup; | |
312 | }; | |
313 | ||
314 | enum { | |
315 | MM_FILEPAGES, | |
316 | MM_ANONPAGES, | |
317 | MM_SWAPENTS, | |
318 | NR_MM_COUNTERS | |
319 | }; | |
320 | ||
321 | #if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) | |
322 | #define SPLIT_RSS_COUNTING | |
323 | /* per-thread cached information, */ | |
324 | struct task_rss_stat { | |
325 | int events; /* for synchronization threshold */ | |
326 | int count[NR_MM_COUNTERS]; | |
327 | }; | |
328 | #endif /* USE_SPLIT_PTE_PTLOCKS */ | |
329 | ||
330 | struct mm_rss_stat { | |
331 | atomic_long_t count[NR_MM_COUNTERS]; | |
332 | }; | |
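/*
 * Illustrative sketch (hedged): readers such as get_mm_counter() in
 * <linux/mm.h> pull a value out with
 *
 *	atomic_long_read(&mm->rss_stat.count[MM_ANONPAGES]);
 *
 * while SPLIT_RSS_COUNTING configurations batch updates in the per-thread
 * task_rss_stat above and fold them into these atomics once the events
 * threshold is crossed.
 */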
333 | ||
334 | struct kioctx_table; | |
335 | struct mm_struct { | |
336 | struct vm_area_struct * mmap; /* list of VMAs */ | |
337 | struct rb_root mm_rb; | |
338 | struct vm_area_struct * mmap_cache; /* last find_vma result */ | |
339 | #ifdef CONFIG_MMU | |
340 | unsigned long (*get_unmapped_area) (struct file *filp, | |
341 | unsigned long addr, unsigned long len, | |
342 | unsigned long pgoff, unsigned long flags); | |
343 | #endif | |
344 | unsigned long mmap_base; /* base of mmap area */ | |
345 | unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ | |
346 | unsigned long task_size; /* size of task vm space */ | |
347 | unsigned long highest_vm_end; /* highest vma end address */ | |
348 | pgd_t * pgd; | |
349 | atomic_t mm_users; /* How many users with user space? */ | |
350 | atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ | |
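	/*
	 * Illustrative note (hedged): mm_users is dropped by mmput(), which
	 * tears the address space down when it reaches zero; mm_count is
	 * dropped by mmdrop(), which frees the struct itself.  Lazy-TLB
	 * kernel threads therefore pin only mm_count, not mm_users.
	 */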
	atomic_long_t nr_ptes;			/* Page table pages */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */


	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long shared_vm;	/* Shared pages (files) */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
	struct uprobes_state uprobes_state;
};

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
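/*
 * Illustrative usage (hedged): arch context-switch code typically marks a
 * CPU in the mask before loading the new page tables, e.g.
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next));
 *
 * and TLB shootdown paths test it with cpumask_test_cpu() to decide which
 * CPUs need an IPI.
 */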
453 | ||
454 | #endif /* _LINUX_MM_TYPES_H */ |