#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

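/*
 * Editor's note (assumption, not from this header): the factor of two is
 * there because each ELF auxiliary vector entry is an (a_type, a_val)
 * pair of longs, and the "+ 1" reserves room for the terminating AT_NULL
 * entry. The result sizes the saved_auxv[] array in struct mm_struct
 * below.
 */
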
struct address_space;
struct mem_cgroup;

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
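
/*
 * Editor's note (assumption, summarizing the macros above): PTE-level
 * split page table locks kick in once the system is large enough
 * (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS); the PMD level additionally
 * requires architecture opt-in. ALLOC_SPLIT_PTLOCKS selects a separately
 * allocated spinlock when spinlock_t is too large to embed in struct
 * page (e.g. with lock debugging enabled).
 */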

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* first tail page */
	};

	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* sl[aou]b first free object */
		};

		union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
			/* Used for cmpxchg_double in slub */
			unsigned long counters;
#else
			/*
			 * Keep _count separate from slub cmpxchg_double data.
			 * As the rest of the double word is protected by
			 * slab_lock but _count is not.
			 */
			unsigned counters;
#endif

			struct {

				union {
					/*
					 * Count of ptes mapped in mms, to show
					 * when page is mapped & limit reverse
					 * map searches.
					 */
					atomic_t _mapcount;

					struct { /* SLUB */
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
					int units;	/* SLOB */
				};
				atomic_t _count;	/* Usage count, see below. */
			};
			unsigned int active;	/* SLAB */
		};
	};

	/*
	 * Third double word block
	 *
	 * WARNING: bit 0 of the first word encodes PageTail(). That means
	 * the other users of this storage space MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* Tail pages of compound page */
		struct {
			unsigned long compound_head; /* If bit zero is set */

			/* First tail page only */
#ifdef CONFIG_64BIT
			/*
			 * On 64 bit systems we have enough space in struct page
			 * to encode compound_dtor and compound_order with
			 * unsigned int. It can help the compiler generate better
			 * or smaller code on some architectures.
			 */
			unsigned int compound_dtor;
			unsigned int compound_order;
#else
			unsigned short int compound_dtor;
			unsigned short int compound_order;
#endif
		};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * possible bit 0 collision.
						 */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
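
/*
 * Editor's sketch (assumption; the real helpers are PageTail() and
 * compound_head() in linux/page-flags.h, and the name below is
 * hypothetical): bit 0 of compound_head marks a tail page and, when set,
 * the remaining bits point at the head page:
 *
 *	static inline struct page *my_compound_head(struct page *page)
 *	{
 *		unsigned long head = READ_ONCE(page->compound_head);
 *
 *		if (head & 1)
 *			return (struct page *)(head - 1);
 *		return page;
 *	}
 */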

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* We maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_count every time we allocate a fragment.
	 */
	unsigned int pagecnt_bias;
	bool pfmemalloc;
};
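
/*
 * Editor's sketch of the pagecnt_bias idea (assumption; compare
 * __alloc_page_frag() in mm/page_alloc.c, and the helper name below is
 * hypothetical): the cache takes one large page->_count reference up
 * front and then pays for each fragment out of the local bias, so the
 * shared atomic is only touched when the cache is refilled:
 *
 *	void *frag_alloc(struct page_frag_cache *nc, unsigned int fragsz)
 *	{
 *		if (nc->offset < fragsz)
 *			return NULL;	// caller refills and re-arms the bias
 *		nc->offset -= fragsz;	// carve from the end downwards
 *		nc->pagecnt_bias--;	// plain store, no atomic RMW
 *		return nc->va + nc->offset;
 *	}
 */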

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
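
/*
 * Editor's sketch (assumption; compare find_vma() in mm/mmap.c, which
 * returns the first VMA with vm_end > addr, and the helper name below is
 * hypothetical): a VMA covers the half-open range [vm_start, vm_end), so
 * a containment test is:
 *
 *	static inline bool vma_contains(struct vm_area_struct *vma,
 *					unsigned long addr)
 *	{
 *		return addr >= vma->vm_start && addr < vma->vm_end;
 *	}
 */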

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
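
/*
 * Editor's sketch (assumption; the real accessors are get_mm_counter()
 * and friends in linux/mm.h): reading one of the counters above is just
 *
 *	unsigned long anon = atomic_long_read(&mm->rss_stat.count[MM_ANONPAGES]);
 *
 * With SPLIT_RSS_COUNTING, per-thread task_rss_stat deltas are folded
 * into mm_rss_stat periodically, so readers may see slightly stale values.
 */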

struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t *pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	atomic_long_t nr_ptes;			/* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
	atomic_long_t nr_pmds;			/* PMD page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's. These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED/GROWSDOWN */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	bool tlb_flush_pending;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_X86_INTEL_MPX
	/* address of the bounds directory */
	void __user *bd_addr;
#endif
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
};
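
/*
 * Editor's note (assumption, based on the mm_users/mm_count comments
 * above): mm_users counts tasks sharing the address space, while
 * mm_count pins struct mm_struct itself (all mm_users together hold a
 * single mm_count reference). A lazy-TLB style reference to just the
 * struct looks like:
 *
 *	atomic_inc(&mm->mm_count);	// keep the mm_struct allocated
 *	...
 *	mmdrop(mm);			// frees it once mm_count hits zero
 */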

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;

	/*
	 * Guarantee that the tlb_flush_pending store does not leak into the
	 * critical section updating the page tables
	 */
	smp_mb__before_spinlock();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	mm->tlb_flush_pending = false;
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
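
/*
 * Editor's sketch (assumption): a caller that batches TLB flushes while
 * changing protections brackets its page table updates like this:
 *
 *	set_tlb_flush_pending(mm);
 *	... update PTEs under the page table lock ...
 *	flush_tlb_range(vma, start, end);
 *	clear_tlb_flush_pending(mm);
 */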

struct vm_special_mapping
{
	const char *name;
	struct page **pages;
};
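
/*
 * Editor's note (assumption, not stated in this header): this descriptor
 * backs special VMAs such as the vDSO; it is typically handed to
 * _install_special_mapping() rather than created by normal mmap() paths.
 */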

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
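
/*
 * Editor's note (assumption; the real encoding lives in linux/swapops.h
 * as swp_entry()/swp_type()/swp_offset()): the value packs a swap device
 * index and a page offset into the single word, roughly
 *
 *	entry.val = (type << shift) | offset;
 */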

#endif /* _LINUX_MM_TYPES_H */