#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
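
/*
 * Illustrative sketch (not part of this header): with split PTE ptlocks,
 * the lock protecting a page-table page lives in that page's struct page
 * (the "ptl" field below) rather than in the single mm->page_table_lock,
 * so unrelated page tables can be locked concurrently. The selection
 * logic is roughly the following; the real helpers live in <linux/mm.h>.
 */
#if 0
static inline spinlock_t *example_pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
#if USE_SPLIT_PTE_PTLOCKS
	/* one lock per page-table page, stored in its struct page */
	return ptlock_ptr(pmd_page(*pmd));
#else
	/* fall back to the mm-wide lock */
	return &mm->page_table_lock;
#endif
}
#endif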

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
		void *s_mem;			/* slab first object */
	};

	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* sl[aou]b first free object */
			bool pfmemalloc;	/* If set by the page allocator,
						 * ALLOC_NO_WATERMARKS was set
						 * and the low watermark was not
						 * met, implying that the system
						 * is under some pressure. The
						 * caller should try to ensure
						 * this page is only used to
						 * free other pages.
						 */
		};
73 | ||
74 | union { | |
abca7c49 PS |
75 | #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ |
76 | defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) | |
013e8963 CL |
77 | /* Used for cmpxchg_double in slub */ |
78 | unsigned long counters; | |
abca7c49 PS |
79 | #else |
80 | /* | |
81 | * Keep _count separate from slub cmpxchg_double data. | |
82 | * As the rest of the double word is protected by | |
83 | * slab_lock but _count is not. | |
84 | */ | |
85 | unsigned counters; | |
86 | #endif | |
013e8963 CL |
87 | |
88 | struct { | |
89 | ||
90 | union { | |
70b50f94 AA |
91 | /* |
92 | * Count of ptes mapped in | |
93 | * mms, to show when page is | |
94 | * mapped & limit reverse map | |
95 | * searches. | |
96 | * | |
97 | * Used also for tail pages | |
98 | * refcounting instead of | |
99 | * _count. Tail pages cannot | |
100 | * be mapped and keeping the | |
101 | * tail page _count zero at | |
102 | * all times guarantees | |
103 | * get_page_unless_zero() will | |
104 | * never succeed on tail | |
105 | * pages. | |
106 | */ | |
107 | atomic_t _mapcount; | |
fc9bb8c7 | 108 | |
b8c24c4a | 109 | struct { /* SLUB */ |
013e8963 CL |
110 | unsigned inuse:16; |
111 | unsigned objects:15; | |
112 | unsigned frozen:1; | |
113 | }; | |
b8c24c4a | 114 | int units; /* SLOB */ |
3adf004d | 115 | }; |
013e8963 | 116 | atomic_t _count; /* Usage count, see below. */ |
fc9bb8c7 | 117 | }; |
8456a648 | 118 | unsigned int active; /* SLAB */ |
39b26464 | 119 | }; |
81819f0f | 120 | }; |

	/* Third double word block */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct list_head list;	/* slobs list of pages */
		struct slab *slab_page; /* slab fields */
		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTE_PTLOCKS
#if BLOATED_SPINLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
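
/*
 * Illustrative sketch (not part of this header): the mapping/s_mem union
 * above encodes the page type in the low bit of page->mapping. A test of
 * the kind done by PageAnon() in <linux/mm.h> looks roughly like this;
 * PAGE_MAPPING_ANON is the low-bit flag described in the comment above.
 */
#if 0
static inline int example_page_is_anon(struct page *page)
{
	/* low bit set: mapping points at an anon_vma, not an address_space */
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}
#endif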

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
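
/*
 * Illustrative sketch (not part of this header): a page_frag is a cursor
 * into a page, as used by sub-page allocators such as the networking
 * frag allocator. Carving a chunk out of the current page might look
 * roughly like this (hypothetical helper, no page refcounting shown):
 */
#if 0
static void *example_frag_alloc(struct page_frag *pf, unsigned int len)
{
	void *va;

	if (pf->offset + len > pf->size)
		return NULL;		/* caller must refill with a fresh page */
	va = page_address(pf->page) + pf->offset;
	pf->offset += len;		/* advance the cursor */
	return va;
}
#endif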
217 | ||
ca16d140 KM |
218 | typedef unsigned long __nocast vm_flags_t; |
219 | ||

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};
238 | ||
c92ff1bd MS |
239 | /* |
240 | * This struct defines a memory VMM memory area. There is one of these | |
241 | * per VM-area/task. A VM area is any part of the process virtual memory | |
242 | * space that has a special rule for the page-fault handlers (ie a shared | |
243 | * library, the executable area etc). | |
244 | */ | |
245 | struct vm_area_struct { | |
e4c6bfd2 RR |
246 | /* The first cache line has the info for VMA tree walking. */ |
247 | ||
c92ff1bd MS |
248 | unsigned long vm_start; /* Our start address within vm_mm. */ |
249 | unsigned long vm_end; /* The first byte after our end address | |
250 | within vm_mm. */ | |
251 | ||
252 | /* linked list of VM areas per task, sorted by address */ | |
297c5eee | 253 | struct vm_area_struct *vm_next, *vm_prev; |
c92ff1bd | 254 | |
c92ff1bd MS |
255 | struct rb_node vm_rb; |
256 | ||
d3737187 ML |
257 | /* |
258 | * Largest free memory gap in bytes to the left of this VMA. | |
259 | * Either between this VMA and vma->vm_prev, or between one of the | |
260 | * VMAs below us in the VMA rbtree and its ->vm_prev. This helps | |
261 | * get_unmapped_area find a free area of the right size. | |
262 | */ | |
263 | unsigned long rb_subtree_gap; | |
264 | ||
e4c6bfd2 RR |
265 | /* Second cache line starts here. */ |
266 | ||
267 | struct mm_struct *vm_mm; /* The address space we belong to. */ | |
268 | pgprot_t vm_page_prot; /* Access permissions of this VMA. */ | |
269 | unsigned long vm_flags; /* Flags, see mm.h. */ | |
270 | ||
c92ff1bd MS |
271 | /* |
272 | * For areas with an address space and backing store, | |
6b2dbba8 | 273 | * linkage into the address_space->i_mmap interval tree, or |
c92ff1bd MS |
274 | * linkage of vma in the address_space->i_mmap_nonlinear list. |
275 | */ | |
276 | union { | |
277 | struct { | |
6b2dbba8 ML |
278 | struct rb_node rb; |
279 | unsigned long rb_subtree_last; | |
280 | } linear; | |
281 | struct list_head nonlinear; | |
c92ff1bd MS |
282 | } shared; |
283 | ||
284 | /* | |
285 | * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma | |
286 | * list, after a COW of one of the file pages. A MAP_SHARED vma | |
287 | * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack | |
288 | * or brk vma (with NULL file) can only be in an anon_vma list. | |
289 | */ | |
5beb4930 RR |
290 | struct list_head anon_vma_chain; /* Serialized by mmap_sem & |
291 | * page_table_lock */ | |
c92ff1bd MS |
292 | struct anon_vma *anon_vma; /* Serialized by page_table_lock */ |
293 | ||
294 | /* Function pointers to deal with this struct. */ | |
f0f37e2f | 295 | const struct vm_operations_struct *vm_ops; |
c92ff1bd MS |
296 | |
297 | /* Information about our backing store: */ | |
298 | unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE | |
299 | units, *not* PAGE_CACHE_SIZE */ | |
300 | struct file * vm_file; /* File we map to (can be NULL). */ | |
301 | void * vm_private_data; /* was vm_pte (shared mem) */ | |
c92ff1bd MS |
302 | |
303 | #ifndef CONFIG_MMU | |
8feae131 | 304 | struct vm_region *vm_region; /* NOMMU mapping region */ |
c92ff1bd MS |
305 | #endif |
306 | #ifdef CONFIG_NUMA | |
307 | struct mempolicy *vm_policy; /* NUMA policy for the VMA */ | |
308 | #endif | |
309 | }; | |
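
/*
 * Illustrative sketch (not part of this header): VMAs hang off an
 * mm_struct both as an address-sorted list (vm_next/vm_prev) and as an
 * rbtree (vm_rb). A linear lookup of the first VMA ending above an
 * address, similar in spirit to find_vma(), could be written as below;
 * the real find_vma() walks mm->mm_rb and must run under mmap_sem.
 */
#if 0
static struct vm_area_struct *example_find_vma(struct mm_struct *mm,
					       unsigned long addr)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (addr < vma->vm_end)
			return vma;	/* note: may still start above addr */
	return NULL;
}
#endif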
310 | ||
b564daf8 ON |
311 | struct core_thread { |
312 | struct task_struct *task; | |
313 | struct core_thread *next; | |
314 | }; | |
315 | ||
32ecb1f2 | 316 | struct core_state { |
c5f1cc8c | 317 | atomic_t nr_threads; |
b564daf8 | 318 | struct core_thread dumper; |
32ecb1f2 ON |
319 | struct completion startup; |
320 | }; | |
321 | ||
d559db08 KH |
322 | enum { |
323 | MM_FILEPAGES, | |
324 | MM_ANONPAGES, | |
b084d435 | 325 | MM_SWAPENTS, |
d559db08 KH |
326 | NR_MM_COUNTERS |
327 | }; | |
328 | ||
57c1ffce | 329 | #if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) |
34e55232 | 330 | #define SPLIT_RSS_COUNTING |
34e55232 KH |
331 | /* per-thread cached information, */ |
332 | struct task_rss_stat { | |
333 | int events; /* for synchronization threshold */ | |
334 | int count[NR_MM_COUNTERS]; | |
335 | }; | |
57c1ffce | 336 | #endif /* USE_SPLIT_PTE_PTLOCKS */ |
172703b0 | 337 | |
d559db08 | 338 | struct mm_rss_stat { |
172703b0 | 339 | atomic_long_t count[NR_MM_COUNTERS]; |
d559db08 | 340 | }; |
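
/*
 * Illustrative sketch (not part of this header): with SPLIT_RSS_COUNTING,
 * each thread batches RSS updates in its task_rss_stat and only folds
 * them into the shared, atomic mm_rss_stat after a number of events,
 * roughly as below. The real helpers are add_mm_counter()/sync_mm_rss();
 * EXAMPLE_RSS_EVENTS_THRESH is a hypothetical batching threshold here.
 */
#if 0
static void example_add_rss(struct task_struct *task, int member, int val)
{
	struct task_rss_stat *stat = &task->rss_stat;

	stat->count[member] += val;		/* cheap, thread-local */
	if (++stat->events >= EXAMPLE_RSS_EVENTS_THRESH) {
		int i;

		stat->events = 0;
		for (i = 0; i < NR_MM_COUNTERS; i++) {	/* fold into the mm */
			atomic_long_add(stat->count[i],
					&task->mm->rss_stat.count[i]);
			stat->count[i] = 0;
		}
	}
}
#endif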

struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct *mmap_cache;	/* last find_vma result */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t *pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	atomic_long_t nr_ptes;			/* Page table pages */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's. These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long shared_vm;	/* Shared pages (files) */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	bool tlb_flush_pending;
#endif
	struct uprobes_state uprobes_state;
};
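
/*
 * Illustrative sketch (not part of this header): mm_users counts users of
 * the address space (threads, get_task_mm() callers); mm_count counts
 * references to the mm_struct itself, with all mm_users collectively
 * holding a single mm_count reference. Pinning and releasing follow this
 * rough pattern (the real release paths are mmput() and mmdrop()):
 */
#if 0
void example_use_mm(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);	/* I am using the address space */
	/* ... touch the address space ... */
	mmput(mm);			/* last user also drops mm_count */
}

void example_grab_mm(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);	/* keep the struct, not the mappings */
	/* ... may outlive the last mm_users reference ... */
	mmdrop(mm);			/* frees the mm_struct when it hits 0 */
}
#endif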
456 | ||
6345d24d LT |
457 | static inline void mm_init_cpumask(struct mm_struct *mm) |
458 | { | |
459 | #ifdef CONFIG_CPUMASK_OFFSTACK | |
460 | mm->cpu_vm_mask_var = &mm->cpumask_allocation; | |
461 | #endif | |
462 | } | |
463 | ||
45e575ab | 464 | /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ |
de03c72c KM |
465 | static inline cpumask_t *mm_cpumask(struct mm_struct *mm) |
466 | { | |
467 | return mm->cpu_vm_mask_var; | |
468 | } | |
45e575ab | 469 | |
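
/*
 * Illustrative sketch (not part of this header): architectures mark the
 * current CPU in mm_cpumask() when switching to an mm, and TLB shootdown
 * code uses the mask to limit flush IPIs to CPUs that may hold stale
 * translations. A context-switch fragment looks roughly like this:
 */
#if 0
void example_switch_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next) {
		cpumask_set_cpu(cpu, mm_cpumask(next));	  /* may cache next's TLB entries */
		cpumask_clear_cpu(cpu, mm_cpumask(prev)); /* stop receiving prev's flush IPIs */
		/* ... load next->pgd into the MMU ... */
	}
}
#endif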

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;
	barrier();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	mm->tlb_flush_pending = false;
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
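
/*
 * Illustrative sketch (not part of this header): a batched-flush operation
 * such as an mprotect-style range update brackets its page-table changes
 * with the helpers above, so racing code that moves pages can tell that a
 * TLB flush is still owed:
 */
#if 0
void example_change_protection(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	set_tlb_flush_pending(mm);	/* PTEs about to change under us */
	/* ... rewrite PTEs in [start, end) under the page table locks ... */
	flush_tlb_mm(mm);		/* flush before clearing the flag */
	clear_tlb_flush_pending(mm);	/* flush done, state visible */
}
#endif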
505 | ||
5b99cd0e | 506 | #endif /* _LINUX_MM_TYPES_H */ |