/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)
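
/*
 * Illustrative example (editorial addition, not in the original header):
 * a helper that must honour the caller's reclaim constraints but chooses
 * its own placement would mask the incoming flags along the lines of
 *
 *	gfp_t masked = caller_gfp & GFP_RECLAIM_MASK;
 *
 * so IO/FS/watermark behaviour is preserved while placement hints such
 * as __GFP_HIGHMEM are dropped. "caller_gfp" is a hypothetical local.
 */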

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
		      struct vm_area_struct *vma,
		      unsigned long addr, unsigned long end,
		      struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);

/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/memcontrol.c:
 */
extern bool cgroup_memory_nokmem;

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
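
/*
 * Worked example (editorial addition, not in the original header): for an
 * order-2 page at pfn 8, __find_buddy_pfn(8, 2) = 8 ^ 4 = 12, and the two
 * buddies merge into the order-3 page starting at 8 & 12 = 8, matching
 * the "combined_pfn = buddy_pfn & pfn" step of the buddy merge path.
 */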

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
				unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
			    gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
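
/*
 * Illustrative usage (editorial addition, not in the original header):
 * a lockless reader is expected to snapshot the value once and validate
 * it before acting on it, e.g.
 *
 *	if (PageBuddy(page)) {
 *		unsigned long order = buddy_order_unsafe(page);
 *
 *		if (order < MAX_ORDER)
 *			... act on the snapshot, never re-read ...
 *	}
 *
 * rather than calling page_private(page) repeatedly.
 */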

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
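
/*
 * Illustrative examples (editorial addition, not in the original header):
 * a read-only PROT_EXEC file mapping is "exec"; a MAP_PRIVATE writable
 * anonymous mapping is "data"; a VM_GROWSDOWN area is "stack"; a writable
 * VM_SHARED mapping matches none of the three helpers above.
 */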

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
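
/*
 * Worked example (editorial addition, not in the original header): with
 * vma->vm_start == 0x7f0000000000, vma->vm_pgoff == 0x200 and a page whose
 * page_to_pgoff() is 0x210, the expected address is
 * 0x7f0000000000 + (0x10 << PAGE_SHIFT), i.e. 0x7f0000010000 with 4K pages;
 * a small page with pgoff below 0x200 yields -EFAULT instead.
 */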

/*
 * Then at what user virtual address will none of the page be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address_end(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page) + compound_nr(page);
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
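
/*
 * Illustrative call pattern (editorial addition, not in the original
 * header), loosely modelled on the page-cache fault path: the caller
 * threads "fpin" through the helpers that may start IO and, if it comes
 * back non-NULL, knows the mmap_lock was dropped and must fput() the
 * pinned file and return VM_FAULT_RETRY instead of completing the fault.
 */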

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
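
/*
 * Illustrative walk (editorial addition, not in the original header):
 * code that touches every subpage of a gigantic page typically pairs the
 * two helpers, roughly
 *
 *	struct page *p = mem_map_offset(head, 0);
 *
 *	for (i = 0; i < nr_subpages; i++, p = mem_map_next(p, head, i))
 *		...process p...
 *
 * so that crossing a MAX_ORDER_NR_PAGES boundary in a discontiguous
 * mem_map is handled; "head" and "nr_subpages" are hypothetical locals.
 */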

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
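
/*
 * Illustrative example (editorial addition, not in the original header):
 * since ALLOC_NO_WATERMARKS is 0x04 the mask is 0x03, so
 * (ALLOC_WMARK_LOW | ALLOC_CPUSET) & ALLOC_WMARK_MASK == WMARK_LOW,
 * giving the plain index used to look up the zone watermark.
 */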

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

#endif	/* __MM_INTERNAL_H */