// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
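
/*
 * Mark the pageblock containing @page as MIGRATE_ISOLATE if it holds no
 * unmovable pages, moving its free pages to the isolate freelist.
 * Returns 0 on success and -EBUSY if the pageblock was already isolated
 * (e.g. by a racing caller) or contains unmovable pages.
 */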
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with an order greater than pageblock_order on
	 * an isolated pageblock is restricted from merging due to the free
	 * page accounting problem, there may be a free buddy page here.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating and then freeing the page will cause these
	 * pages to be merged.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (!is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page with more than pageblock_order, there
	 * should be no free pages left in the range, so we can avoid the
	 * costly pageblock scan for free page moving.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
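
/*
 * Return the first online page in the range [pfn, pfn + nr_pages), or
 * NULL if no page in the range is online.
 */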
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}
/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask)
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means that free pages in
 * the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range. test_pages_isolated() can be used to test this.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), and set_migratetype_isolate()
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However,
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable the pcplist before isolation and enable
 * it after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
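
/*
 * Typical call sequence (a simplified sketch modeled on alloc_contig_range()
 * in mm/page_alloc.c; error handling and the page migration loop are
 * omitted):
 *
 *	ret = start_isolate_page_range(start, end, MIGRATE_MOVABLE, 0);
 *	if (ret)
 *		return ret;
 *	drain_all_pages(zone);		(flush per-cpu free lists)
 *	... migrate in-use pages out of [start, end) ...
 *	if (test_pages_isolated(start, end, 0))
 *		ret = -EBUSY;
 *	... on success, grab the now-free pages off the freelists ...
 *	undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
 */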
/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}
/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
	 * not necessarily aligned to pageblock_nr_pages. Hence, just check
	 * the pageblock migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page) {
		ret = -EBUSY;
		goto out;
	}

	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	ret = pfn < end_pfn ? -EBUSY : 0;

out:
	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return ret;
}
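
/*
 * Note on usage (not enforced here): because freeing is not synchronized
 * with isolation, callers typically re-check test_pages_isolated() after
 * migrating and draining, and retry if it still reports -EBUSY. Memory
 * offlining, for example, loops until the whole range tests as isolated
 * before taking the memory offline.
 */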