/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page,
				   bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if its
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to report the
	 * number of pages in the range that are held by the balloon
	 * driver in order to shrink memory. If all the pages are
	 * accounted for by balloons, are free, or are on the LRU,
	 * isolation can continue. Later, for example when the memory
	 * hotplug notifier runs, the pages reported as "can be
	 * isolated" are expected to be isolated (freed) by the balloon
	 * driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug does not call shrink_slab() by itself
	 * yet, so we only check MOVABLE pages here.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * "Immobile" here means not-on-LRU pages. If there are more
	 * immobile pages than removable-by-driver pages reported by
	 * the notifier, we fail the isolation.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
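
/*
 * A minimal sketch (not part of this file) of the other side of the
 * MEM_ISOLATE_COUNT handshake described above, loosely modeled on how a
 * balloon driver might respond. balloon_owns_pfn() is a hypothetical
 * helper; the callback only fills in arg->pages_found so that
 * has_unmovable_pages() can credit balloon-held pages:
 *
 *	static int balloon_isolate_cb(struct notifier_block *self,
 *				      unsigned long action, void *arg)
 *	{
 *		struct memory_isolate_notify *marg = arg;
 *		unsigned long pfn;
 *
 *		if (action != MEM_ISOLATE_COUNT)
 *			return NOTIFY_OK;
 *		for (pfn = marg->start_pfn;
 *		     pfn < marg->start_pfn + marg->nr_pages; pfn++)
 *			if (balloon_owns_pfn(pfn))	// hypothetical
 *				marg->pages_found++;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block balloon_isolate_nb = {
 *		.notifier_call = balloon_isolate_cb,
 *	};
 *	// registered once at driver init:
 *	register_memory_isolate_notifier(&balloon_isolate_nb);
 */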

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order >= pageblock_order on an
	 * isolated pageblock is restricted from merging (due to the
	 * freepage counting problem), such a free buddy page may exist
	 * here. move_freepages_block() does not handle merging, so we
	 * need another approach: isolating and then freeing the page
	 * lets the buddy allocator merge it.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order, there
	 * can be no other free pages in the pageblock, so we can skip
	 * the costly pageblock scan that move_freepages_block() would
	 * otherwise do.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
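
/*
 * For reference, a worked example of the buddy lookup used above:
 * __find_buddy_pfn() computes buddy_pfn = pfn ^ (1 << order), so a free
 * page at pfn 0x1000 with order 10 has its buddy at
 * 0x1000 ^ 0x400 = 0x1400, the second half of the order-11 block the
 * two would merge into.
 */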

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 * @skip_hwpoisoned_pages: whether HWPoisoned pages may be skipped.
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free
 * pages in the range will never be allocated. Any free pages, and pages
 * freed in the future, will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
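
/*
 * A hedged usage sketch, modeled on the alloc_contig_range()/memory
 * hotplug pattern: isolate the range, migrate whatever is still in use,
 * verify with test_pages_isolated() (below), and always undo the
 * isolation on the way out. Error handling and the migration loop are
 * elided:
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, true);
 *	if (ret)
 *		return ret;
 *	// ... migrate in-use pages out of [start_pfn, end_pfn) ...
 *	if (test_pages_isolated(start_pfn, end_pfn, true))
 *		ret = -EBUSY;
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */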

/*
 * Test whether all pages in the range are free (i.e. isolated). All
 * pages in [start_pfn, end_pfn) must be in the same zone, and
 * zone->lock must be held when calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages. Therefore,
	 * check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are either free or marked MIGRATE_ISOLATE */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
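
/*
 * A sketch of how a caller such as __offline_pages() might use this
 * check after migrating pages away (details vary by caller): per-cpu
 * page lists are drained first so stale pcp pages cannot make an
 * otherwise-isolated range look busy, and a nonzero result sends the
 * caller back to retry migration:
 *
 *	drain_all_pages(zone);
 *	ret = test_pages_isolated(start_pfn, end_pfn, true);
 *	if (ret)
 *		goto repeat;	// some pages are still in use
 */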

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate the destination hugepage from the nearest
	 * neighbor node in accordance with the memory policy of the
	 * user process, if possible. For now, as a simple workaround,
	 * we use the next node as the destination.
	 */
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node_in(page_to_nid(page),
							 node_online_map));

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}