/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

static int xen_hotplug_unpopulated;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG

static int zero;
static int one = 1;

static struct ctl_table balloon_table[] = {
	{
		.procname = "hotplug_unpopulated",
		.data = &xen_hotplug_unpopulated,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{ }
};

static struct ctl_table balloon_root[] = {
	{
		.procname = "balloon",
		.mode = 0555,
		.child = balloon_table,
	},
	{ }
};

static struct ctl_table xen_root[] = {
	{
		.procname = "xen",
		.mode = 0555,
		.child = balloon_root,
	},
	{ }
};

#endif

/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
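/*
 * Note: XEN_PFN_PER_PAGE is PAGE_SIZE / XEN_PAGE_SIZE, so EXTENT_ORDER is 0
 * when Linux and Xen both use 4 KiB pages (one extent per page) and, for
 * example, 4 on a 64 KiB-page kernel (one order-4 extent covering the 16
 * Xen frames backing each Linux page).
 */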

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_WAIT,
	BP_EAGAIN,
	BP_ECANCELED
};


static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
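/*
 * Flag breakdown: GFP_HIGHUSER allows ballooned-out pages to come from
 * highmem, __GFP_NOWARN suppresses allocation-failure warnings,
 * __GFP_NORETRY makes the allocator fail fast instead of retrying hard
 * (which could wake the OOM killer), and __GFP_NOMEMALLOC keeps the
 * allocation out of the emergency reserves.
 */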

static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
	wake_up(&balloon_wq);
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	if (require_lowmem && PageHighMem(page))
		return NULL;
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	return page;
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

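/*
 * Back-off policy for the work item: every BP_EAGAIN pass doubles
 * schedule_delay (1, 2, 4, ... seconds, capped at max_schedule_delay) and
 * bumps retry_count; a successful pass (BP_DONE) resets both to 1, and
 * exceeding max_retry_count (when it is not RETRY_UNLIMITED) cancels the
 * operation with BP_ECANCELED.
 */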
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_WAIT)
		return BP_WAIT;

	if (state == BP_ECANCELED)
		return BP_ECANCELED;

	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
	    balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
	if (!resource)
		return;

	/*
	 * No need to reset region to identity mapped since we now
	 * know that no I/O can be in this region
	 */
	release_resource(resource);
	kfree(resource);
}

/*
 * Host memory not allocated to dom0. We can use this range for hotplug-based
 * ballooning.
 *
 * It's a type-less resource. Setting IORESOURCE_MEM will make resource
 * management algorithms (arch_remove_reservations()) look into guest e820,
 * which we don't want.
 */
static struct resource hostmem_resource = {
	.name = "Host RAM",
};

void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
{}

static struct resource *additional_memory_resource(phys_addr_t size)
{
	struct resource *res, *res_hostmem;
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->name = "System RAM";
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
	if (res_hostmem) {
		/* Try to grab a range from hostmem */
		res_hostmem->name = "Host memory";
		ret = allocate_resource(&hostmem_resource, res_hostmem,
					size, 0, -1,
					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	}

	if (!ret) {
		/*
		 * Insert this resource into iomem. Because hostmem_resource
		 * tracks the portion of the guest e820 marked as UNUSABLE,
		 * no one else should try to use it.
		 */
		res->start = res_hostmem->start;
		res->end = res_hostmem->end;
		ret = insert_resource(&iomem_resource, res);
		if (ret < 0) {
			pr_err("Can't insert iomem_resource [%llx - %llx]\n",
			       res->start, res->end);
			release_memory_resource(res_hostmem);
			res_hostmem = NULL;
			res->start = res->end = 0;
		}
	}

	if (ret) {
		ret = allocate_resource(&iomem_resource, res,
					size, 0, -1,
					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
		if (ret < 0) {
			pr_err("Cannot allocate new System RAM resource\n");
			kfree(res);
			return NULL;
		}
	}

#ifdef CONFIG_SPARSEMEM
	{
		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
		unsigned long pfn = res->start >> PAGE_SHIFT;

		if (pfn > limit) {
			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
			       pfn, limit);
			release_memory_resource(res);
			release_memory_resource(res_hostmem);
			return NULL;
		}
	}
#endif

	return res;
}

static enum bp_state reserve_additional_memory(void)
{
	long credit;
	struct resource *resource;
	int nid, rc;
	unsigned long balloon_hotplug;

	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
		- balloon_stats.total_pages;

	/*
	 * Already hotplugged enough pages? Wait for them to be
	 * onlined.
	 */
	if (credit <= 0)
		return BP_WAIT;

	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
	if (!resource)
		goto err;

	nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * We don't support PV MMU when Linux and Xen are using
	 * different page granularities.
	 */
	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

	/*
	 * add_memory() will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long pfn, i;

		pfn = PFN_DOWN(resource->start);
		for (i = 0; i < balloon_hotplug; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				goto err;
			}
		}
	}
#endif

	/*
	 * add_memory_resource() will call online_pages(), which in turn
	 * calls the xen_online_page() callback, causing a deadlock if we
	 * don't release balloon_mutex here. Unlocking here is safe because
	 * the callers drop the mutex before trying again.
	 */
	mutex_unlock(&balloon_mutex);
	rc = add_memory_resource(nid, resource, memhp_auto_online);
	mutex_lock(&balloon_mutex);

	if (rc) {
		pr_warn("Cannot add additional memory (%i)\n", rc);
		goto err;
	}

	balloon_stats.total_pages += balloon_hotplug;

	return BP_WAIT;
err:
	release_memory_resource(resource);
	return BP_ECANCELED;
}

static void xen_online_page(struct page *page)
{
	__online_page_set_limits(page);

	mutex_lock(&balloon_mutex);

	__balloon_append(page);

	mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

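/*
 * Sign convention: current_credit() is target minus current, so a positive
 * credit means the balloon should deflate (give pages back to this domain
 * via increase_reservation() or memory hotplug) and a negative credit means
 * it should inflate (hand pages back to Xen via decrease_reservation()).
 */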
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

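/*
 * Deflate the balloon by up to nr_pages (clamped to one frame_list batch):
 * ask Xen to populate the frames of pages sitting on the ballooned list,
 * then hand the successfully populated pages back to the page allocator.
 * The hypercall may populate fewer extents than requested; any remainder
 * simply stays on the ballooned list for a later pass.
 */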
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long i;
	struct page *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}

		/* XENMEM_populate_physmap requires a PFN based on Xen
		 * granularity.
		 */
		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularities.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long pfn = page_to_pfn(page);

			set_phys_to_machine(pfn, frame_list[i]);

			/* Link back into the page tables if not highmem. */
			if (!PageHighMem(page)) {
				int ret;
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						mfn_pte(frame_list[i], PAGE_KERNEL),
						0);
				BUG_ON(ret);
			}
		}
#endif

		/* Relinquish the page back to the allocator. */
		free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

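/*
 * Inflate the balloon by up to nr_pages (clamped to one frame_list batch):
 * allocate pages from the kernel, scrub them, drop their direct-map PTEs
 * and p2m entries on PV, queue them on the ballooned list, and finally hand
 * the underlying frames back to Xen in a single XENMEM_decrease_reservation
 * hypercall.
 */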
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long i;
	struct page *page, *tmp;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}
		adjust_managed_page_count(page, -1);
		scrub_page(page);
		list_add(&page->lru, &pages);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/*
	 * Setup the frame, update direct mapping, invalidate P2M,
	 * and add to balloon.
	 */
	i = 0;
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		/* XENMEM_decrease_reservation requires a GFN */
		frame_list[i++] = xen_page_to_gfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularities.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long pfn = page_to_pfn(page);

			if (!PageHighMem(page)) {
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						__pte_ma(0), 0);
				BUG_ON(ret);
			}
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		}
#endif
		list_del(&page->lru);

		balloon_append(page);
	}

	flush_tlb_all();

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * As this is a work item it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;


	do {
		mutex_lock(&balloon_mutex);

		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory();
		}

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

		mutex_unlock(&balloon_mutex);

		cond_resched();

	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
}

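/*
 * Illustrative note (the watch itself lives in drivers/xen/xen-balloon.c,
 * not in this file): the toolstack writes the new size in KiB to the
 * xenstore "memory/target" node, and the watch handler converts that to
 * pages before calling in here, roughly:
 *
 *	xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
 *	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
 */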
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

static int add_ballooned_pages(int nr_pages)
{
	enum bp_state st;

	if (xen_hotplug_unpopulated) {
		st = reserve_additional_memory();
		if (st != BP_ECANCELED) {
			mutex_unlock(&balloon_mutex);
			wait_event(balloon_wq,
				   !list_empty(&ballooned_pages));
			mutex_lock(&balloon_mutex);
			return 0;
		}
	}

	st = decrease_reservation(nr_pages, GFP_USER);
	if (st != BP_DONE)
		return -ENOMEM;

	return 0;
}

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages)
{
	int pgno = 0;
	struct page *page;
	int ret;

	mutex_lock(&balloon_mutex);

	balloon_stats.target_unpopulated += nr_pages;

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);
		if (page) {
			pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
			/*
			 * We don't support PV MMU when Linux and Xen are
			 * using different page granularities.
			 */
			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				ret = xen_alloc_p2m_entry(page_to_pfn(page));
				if (ret < 0)
					goto out_undo;
			}
#endif
		} else {
			ret = add_ballooned_pages(nr_pages - pgno);
			if (ret < 0)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
out_undo:
	mutex_unlock(&balloon_mutex);
	free_xenballooned_pages(pgno, pages);
	return ret;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
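
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this
 * file): a driver that needs guest-physical space without host backing,
 * e.g. for mapping grant references, might do:
 *
 *	struct page *pages[N];
 *
 *	if (alloc_xenballooned_pages(N, pages))
 *		return -ENOMEM;
 *	...map foreign/grant pages into pages[]...
 *	free_xenballooned_pages(N, pages);
 */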

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	balloon_stats.target_unpopulated -= nr_pages;

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

#ifdef CONFIG_XEN_PV
static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		page = pfn_to_page(pfn);
		/* totalram_pages and totalhigh_pages do not
		   include the boot-time balloon extension, so
		   don't subtract from it. */
		__balloon_append(page);
	}

	balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
#endif

static int __init balloon_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising balloon driver\n");

#ifdef CONFIG_XEN_PV
	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
#else
	balloon_stats.current_pages = get_num_physpages();
#endif
	balloon_stats.target_pages = balloon_stats.current_pages;
	balloon_stats.balloon_low = 0;
	balloon_stats.balloon_high = 0;
	balloon_stats.total_pages = balloon_stats.current_pages;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
	register_sysctl_table(xen_root);

	arch_xen_balloon_init(&hostmem_resource);
#endif

#ifdef CONFIG_XEN_PV
	{
		int i;

		/*
		 * Initialize the balloon with pages from the extra memory
		 * regions (see arch/x86/xen/setup.c).
		 */
		for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
			if (xen_extra_mem[i].n_pfns)
				balloon_add_region(xen_extra_mem[i].start_pfn,
						   xen_extra_mem[i].n_pfns);
	}
#endif

	/* Init the xen-balloon driver. */
	xen_balloon_init();

	return 0;
}
subsys_initcall(balloon_init);