/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_EAGAIN,
	BP_ECANCELED
};

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_HIGHMEM
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
#define inc_totalhigh_pages() do {} while (0)
#define dec_totalhigh_pages() do {} while (0)
#endif

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
	if (PageHighMem(page))
		dec_totalhigh_pages();
	totalram_pages--;
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	if (prefer_highmem)
		page = list_entry(ballooned_pages.prev, struct page, lru);
	else
		page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page)) {
		balloon_stats.balloon_high--;
		inc_totalhigh_pages();
	} else
		balloon_stats.balloon_low--;

	totalram_pages++;

	return page;
}

static struct page *balloon_first_page(void)
{
	if (list_empty(&ballooned_pages))
		return NULL;
	return list_entry(ballooned_pages.next, struct page, lru);
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

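/*
 * update_schedule() tracks retry state across balloon_process() passes:
 * a successful pass resets the backoff, while each failed pass doubles
 * schedule_delay (capped at max_schedule_delay) until max_retry_count is
 * exceeded (unless retries are unlimited) and the operation is canceled.
 */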
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
	    balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages -
		balloon_stats.hotplug_pages;
}

static bool balloon_is_inflated(void)
{
	if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
	    balloon_stats.balloon_hotplug)
		return true;
	else
		return false;
}

/*
 * reserve_additional_memory() adds a memory region of size >= credit above
 * max_pfn. The new region is section aligned and its size is rounded up to
 * a multiple of the section size. This allows optimal use of the address
 * space and establishes proper alignment when the function is called for
 * the first time after boot (the last section not fully populated at boot
 * time contains unused memory pages with the PG_reserved bit not set;
 * online_pages_range() does not allow onlining pages in the whole range if
 * the first onlined page does not have the PG_reserved bit set). The real
 * size of the added memory is established at the page onlining stage.
 */

static enum bp_state reserve_additional_memory(long credit)
{
	int nid, rc;
	u64 hotplug_start_paddr;
	unsigned long balloon_hotplug = credit;

	hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
	balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
	nid = memory_add_physaddr_to_nid(hotplug_start_paddr);

	rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);

	if (rc) {
		pr_info("xen_balloon: %s: add_memory() failed: %i\n", __func__, rc);
		return BP_EAGAIN;
	}

	balloon_hotplug -= credit;

	balloon_stats.hotplug_pages += credit;
	balloon_stats.balloon_hotplug = balloon_hotplug;

	return BP_DONE;
}

static void xen_online_page(struct page *page)
{
	__online_page_set_limits(page);

	mutex_lock(&balloon_mutex);

	__balloon_append(page);

	if (balloon_stats.hotplug_pages)
		--balloon_stats.hotplug_pages;
	else
		--balloon_stats.balloon_hotplug;

	mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static long current_credit(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	if (balloon_stats.balloon_low || balloon_stats.balloon_high)
		return true;
	else
		return false;
}

static enum bp_state reserve_additional_memory(long credit)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_DONE;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

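/*
 * increase_reservation() asks Xen to populate up to nr_pages machine frames
 * (batched through frame_list), then gives the corresponding ballooned-out
 * pages back to the kernel allocator, restoring their P2M entries and, for
 * PV lowmem pages, their kernel mappings.
 */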
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long pfn, i;
	struct page *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
		nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
		balloon_stats.hotplug_pages += nr_pages;
		balloon_stats.balloon_hotplug -= nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
		if (xen_pv_domain() && !PageHighMem(page)) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

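/*
 * decrease_reservation() allocates up to nr_pages pages from the kernel,
 * scrubs them, tears down their kernel mappings and P2M entries, and hands
 * the underlying machine frames back to Xen via XENMEM_decrease_reservation.
 */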
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long pfn, i;
	struct page *page;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (balloon_stats.hotplug_pages) {
		nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
		balloon_stats.hotplug_pages -= nr_pages;
		balloon_stats.balloon_hotplug += nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (xen_pv_domain() && !PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory(credit);
		}

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);

	mutex_unlock(&balloon_mutex);
}

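/*
 * balloon_set_new_target() is normally driven by the toolstack: the xenstore
 * "memory/target" watch (see drivers/xen/xen-balloon.c) calls it whenever
 * the domain's memory target changes.
 */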
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @highmem: allow highmem pages
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
	int pgno = 0;
	struct page *page;

	mutex_lock(&balloon_mutex);
	while (pgno < nr_pages) {
		page = balloon_retrieve(highmem);
		if (page && (highmem || !PageHighMem(page))) {
			pages[pgno++] = page;
		} else {
			enum bp_state st;

			if (page)
				balloon_append(page);
			st = decrease_reservation(nr_pages - pgno,
					highmem ? GFP_HIGHUSER : GFP_USER);
			if (st != BP_DONE)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	while (pgno)
		balloon_append(pages[--pgno]);
	/* Free the memory back to the kernel soon */
	schedule_delayed_work(&balloon_worker, 0);
	mutex_unlock(&balloon_mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

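/*
 * Illustrative usage (not part of this driver): a backend that needs frames
 * without increasing the domain's memory footprint can borrow ballooned-out
 * pages and hand them back when finished.
 *
 *	struct page *pages[16];
 *
 *	if (alloc_xenballooned_pages(16, pages, false) == 0) {
 *		... use the pages, e.g. as grant-mapping targets ...
 *		free_xenballooned_pages(16, pages);
 *	}
 */
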
static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		page = pfn_to_page(pfn);
		/* totalram_pages and totalhigh_pages do not
		   include the boot-time balloon extension, so
		   don't subtract from it. */
		__balloon_append(page);
	}
}

static int __init balloon_init(void)
{
	int i;

	if (!xen_domain())
		return -ENODEV;

	pr_info("xen/balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: max_pfn;
	balloon_stats.target_pages = balloon_stats.current_pages;
	balloon_stats.balloon_low = 0;
	balloon_stats.balloon_high = 0;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	balloon_stats.hotplug_pages = 0;
	balloon_stats.balloon_hotplug = 0;

	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
#endif

	/*
	 * Initialize the balloon with pages from the extra memory
	 * regions (see arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
		if (xen_extra_mem[i].size)
			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
					   PFN_DOWN(xen_extra_mem[i].size));

	return 0;
}

subsys_initcall(balloon_init);

MODULE_LICENSE("GPL");