/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 * page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * Driver must call this function to properly allocate a new enlisted balloon
 * page before definitively removing it from the guest system.
 * This function returns the page address for the recently enqueued page or
 * NULL in the case we fail to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
                                        __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;

        /*
         * Block others from accessing the 'page' when we get around to
         * establishing additional references. We should be the only one
         * holding a reference to the 'page' at this point.
         */
        BUG_ON(!trylock_page(page));
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        balloon_page_insert(b_dev_info, page);
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
        unlock_page(page);
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);

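/*
 * Usage sketch (illustrative only, not part of the original file): a balloon
 * driver's inflate path would typically call balloon_page_enqueue() in a loop
 * and then tell the host about each page it managed to grab.  Since
 * balloon_page_enqueue() returns NULL when allocation fails this turn, the
 * loop simply stops and the driver retries later.  The fill_balloon() shape
 * below loosely mirrors what drivers such as virtio_balloon do;
 * tell_host_about_page() is a hypothetical helper standing in for the
 * driver's own host-notification code.
 *
 *      static void fill_balloon(struct balloon_dev_info *b_dev_info,
 *                               size_t num_pages)
 *      {
 *              while (num_pages--) {
 *                      struct page *page = balloon_page_enqueue(b_dev_info);
 *
 *                      if (!page)
 *                              break;
 *                      tell_host_about_page(b_dev_info, page);
 *              }
 *      }
 */
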
/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 * its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly de-allocate a previously enlisted
 * balloon page before definitively releasing it back to the guest system.
 * This function returns the page address for the recently dequeued page or
 * NULL in the case we find balloon's page list temporarily empty due to
 * compaction-isolated pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
        struct page *page, *tmp;
        unsigned long flags;
        bool dequeued_page;

        dequeued_page = false;
        list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
                /*
                 * Block others from accessing the 'page' while we get around
                 * to establishing additional references and preparing the
                 * 'page' to be released by the balloon driver.
                 */
                if (trylock_page(page)) {
                        if (!PagePrivate(page)) {
                                /* raced with isolation */
                                unlock_page(page);
                                continue;
                        }
                        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                        balloon_page_delete(page);
                        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                        unlock_page(page);
                        dequeued_page = true;
                        break;
                }
        }

        if (!dequeued_page) {
                /*
                 * If we are unable to dequeue a balloon page because the page
                 * list is empty and there are no isolated pages, then
                 * something went off track and some balloon pages are lost.
                 * BUG() here, otherwise the balloon driver may get stuck in
                 * an infinite loop while attempting to release all its pages.
                 */
                spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                if (unlikely(list_empty(&b_dev_info->pages) &&
                             !b_dev_info->isolated_pages))
                        BUG();
                spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                page = NULL;
        }
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);

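/*
 * Usage sketch (illustrative only, not part of the original file): the
 * deflate path pairs with the inflate sketch above.  A NULL return does not
 * necessarily mean the balloon is empty; pages may be temporarily isolated
 * by compaction, so a driver typically just stops for this round and retries
 * later.  release_page_to_host() is a hypothetical stand-in for the driver's
 * own host-notification code.
 *
 *      static size_t leak_balloon(struct balloon_dev_info *b_dev_info,
 *                                 size_t num_pages)
 *      {
 *              size_t released = 0;
 *
 *              while (released < num_pages) {
 *                      struct page *page = balloon_page_dequeue(b_dev_info);
 *
 *                      if (!page)
 *                              break;
 *                      release_page_to_host(b_dev_info, page);
 *                      __free_page(page);
 *                      released++;
 *              }
 *              return released;
 *      }
 */
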
#ifdef CONFIG_BALLOON_COMPACTION

static inline void __isolate_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        ClearPagePrivate(page);
        list_del(&page->lru);
        b_dev_info->isolated_pages++;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline void __putback_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        SetPagePrivate(page);
        list_add(&page->lru, &b_dev_info->pages);
        b_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

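/*
 * Informal state summary derived from the two helpers above (the predicate
 * below is only an illustration, not an API exported by this file): a page
 * owned by the balloon is either enqueued (PagePrivate set, linked on
 * b_dev_info->pages) or isolated for migration (PagePrivate cleared,
 * unlinked, accounted in b_dev_info->isolated_pages).
 *
 *      static inline bool balloon_page_is_isolated(struct page *page)
 *      {
 *              return !PagePrivate(page);
 *      }
 */
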
/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
        /*
         * Avoid burning cycles with pages that are yet under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a balloon page being freed under us and
         * raise its refcount preventing __free_pages() from doing its job,
         * the put_page() at the end of this block will take care of
         * releasing this page, thus avoiding a nasty leakage.
         */
        if (likely(get_page_unless_zero(page))) {
                /*
                 * As balloon pages are not isolated from LRU lists, concurrent
                 * compaction threads can race against page migration functions
                 * as well as race against the balloon driver releasing a page.
                 *
                 * In order to avoid having an already isolated balloon page
                 * being (wrongly) re-isolated while it is under migration,
                 * or to avoid attempting to isolate pages being released by
                 * the balloon driver, let's be sure we have the page lock
                 * before proceeding with the balloon page isolation steps.
                 */
                if (likely(trylock_page(page))) {
                        /*
                         * A ballooned page, by default, has PagePrivate set.
                         * Prevent concurrent compaction threads from isolating
                         * an already isolated balloon page by clearing it.
                         */
                        if (balloon_page_movable(page)) {
                                __isolate_balloon_page(page);
                                unlock_page(page);
                                return true;
                        }
                        unlock_page(page);
                }
                put_page(page);
        }
        return false;
}

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
        /*
         * 'lock_page()' stabilizes the page and prevents races against
         * concurrent isolation threads attempting to re-isolate it.
         */
        lock_page(page);

        if (__is_movable_balloon_page(page)) {
                __putback_balloon_page(page);
                /* drop the extra ref count taken for page isolation */
                put_page(page);
        } else {
                WARN_ON(1);
                dump_page(page, "not movable balloon page");
        }
        unlock_page(page);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct page *newpage,
                         struct page *page, enum migrate_mode mode)
{
        struct balloon_dev_info *balloon = balloon_page_device(page);
        int rc = -EAGAIN;

        /*
         * Block others from accessing the 'newpage' when we get around to
         * establishing additional references. We should be the only one
         * holding a reference to the 'newpage' at this point.
         */
        BUG_ON(!trylock_page(newpage));

        if (WARN_ON(!__is_movable_balloon_page(page))) {
                dump_page(page, "not movable balloon page");
                unlock_page(newpage);
                return rc;
        }

        if (balloon && balloon->migratepage)
                rc = balloon->migratepage(balloon, newpage, page, mode);

        unlock_page(newpage);
        return rc;
}
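
/*
 * Setup sketch (illustrative only, not part of the original file): a driver
 * that wants its pages migrated hooks the migratepage callback after
 * initializing its balloon_dev_info with balloon_devinfo_init().  The
 * my_balloon structure, my_balloon_probe() and my_balloon_migratepage()
 * below are hypothetical; a real callback would enqueue the new page, notify
 * the host about both pages, delete the old one, and return
 * MIGRATEPAGE_SUCCESS on success.
 *
 *      static int my_balloon_migratepage(struct balloon_dev_info *b_dev_info,
 *                                        struct page *newpage,
 *                                        struct page *page,
 *                                        enum migrate_mode mode)
 *      {
 *              ...
 *              return MIGRATEPAGE_SUCCESS;
 *      }
 *
 *      static int my_balloon_probe(struct my_balloon *vb)
 *      {
 *              balloon_devinfo_init(&vb->b_dev_info);
 *              vb->b_dev_info.migratepage = my_balloon_migratepage;
 *              return 0;
 *      }
 */
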
#endif /* CONFIG_BALLOON_COMPACTION */