/* tests/libqos/malloc.c — libqos guest memory allocator (QEMU mirror) */
1 /*
2 * libqos malloc support
3 *
4 * Copyright (c) 2014
5 *
6 * Author:
7 * John Snow <jsnow@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 */
12
13 #include "libqos/malloc.h"
14 #include "qemu-common.h"
15 #include <stdio.h>
16 #include <inttypes.h>
17 #include <glib.h>
18
/* Doubly-linked list of memory blocks, kept sorted by guest address. */
typedef QTAILQ_HEAD(MemList, MemBlock) MemList;

/* One contiguous region of guest memory: [addr, addr + size). */
typedef struct MemBlock {
    QTAILQ_ENTRY(MemBlock) MLIST_ENTNAME;
    uint64_t size;  /* length of the region in bytes */
    uint64_t addr;  /* guest base address of the region */
} MemBlock;
26
/*
 * Tracks guest memory in the range [start, end) as two address-sorted
 * lists: blocks currently handed out (used) and blocks available (free).
 */
struct QGuestAllocator {
    QAllocOpts opts;    /* behavior flags: leak warn/assert, paranoid checks */
    uint64_t start;     /* first guest address managed */
    uint64_t end;       /* one past the last guest address managed */
    uint32_t page_size; /* allocation granularity; asserted power of two */

    MemList used;       /* outstanding allocations */
    MemList free;       /* available blocks, coalesced on free */
};

#define DEFAULT_PAGE_SIZE 4096
38
39 static void mlist_delete(MemList *list, MemBlock *node)
40 {
41 g_assert(list && node);
42 QTAILQ_REMOVE(list, node, MLIST_ENTNAME);
43 g_free(node);
44 }
45
46 static MemBlock *mlist_find_key(MemList *head, uint64_t addr)
47 {
48 MemBlock *node;
49 QTAILQ_FOREACH(node, head, MLIST_ENTNAME) {
50 if (node->addr == addr) {
51 return node;
52 }
53 }
54 return NULL;
55 }
56
57 static MemBlock *mlist_find_space(MemList *head, uint64_t size)
58 {
59 MemBlock *node;
60
61 QTAILQ_FOREACH(node, head, MLIST_ENTNAME) {
62 if (node->size >= size) {
63 return node;
64 }
65 }
66 return NULL;
67 }
68
69 static MemBlock *mlist_sort_insert(MemList *head, MemBlock *insr)
70 {
71 MemBlock *node;
72 g_assert(head && insr);
73
74 QTAILQ_FOREACH(node, head, MLIST_ENTNAME) {
75 if (insr->addr < node->addr) {
76 QTAILQ_INSERT_BEFORE(node, insr, MLIST_ENTNAME);
77 return insr;
78 }
79 }
80
81 QTAILQ_INSERT_TAIL(head, insr, MLIST_ENTNAME);
82 return insr;
83 }
84
85 static inline uint64_t mlist_boundary(MemBlock *node)
86 {
87 return node->size + node->addr;
88 }
89
90 static MemBlock *mlist_join(MemList *head, MemBlock *left, MemBlock *right)
91 {
92 g_assert(head && left && right);
93
94 left->size += right->size;
95 mlist_delete(head, right);
96 return left;
97 }
98
99 static void mlist_coalesce(MemList *head, MemBlock *node)
100 {
101 g_assert(node);
102 MemBlock *left;
103 MemBlock *right;
104 char merge;
105
106 do {
107 merge = 0;
108 left = QTAILQ_PREV(node, MemList, MLIST_ENTNAME);
109 right = QTAILQ_NEXT(node, MLIST_ENTNAME);
110
111 /* clowns to the left of me */
112 if (left && mlist_boundary(left) == node->addr) {
113 node = mlist_join(head, left, node);
114 merge = 1;
115 }
116
117 /* jokers to the right */
118 if (right && mlist_boundary(node) == right->addr) {
119 node = mlist_join(head, node, right);
120 merge = 1;
121 }
122
123 } while (merge);
124 }
125
126 static MemBlock *mlist_new(uint64_t addr, uint64_t size)
127 {
128 MemBlock *block;
129
130 if (!size) {
131 return NULL;
132 }
133 block = g_malloc0(sizeof(MemBlock));
134
135 block->addr = addr;
136 block->size = size;
137
138 return block;
139 }
140
141 static uint64_t mlist_fulfill(QGuestAllocator *s, MemBlock *freenode,
142 uint64_t size)
143 {
144 uint64_t addr;
145 MemBlock *usednode;
146
147 g_assert(freenode);
148 g_assert_cmpint(freenode->size, >=, size);
149
150 addr = freenode->addr;
151 if (freenode->size == size) {
152 /* re-use this freenode as our used node */
153 QTAILQ_REMOVE(&s->free, freenode, MLIST_ENTNAME);
154 usednode = freenode;
155 } else {
156 /* adjust the free node and create a new used node */
157 freenode->addr += size;
158 freenode->size -= size;
159 usednode = mlist_new(addr, size);
160 }
161
162 mlist_sort_insert(&s->used, usednode);
163 return addr;
164 }
165
166 /* To assert the correctness of the list.
167 * Used only if ALLOC_PARANOID is set. */
168 static void mlist_check(QGuestAllocator *s)
169 {
170 MemBlock *node;
171 uint64_t addr = s->start > 0 ? s->start - 1 : 0;
172 uint64_t next = s->start;
173
174 QTAILQ_FOREACH(node, &s->free, MLIST_ENTNAME) {
175 g_assert_cmpint(node->addr, >, addr);
176 g_assert_cmpint(node->addr, >=, next);
177 addr = node->addr;
178 next = node->addr + node->size;
179 }
180
181 addr = s->start > 0 ? s->start - 1 : 0;
182 next = s->start;
183 QTAILQ_FOREACH(node, &s->used, MLIST_ENTNAME) {
184 g_assert_cmpint(node->addr, >, addr);
185 g_assert_cmpint(node->addr, >=, next);
186 addr = node->addr;
187 next = node->addr + node->size;
188 }
189 }
190
191 static uint64_t mlist_alloc(QGuestAllocator *s, uint64_t size)
192 {
193 MemBlock *node;
194
195 node = mlist_find_space(&s->free, size);
196 if (!node) {
197 fprintf(stderr, "Out of guest memory.\n");
198 g_assert_not_reached();
199 }
200 return mlist_fulfill(s, node, size);
201 }
202
203 static void mlist_free(QGuestAllocator *s, uint64_t addr)
204 {
205 MemBlock *node;
206
207 if (addr == 0) {
208 return;
209 }
210
211 node = mlist_find_key(&s->used, addr);
212 if (!node) {
213 fprintf(stderr, "Error: no record found for an allocation at "
214 "0x%016" PRIx64 ".\n",
215 addr);
216 g_assert_not_reached();
217 }
218
219 /* Rip it out of the used list and re-insert back into the free list. */
220 QTAILQ_REMOVE(&s->used, node, MLIST_ENTNAME);
221 mlist_sort_insert(&s->free, node);
222 mlist_coalesce(&s->free, node);
223 }
224
225 /*
226 * Mostly for valgrind happiness, but it does offer
227 * a chokepoint for debugging guest memory leaks, too.
228 */
229 void alloc_uninit(QGuestAllocator *allocator)
230 {
231 MemBlock *node;
232 MemBlock *tmp;
233 QAllocOpts mask;
234
235 /* Check for guest leaks, and destroy the list. */
236 QTAILQ_FOREACH_SAFE(node, &allocator->used, MLIST_ENTNAME, tmp) {
237 if (allocator->opts & (ALLOC_LEAK_WARN | ALLOC_LEAK_ASSERT)) {
238 fprintf(stderr, "guest malloc leak @ 0x%016" PRIx64 "; "
239 "size 0x%016" PRIx64 ".\n",
240 node->addr, node->size);
241 }
242 if (allocator->opts & (ALLOC_LEAK_ASSERT)) {
243 g_assert_not_reached();
244 }
245 g_free(node);
246 }
247
248 /* If we have previously asserted that there are no leaks, then there
249 * should be only one node here with a specific address and size. */
250 mask = ALLOC_LEAK_ASSERT | ALLOC_PARANOID;
251 QTAILQ_FOREACH_SAFE(node, &allocator->free, MLIST_ENTNAME, tmp) {
252 if ((allocator->opts & mask) == mask) {
253 if ((node->addr != allocator->start) ||
254 (node->size != allocator->end - allocator->start)) {
255 fprintf(stderr, "Free list is corrupted.\n");
256 g_assert_not_reached();
257 }
258 }
259
260 g_free(node);
261 }
262
263 g_free(allocator);
264 }
265
266 uint64_t guest_alloc(QGuestAllocator *allocator, size_t size)
267 {
268 uint64_t rsize = size;
269 uint64_t naddr;
270
271 rsize += (allocator->page_size - 1);
272 rsize &= -allocator->page_size;
273 g_assert_cmpint((allocator->start + rsize), <=, allocator->end);
274 g_assert_cmpint(rsize, >=, size);
275
276 naddr = mlist_alloc(allocator, rsize);
277 if (allocator->opts & ALLOC_PARANOID) {
278 mlist_check(allocator);
279 }
280
281 return naddr;
282 }
283
284 void guest_free(QGuestAllocator *allocator, uint64_t addr)
285 {
286 mlist_free(allocator, addr);
287 if (allocator->opts & ALLOC_PARANOID) {
288 mlist_check(allocator);
289 }
290 }
291
292 QGuestAllocator *alloc_init(uint64_t start, uint64_t end)
293 {
294 QGuestAllocator *s = g_malloc0(sizeof(*s));
295 MemBlock *node;
296
297 s->start = start;
298 s->end = end;
299
300 QTAILQ_INIT(&s->used);
301 QTAILQ_INIT(&s->free);
302
303 node = mlist_new(s->start, s->end - s->start);
304 QTAILQ_INSERT_HEAD(&s->free, node, MLIST_ENTNAME);
305
306 s->page_size = DEFAULT_PAGE_SIZE;
307
308 return s;
309 }
310
311 QGuestAllocator *alloc_init_flags(QAllocOpts opts,
312 uint64_t start, uint64_t end)
313 {
314 QGuestAllocator *s = alloc_init(start, end);
315 s->opts = opts;
316 return s;
317 }
318
319 void alloc_set_page_size(QGuestAllocator *allocator, size_t page_size)
320 {
321 /* Can't alter the page_size for an allocator in-use */
322 g_assert(QTAILQ_EMPTY(&allocator->used));
323
324 g_assert(is_power_of_2(page_size));
325 allocator->page_size = page_size;
326 }
327
/* OR additional behavior flags into the allocator (never clears any). */
void alloc_set_flags(QGuestAllocator *allocator, QAllocOpts opts)
{
    allocator->opts |= opts;
}