]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (C) 2011 Citrix Ltd. | |
3 | * | |
4 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
5 | * the COPYING file in the top-level directory. | |
6 | * | |
7 | * Contributions after 2012-01-13 are licensed under the terms of the | |
8 | * GNU GPL, version 2 or (at your option) any later version. | |
9 | */ | |
10 | ||
11 | #include "qemu/osdep.h" | |
12 | ||
13 | #include <sys/resource.h> | |
14 | ||
15 | #include "hw/xen/xen_backend.h" | |
16 | #include "sysemu/blockdev.h" | |
17 | #include "qemu/bitmap.h" | |
18 | ||
19 | #include <xen/hvm/params.h> | |
20 | ||
21 | #include "sysemu/xen-mapcache.h" | |
22 | #include "trace-root.h" | |
23 | ||
24 | ||
25 | //#define MAPCACHE_DEBUG | |
26 | ||
27 | #ifdef MAPCACHE_DEBUG | |
28 | # define DPRINTF(fmt, ...) do { \ | |
29 | fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \ | |
30 | } while (0) | |
31 | #else | |
32 | # define DPRINTF(fmt, ...) do { } while (0) | |
33 | #endif | |
34 | ||
35 | #if HOST_LONG_BITS == 32 | |
36 | # define MCACHE_BUCKET_SHIFT 16 | |
37 | # define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */ | |
38 | #else | |
39 | # define MCACHE_BUCKET_SHIFT 20 | |
40 | # define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */ | |
41 | #endif | |
42 | #define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT) | |
43 | ||
/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the mapcache.
 * Empirical tests have shown that QEMU uses about 75MB more than
 * max_mcache_size.
 */
49 | #define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024) | |
50 | ||
/*
 * One cached mapping of a bucket-aligned chunk of guest physical memory.
 * Entries hashing to the same bucket are chained through @next.
 */
typedef struct MapCacheEntry {
    hwaddr paddr_index;            /* guest paddr >> MCACHE_BUCKET_SHIFT */
    uint8_t *vaddr_base;           /* host virtual base of the mapping */
    unsigned long *valid_mapping;  /* bitmap of pages that mapped successfully */
    uint8_t lock;                  /* pin count; a locked entry is not recycled */
    hwaddr size;                   /* mapping size in bytes */
    struct MapCacheEntry *next;    /* hash-collision chain */
} MapCacheEntry;
59 | ||
/*
 * Reverse-lookup record kept for every locked mapping handed out to a
 * caller, so the host vaddr can later be translated back or invalidated.
 */
typedef struct MapCacheRev {
    uint8_t *vaddr_req;            /* host vaddr returned to the caller */
    hwaddr paddr_index;            /* bucket index of the backing entry */
    hwaddr size;                   /* size of the backing entry */
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;
66 | ||
/*
 * The global map cache: a hash table (array of bucket-head entries with
 * collision chains) plus a list of currently locked mappings.
 */
typedef struct MapCache {
    MapCacheEntry *entry;          /* bucket array of nr_buckets entries */
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;     /* single-entry lookup cache */
    unsigned long max_mcache_size; /* total bytes the cache may map */
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr; /* optional translation hook */
    QemuMutex lock;                /* protects all fields above */
    void *opaque;                  /* passed back to phys_offset_to_gaddr */
} MapCache;
81 | ||
82 | static MapCache *mapcache; | |
83 | ||
/* Acquire the global mapcache mutex. */
static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}
88 | ||
/* Release the global mapcache mutex. */
static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}
93 | ||
/*
 * Return 1 if all @size bits starting at bit @nr of @addr are set,
 * 0 otherwise (i.e. there is no zero bit in [nr, nr + size)).
 */
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    /* find_next_zero_bit() returns >= the search limit when no zero bit
     * exists in the range; the comparison yields exactly 0 or 1. */
    return find_next_zero_bit(addr, size + nr, nr) >= nr + size;
}
102 | ||
/*
 * Allocate and initialise the global mapcache.
 *
 * @f:      optional callback translating a physical offset to a guest
 *          address, used by xen_map_cache() to retry failed lookups.
 * @opaque: opaque pointer passed back to @f.
 *
 * The cache size is derived from RLIMIT_AS: when running as root the
 * address-space limit is lifted and the full MCACHE_MAX_SIZE is used;
 * otherwise the cache is fitted below the hard limit, leaving
 * NON_MCACHE_MEMORY_SIZE spare for the rest of QEMU.
 */
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        /* Root may raise the limit to infinity and use the full cache. */
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        /* Unprivileged: raise the soft limit to the hard limit, then size
         * the cache to fit under it. */
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    /* Number of buckets needed to cover max_mcache_size, rounded up to a
     * whole bucket. */
    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    /* Round the bucket array allocation up to a whole number of pages. */
    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}
149 | ||
/*
 * (Re)map @size bytes of guest memory starting at bucket @address_index
 * into @entry.  Any previous mapping held by @entry is torn down first.
 * Pages that fail to map are recorded as clear bits in
 * entry->valid_mapping.  Exits the process on munmap/map failure.
 */
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    /* Tear down the old mapping (if any) before reusing the entry. */
    if (entry->vaddr_base != NULL) {
        ram_block_notify_remove(entry->vaddr_base, entry->size);
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    /* Guest frame numbers for every page of the bucket range. */
    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    /* Map all pages in one hypercall; per-page errors land in err[]. */
    vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                      nb_pfn, pfns, err);
    if (vaddr_base == NULL) {
        perror("xenforeignmemory_map");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    ram_block_notify_add(entry->vaddr_base, entry->size);

    /* Mark only the pages that actually mapped as valid. */
    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}
203 | ||
/*
 * Look up (or create) a mapping for guest physical address @phys_addr.
 * Caller must hold the mapcache lock.
 *
 * @size: number of bytes to map; 0 means "one bucket, untracked".
 * @lock: if non-zero, pin the entry and record it in locked_entries so
 *        it can later be released via xen_invalidate_map_cache_entry().
 *
 * Returns the host vaddr corresponding to @phys_addr, or NULL if the
 * range could not be mapped (after one optional retry through the
 * phys_offset_to_gaddr translation hook).
 */
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated = false;

tryagain:
    address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    /* Fast path: repeat hit on the last entry (only for unlocked,
     * zero-size requests). */
    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    /* Walk the collision chain for a usable entry: stop at the first
     * entry that is free (unlocked or unmapped) or that already matches. */
    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        /* Chain exhausted: append a fresh entry.  pentry is non-NULL here
         * because the loop must have advanced at least once. */
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, cache_size, address_index);
    } else if (!entry->lock) {
        /* Unlocked entry: remap it unless it already covers the range. */
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, cache_size, address_index);
        }
    }

    /* If some requested pages failed to map, optionally translate the
     * address once and retry; otherwise give up. */
    if(!test_bits(address_offset >> XC_PAGE_SHIFT,
                test_bit_size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        /* Pin the entry and remember the handed-out vaddr for reverse
         * lookup / later invalidation. */
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}
301 | ||
302 | uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, | |
303 | uint8_t lock) | |
304 | { | |
305 | uint8_t *p; | |
306 | ||
307 | mapcache_lock(); | |
308 | p = xen_map_cache_unlocked(phys_addr, size, lock); | |
309 | mapcache_unlock(); | |
310 | return p; | |
311 | } | |
312 | ||
313 | ram_addr_t xen_ram_addr_from_mapcache(void *ptr) | |
314 | { | |
315 | MapCacheEntry *entry = NULL; | |
316 | MapCacheRev *reventry; | |
317 | hwaddr paddr_index; | |
318 | hwaddr size; | |
319 | ram_addr_t raddr; | |
320 | int found = 0; | |
321 | ||
322 | mapcache_lock(); | |
323 | QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { | |
324 | if (reventry->vaddr_req == ptr) { | |
325 | paddr_index = reventry->paddr_index; | |
326 | size = reventry->size; | |
327 | found = 1; | |
328 | break; | |
329 | } | |
330 | } | |
331 | if (!found) { | |
332 | fprintf(stderr, "%s, could not find %p\n", __func__, ptr); | |
333 | QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { | |
334 | DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, | |
335 | reventry->vaddr_req); | |
336 | } | |
337 | abort(); | |
338 | return 0; | |
339 | } | |
340 | ||
341 | entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; | |
342 | while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { | |
343 | entry = entry->next; | |
344 | } | |
345 | if (!entry) { | |
346 | DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr); | |
347 | raddr = 0; | |
348 | } else { | |
349 | raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) + | |
350 | ((unsigned long) ptr - (unsigned long) entry->vaddr_base); | |
351 | } | |
352 | mapcache_unlock(); | |
353 | return raddr; | |
354 | } | |
355 | ||
/*
 * Release one locked mapping identified by the host vaddr @buffer.
 * Caller must hold the mapcache lock.
 *
 * Drops the reverse-map record and the entry's pin; if the pin count
 * reaches zero and the entry is a chained (heap-allocated) one, the
 * mapping is torn down and the entry freed.  Entries embedded in the
 * bucket array itself (pentry == NULL) are never freed.
 */
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    /* Locate the reverse-map record for this vaddr. */
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    /* Drop the fast-path cache if it points at this bucket. */
    if (mapcache->last_entry != NULL &&
            mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    /* NOTE(review): lock is assumed > 0 here (a reverse-map record
     * existed); a zero lock would underflow the uint8_t. */
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        /* Still pinned, or this is the bucket-array head which cannot be
         * unlinked/freed. */
        return;
    }

    /* Unlink the chained entry, unmap it, and free it. */
    pentry->next = entry->next;
    ram_block_notify_remove(entry->vaddr_base, entry->size);
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}
410 | ||
/*
 * Public entry point: release the locked mapping for @buffer under the
 * mapcache lock.
 */
void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}
417 | ||
/*
 * Flush the entire map cache: unmap and reset every unlocked entry in
 * the bucket array.  Locked entries (and empty ones) are skipped.
 * Pending block-layer AIO is drained first so no in-flight request
 * still references a mapping being torn down.
 */
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    /* Any surviving locked mapping at this point is suspicious. */
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        /* NOTE(review): unlike xen_remap_bucket() and
         * xen_invalidate_map_cache_entry_unlocked(), no
         * ram_block_notify_remove() is issued before this munmap —
         * confirm whether notifiers need to see these teardowns. */
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}