/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
11 #include <sys/resource.h>
13 #include "hw/xen_backend.h"
16 #include <xen/hvm/params.h>
19 #include "xen-mapcache.h"
//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

/* Bucket granularity of the map cache, per architecture. */
#if defined(__i386__)
#  define MCACHE_BUCKET_SHIFT 16
#elif defined(__x86_64__)
#  define MCACHE_BUCKET_SHIFT 20
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

/* Minimal bitmap helpers, mirroring the Linux kernel API. */
#define BITS_PER_LONG (sizeof(long) * 8)
#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]
44 typedef struct MapCacheEntry
{
45 target_phys_addr_t paddr_index
;
47 DECLARE_BITMAP(valid_mapping
, MCACHE_BUCKET_SIZE
>> XC_PAGE_SHIFT
);
49 struct MapCacheEntry
*next
;
52 typedef struct MapCacheRev
{
54 target_phys_addr_t paddr_index
;
55 QTAILQ_ENTRY(MapCacheRev
) next
;
58 typedef struct MapCache
{
60 unsigned long nr_buckets
;
61 QTAILQ_HEAD(map_cache_head
, MapCacheRev
) locked_entries
;
63 /* For most cases (>99.9%), the page address is the same. */
64 target_phys_addr_t last_address_index
;
65 uint8_t *last_address_vaddr
;
66 unsigned long max_mcache_size
;
67 unsigned int mcache_bucket_shift
;
/* The single global map cache, allocated by qemu_map_cache_init(). */
static MapCache *mapcache;
72 static inline int test_bit(unsigned int bit
, const unsigned long *map
)
74 return !!((map
)[(bit
) / BITS_PER_LONG
] & (1UL << ((bit
) % BITS_PER_LONG
)));
77 void qemu_map_cache_init(void)
80 struct rlimit rlimit_as
;
82 mapcache
= qemu_mallocz(sizeof (MapCache
));
84 QTAILQ_INIT(&mapcache
->locked_entries
);
85 mapcache
->last_address_index
= -1;
87 getrlimit(RLIMIT_AS
, &rlimit_as
);
88 rlimit_as
.rlim_cur
= rlimit_as
.rlim_max
;
89 setrlimit(RLIMIT_AS
, &rlimit_as
);
90 mapcache
->max_mcache_size
= rlimit_as
.rlim_max
;
92 mapcache
->nr_buckets
=
93 (((mapcache
->max_mcache_size
>> XC_PAGE_SHIFT
) +
94 (1UL << (MCACHE_BUCKET_SHIFT
- XC_PAGE_SHIFT
)) - 1) >>
95 (MCACHE_BUCKET_SHIFT
- XC_PAGE_SHIFT
));
97 size
= mapcache
->nr_buckets
* sizeof (MapCacheEntry
);
98 size
= (size
+ XC_PAGE_SIZE
- 1) & ~(XC_PAGE_SIZE
- 1);
99 DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache
->nr_buckets
, size
);
100 mapcache
->entry
= qemu_mallocz(size
);
103 static void qemu_remap_bucket(MapCacheEntry
*entry
,
104 target_phys_addr_t size
,
105 target_phys_addr_t address_index
)
111 target_phys_addr_t nb_pfn
= size
>> XC_PAGE_SHIFT
;
113 trace_qemu_remap_bucket(address_index
);
115 pfns
= qemu_mallocz(nb_pfn
* sizeof (xen_pfn_t
));
116 err
= qemu_mallocz(nb_pfn
* sizeof (int));
118 if (entry
->vaddr_base
!= NULL
) {
119 if (munmap(entry
->vaddr_base
, size
) != 0) {
120 perror("unmap fails");
125 for (i
= 0; i
< nb_pfn
; i
++) {
126 pfns
[i
] = (address_index
<< (MCACHE_BUCKET_SHIFT
-XC_PAGE_SHIFT
)) + i
;
129 vaddr_base
= xc_map_foreign_bulk(xen_xc
, xen_domid
, PROT_READ
|PROT_WRITE
,
131 if (vaddr_base
== NULL
) {
132 perror("xc_map_foreign_bulk");
136 entry
->vaddr_base
= vaddr_base
;
137 entry
->paddr_index
= address_index
;
139 for (i
= 0; i
< nb_pfn
; i
+= BITS_PER_LONG
) {
140 unsigned long word
= 0;
141 if ((i
+ BITS_PER_LONG
) > nb_pfn
) {
142 j
= nb_pfn
% BITS_PER_LONG
;
147 word
= (word
<< 1) | !err
[i
+ --j
];
149 entry
->valid_mapping
[i
/ BITS_PER_LONG
] = word
;
156 uint8_t *qemu_map_cache(target_phys_addr_t phys_addr
, target_phys_addr_t size
, uint8_t lock
)
158 MapCacheEntry
*entry
, *pentry
= NULL
;
159 target_phys_addr_t address_index
= phys_addr
>> MCACHE_BUCKET_SHIFT
;
160 target_phys_addr_t address_offset
= phys_addr
& (MCACHE_BUCKET_SIZE
- 1);
162 trace_qemu_map_cache(phys_addr
);
164 if (address_index
== mapcache
->last_address_index
&& !lock
) {
165 trace_qemu_map_cache_return(mapcache
->last_address_vaddr
+ address_offset
);
166 return mapcache
->last_address_vaddr
+ address_offset
;
169 entry
= &mapcache
->entry
[address_index
% mapcache
->nr_buckets
];
171 while (entry
&& entry
->lock
&& entry
->paddr_index
!= address_index
&& entry
->vaddr_base
) {
176 entry
= qemu_mallocz(sizeof (MapCacheEntry
));
177 pentry
->next
= entry
;
178 qemu_remap_bucket(entry
, size
? : MCACHE_BUCKET_SIZE
, address_index
);
179 } else if (!entry
->lock
) {
180 if (!entry
->vaddr_base
|| entry
->paddr_index
!= address_index
||
181 !test_bit(address_offset
>> XC_PAGE_SHIFT
, entry
->valid_mapping
)) {
182 qemu_remap_bucket(entry
, size
? : MCACHE_BUCKET_SIZE
, address_index
);
186 if (!test_bit(address_offset
>> XC_PAGE_SHIFT
, entry
->valid_mapping
)) {
187 mapcache
->last_address_index
= -1;
188 trace_qemu_map_cache_return(NULL
);
192 mapcache
->last_address_index
= address_index
;
193 mapcache
->last_address_vaddr
= entry
->vaddr_base
;
195 MapCacheRev
*reventry
= qemu_mallocz(sizeof(MapCacheRev
));
197 reventry
->vaddr_req
= mapcache
->last_address_vaddr
+ address_offset
;
198 reventry
->paddr_index
= mapcache
->last_address_index
;
199 QTAILQ_INSERT_HEAD(&mapcache
->locked_entries
, reventry
, next
);
202 trace_qemu_map_cache_return(mapcache
->last_address_vaddr
+ address_offset
);
203 return mapcache
->last_address_vaddr
+ address_offset
;
206 ram_addr_t
qemu_ram_addr_from_mapcache(void *ptr
)
208 MapCacheRev
*reventry
;
209 target_phys_addr_t paddr_index
;
212 QTAILQ_FOREACH(reventry
, &mapcache
->locked_entries
, next
) {
213 if (reventry
->vaddr_req
== ptr
) {
214 paddr_index
= reventry
->paddr_index
;
220 fprintf(stderr
, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr
);
221 QTAILQ_FOREACH(reventry
, &mapcache
->locked_entries
, next
) {
222 DPRINTF(" "TARGET_FMT_plx
" -> %p is present\n", reventry
->paddr_index
,
223 reventry
->vaddr_req
);
229 return paddr_index
<< MCACHE_BUCKET_SHIFT
;
232 void qemu_invalidate_entry(uint8_t *buffer
)
234 MapCacheEntry
*entry
= NULL
, *pentry
= NULL
;
235 MapCacheRev
*reventry
;
236 target_phys_addr_t paddr_index
;
239 if (mapcache
->last_address_vaddr
== buffer
) {
240 mapcache
->last_address_index
= -1;
243 QTAILQ_FOREACH(reventry
, &mapcache
->locked_entries
, next
) {
244 if (reventry
->vaddr_req
== buffer
) {
245 paddr_index
= reventry
->paddr_index
;
251 DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer
);
252 QTAILQ_FOREACH(reventry
, &mapcache
->locked_entries
, next
) {
253 DPRINTF(" "TARGET_FMT_plx
" -> %p is present\n", reventry
->paddr_index
, reventry
->vaddr_req
);
257 QTAILQ_REMOVE(&mapcache
->locked_entries
, reventry
, next
);
260 entry
= &mapcache
->entry
[paddr_index
% mapcache
->nr_buckets
];
261 while (entry
&& entry
->paddr_index
!= paddr_index
) {
266 DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer
);
270 if (entry
->lock
> 0 || pentry
== NULL
) {
274 pentry
->next
= entry
->next
;
275 if (munmap(entry
->vaddr_base
, MCACHE_BUCKET_SIZE
) != 0) {
276 perror("unmap fails");
282 void qemu_invalidate_map_cache(void)
285 MapCacheRev
*reventry
;
287 /* Flush pending AIO before destroying the mapcache */
290 QTAILQ_FOREACH(reventry
, &mapcache
->locked_entries
, next
) {
291 DPRINTF("There should be no locked mappings at this time, "
292 "but "TARGET_FMT_plx
" -> %p is present\n",
293 reventry
->paddr_index
, reventry
->vaddr_req
);
298 for (i
= 0; i
< mapcache
->nr_buckets
; i
++) {
299 MapCacheEntry
*entry
= &mapcache
->entry
[i
];
301 if (entry
->vaddr_base
== NULL
) {
305 if (munmap(entry
->vaddr_base
, MCACHE_BUCKET_SIZE
) != 0) {
306 perror("unmap fails");
310 entry
->paddr_index
= 0;
311 entry
->vaddr_base
= NULL
;
314 mapcache
->last_address_index
= -1;
315 mapcache
->last_address_vaddr
= NULL
;
/*
 * Map 'size' bytes of guest-contiguous physical memory at 'phys_addr'
 * with a single foreign-mapping call, outside the bucket cache.
 * NOTE(review): this definition continues past the end of the visible
 * chunk; only the portion shown here is annotated.
 */
uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size)
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_map_block(phys_addr, size);
    /* Work in page-frame numbers from here on. */
    phys_addr >>= XC_PAGE_SHIFT;

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    /* The requested range is contiguous in guest-physical space. */
    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = phys_addr + i;

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");