#include "qemu-common.h"
#include "cpu.h"
-#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
static inline int tb_alloc_page(TranslationBlock *tb,
                                unsigned int n, tb_page_addr_t page_addr)
{
PageDesc *p;
- TranslationBlock *last_first_tb;
+#ifndef CONFIG_USER_ONLY
+ bool page_already_protected;
+#endif
tb->page_addr[n] = page_addr;
p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
- last_first_tb = p->first_tb;
+#ifndef CONFIG_USER_ONLY
+ page_already_protected = p->first_tb != NULL;
+#endif
p->first_tb = (TranslationBlock *)((long)tb | n);
invalidate_page_bitmap(p);
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
allocated in a physical page */
- if (!last_first_tb) {
+ if (!page_already_protected) {
tlb_protect_code(page_addr);
}
#endif
return 0;
}
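The comment above states the invariant the new flag relies on: a page only needs write-protecting when its TB list goes from empty to non-empty, so a boolean recording whether p->first_tb was already set is enough. Below is a minimal, self-contained sketch of that pattern, not QEMU code; the toy_page, toy_tb, toy_add_tb and toy_protect names are invented for illustration, with toy_protect standing in for tlb_protect_code().

#include <stdbool.h>
#include <stdio.h>

struct toy_tb {
    struct toy_tb *next;
};

struct toy_page {
    struct toy_tb *first_tb;   /* head of the per-page TB list */
    bool write_protected;      /* set once, when the first TB arrives */
};

/* Stands in for tlb_protect_code(): mark the page read-only for guest code. */
static void toy_protect(struct toy_page *p)
{
    p->write_protected = true;
}

static void toy_add_tb(struct toy_page *p, struct toy_tb *tb)
{
    bool page_already_protected = p->first_tb != NULL;

    tb->next = p->first_tb;
    p->first_tb = tb;
    if (!page_already_protected) {
        toy_protect(p);        /* only the first TB in the page protects it */
    }
}

int main(void)
{
    struct toy_page page = { 0 };
    struct toy_tb a = { 0 }, b = { 0 };

    toy_add_tb(&page, &a);     /* list was empty: protects the page */
    toy_add_tb(&page, &b);     /* already protected: no second call */
    printf("protected=%d\n", page.write_protected);
    return 0;
}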
+struct last_map {
+ target_phys_addr_t start_addr;
+ ram_addr_t size;
+ ram_addr_t phys_offset;
+};
+
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
* address. Each intermediate table provides the next L2_BITs of guest
* physical address space. The number of levels varies based on host and
* guest configuration, making it efficient to build the final guest
* physical address by seeding the L1 offset and shifting and adding in
* each L2 offset as we recurse through them. */
-static void phys_page_for_each_1(CPUPhysMemoryClient *client,
- int level, void **lp, target_phys_addr_t addr)
+static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
+ void **lp, target_phys_addr_t addr,
+ struct last_map *map)
{
int i;

if (level == 0) {
PhysPageDesc *pd = *lp;
addr <<= L2_BITS + TARGET_PAGE_BITS;
for (i = 0; i < L2_SIZE; ++i) {
if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
- client->set_memory(client, addr | i << TARGET_PAGE_BITS,
- TARGET_PAGE_SIZE, pd[i].phys_offset, false);
+ target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
+
+ if (map->size &&
+ start_addr == map->start_addr + map->size &&
+ pd[i].phys_offset == map->phys_offset + map->size) {
+
+ map->size += TARGET_PAGE_SIZE;
+ continue;
+ } else if (map->size) {
+ client->set_memory(client, map->start_addr,
+ map->size, map->phys_offset, false);
+ }
+
+ map->start_addr = start_addr;
+ map->size = TARGET_PAGE_SIZE;
+ map->phys_offset = pd[i].phys_offset;
}
}
} else {
void **pp = *lp;
for (i = 0; i < L2_SIZE; ++i) {
phys_page_for_each_1(client, level - 1, pp + i,
- (addr << L2_BITS) | i);
+ (addr << L2_BITS) | i, map);
}
}
}
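The struct last_map accumulator added above batches set_memory() callbacks: as long as the next populated page is contiguous with the accumulated range in both guest physical address and phys_offset, the range simply grows; otherwise the accumulated range is flushed and a new one is started at the current page. Below is a standalone sketch of that coalescing logic, not QEMU code; toy_range, toy_add_page, toy_emit and TOY_PAGE_SIZE are invented names, with toy_emit standing in for the client->set_memory() callback.

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

/* Accumulator mirroring struct last_map above. */
struct toy_range {
    uint64_t start_addr;
    uint64_t size;
    uint64_t phys_offset;
};

/* Stands in for client->set_memory(): report one merged range. */
static void toy_emit(const struct toy_range *r)
{
    printf("start=0x%llx size=0x%llx offset=0x%llx\n",
           (unsigned long long)r->start_addr,
           (unsigned long long)r->size,
           (unsigned long long)r->phys_offset);
}

/* Feed one populated page into the accumulator, flushing on discontiguity. */
static void toy_add_page(struct toy_range *map, uint64_t start_addr,
                         uint64_t phys_offset)
{
    if (map->size &&
        start_addr == map->start_addr + map->size &&
        phys_offset == map->phys_offset + map->size) {
        map->size += TOY_PAGE_SIZE;   /* contiguous: grow the current range */
        return;
    }
    if (map->size) {
        toy_emit(map);                /* discontiguous: flush what we have */
    }
    map->start_addr = start_addr;     /* start a new range at this page */
    map->size = TOY_PAGE_SIZE;
    map->phys_offset = phys_offset;
}

int main(void)
{
    struct toy_range map = { 0 };

    /* three contiguous pages, then a gap: two callbacks instead of four */
    toy_add_page(&map, 0x1000, 0x0);
    toy_add_page(&map, 0x2000, 0x1000);
    toy_add_page(&map, 0x3000, 0x2000);
    toy_add_page(&map, 0x10000, 0x80000);
    if (map.size) {
        toy_emit(&map);               /* final flush, see phys_page_for_each() */
    }
    return 0;
}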
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
int i;
+ struct last_map map = { };
+
for (i = 0; i < P_L1_SIZE; ++i) {
phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
- l1_phys_map + i, i);
+ l1_phys_map + i, i, &map);
+ }
+ if (map.size) {
+ client->set_memory(client, map.start_addr, map.size, map.phys_offset,
+ false);
}
}
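As in the sketch above, a range is only emitted when the walk hits a discontiguity, so whatever has been accumulated when the recursion finishes still has to be pushed out. That is what the trailing map.size check in phys_page_for_each() does, and it is also why map is zero-initialized before the walk: with size == 0, the very first populated page simply starts a new range instead of being merged or flushed.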