/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

typedef struct AddressSpaceDispatch AddressSpaceDispatch;

void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);

extern const MemoryRegionOps unassigned_mem_ops;

bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
                                unsigned size, bool is_write);

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

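/*
 * Dirty page tracking: ram_list.phys_dirty holds one set of dirty flags
 * per target page, indexed by (ram_addr >> TARGET_PAGE_BITS).  Each entry
 * is a bitmask of the flags below; a value of 0xff (all bits set) marks a
 * page as dirty for every client.
 */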
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

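/* Return the dirty flags recorded for the page containing addr. */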
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

/* Return 1 if the page containing addr is dirty for every client (all
 * dirty flags set), 0 otherwise. */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
}

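/* Return non-zero if any page in [start, start + length) has any of the
 * requested dirty_flags set. */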
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    int ret = 0;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
    }
    return ret;
}

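/* OR dirty_flags into the flags of the page containing addr and return the
 * updated flags. */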
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

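/* Mark the page containing addr dirty for every client (all flags set). */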
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flags(addr, 0xff);
}

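/* Clear dirty_flags from the page containing addr and return the updated
 * flags. */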
static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                        int dirty_flags)
{
    int mask = ~dirty_flags;

    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
}

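/* Set dirty_flags on every page in [start, start + length) and notify Xen
 * that the range was modified. */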
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
    }
    xen_modified_memory(start, length);
}

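/* Clear dirty_flags from every page in [start, start + length). */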
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

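/*
 * Illustrative sketch only, not part of this header: one plausible way the
 * dirty-tracking helpers above fit together, e.g. when scanning guest RAM
 * for pages touched since the last migration pass.  send_page() and the
 * use of ram_size as the total guest RAM size are assumptions made for
 * this example, not interfaces declared here.
 *
 *     ram_addr_t addr;
 *
 *     for (addr = 0; addr < ram_size; addr += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
 *                                           MIGRATION_DIRTY_FLAG)) {
 *             send_page(addr);
 *             cpu_physical_memory_mask_dirty_range(addr, TARGET_PAGE_SIZE,
 *                                                  MIGRATION_DIRTY_FLAG);
 *         }
 *     }
 */
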
extern const IORangeOps memory_region_iorange_ops;

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_INTERNAL_H */