/*
 * Support for RAM backed by mmapped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifdef CONFIG_LINUX
#include <linux/mman.h>
#else /* !CONFIG_LINUX */
#define MAP_SYNC 0x0
#define MAP_SHARED_VALIDATE 0x0
#endif /* CONFIG_LINUX */

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
#include "qemu/host-utils.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"

#define HUGETLBFS_MAGIC 0x958458f6

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#include <linux/magic.h>
#endif

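/*
 * Determine the filesystem type backing the given fd. Returns
 * QEMU_FS_TYPE_UNKNOWN for invalid fds, for unrecognized filesystems, and
 * on non-Linux hosts.
 */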
QemuFsType qemu_fd_getfs(int fd)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (fd < 0) {
        return QEMU_FS_TYPE_UNKNOWN;
    }

    do {
        ret = fstatfs(fd, &fs);
    } while (ret != 0 && errno == EINTR);

    switch (fs.f_type) {
    case TMPFS_MAGIC:
        return QEMU_FS_TYPE_TMPFS;
    case HUGETLBFS_MAGIC:
        return QEMU_FS_TYPE_HUGETLBFS;
    default:
        return QEMU_FS_TYPE_UNKNOWN;
    }
#else
    return QEMU_FS_TYPE_UNKNOWN;
#endif
}

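/*
 * Return the page size to use for mappings of the given fd: on Linux, the
 * hugetlbfs block size if the fd refers to hugetlbfs, otherwise the real
 * host page size (or QEMU_VMALLOC_ALIGN on SPARC, which needs greater
 * alignment).
 */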
size_t qemu_fd_getpagesize(int fd)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (fd != -1) {
        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size();
}

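/*
 * Check whether a MAP_NORESERVE request can actually be honored for the
 * given fd and mapping flags: returns true if skipping the swap space
 * reservation is expected to be effective, otherwise reports an error and
 * returns false.
 */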
#define OVERCOMMIT_MEMORY_PATH "/proc/sys/vm/overcommit_memory"
static bool map_noreserve_effective(int fd, uint32_t qemu_map_flags)
{
#if defined(__linux__)
    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
    gchar *content = NULL;
    const char *endptr;
    unsigned int tmp;

    /*
     * hugetlb accounting is different from ordinary swap reservation:
     * a) Hugetlb pages from the pool are reserved for both private and
     *    shared mappings. For shared mappings, all mappers have to specify
     *    MAP_NORESERVE.
     * b) MAP_NORESERVE is not affected by /proc/sys/vm/overcommit_memory.
     */
    if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size()) {
        return true;
    }

    /*
     * Accountable mappings in the kernel that can be affected by MAP_NORESERVE
     * are private writable mappings (see mm/mmap.c:accountable_mapping() in
     * Linux). For all shared or readonly mappings, MAP_NORESERVE is always
     * implicitly active -- no reservation; this includes shmem. The only
     * exception is shared anonymous memory; it is accounted like private
     * anonymous memory.
     */
    if (readonly || (shared && fd >= 0)) {
        return true;
    }

    /*
     * MAP_NORESERVE is globally ignored for applicable !hugetlb mappings when
     * memory overcommit is set to "never". Sparse memory regions aren't really
     * possible in this system configuration.
     *
     * Bail out now instead of silently committing way more memory than
     * currently desired by the user.
     */
    if (g_file_get_contents(OVERCOMMIT_MEMORY_PATH, &content, NULL, NULL) &&
        !qemu_strtoui(content, &endptr, 0, &tmp) &&
        (!endptr || *endptr == '\n')) {
        if (tmp == 2) {
            error_report("Skipping reservation of swap space is not supported:"
                         " \"" OVERCOMMIT_MEMORY_PATH "\" is \"2\"");
            return false;
        }
        return true;
    }
    /* this interface has been around since Linux 2.6 */
    error_report("Skipping reservation of swap space is not supported:"
                 " Could not read: \"" OVERCOMMIT_MEMORY_PATH "\"");
    return false;
#endif
    /*
     * E.g., FreeBSD used to define MAP_NORESERVE, never implemented it,
     * and removed it a while ago.
     */
    error_report("Skipping reservation of swap space is not supported");
    return false;
}

/*
 * Reserve a new memory region of the requested size to be used for mapping
 * from the given fd (if any).
 */
static void *mmap_reserve(size_t size, int fd)
{
    int flags = MAP_PRIVATE;

#if defined(__powerpc64__) && defined(__linux__)
    /*
     * On ppc64, mappings in the same segment (aka slice) must share the same
     * page size. Since we will be re-allocating part of this segment
     * from the supplied fd, we should make sure to use the same page size;
     * to this end, we mmap the supplied fd. In this case, set MAP_NORESERVE
     * to avoid allocating backing store memory.
     * We do this unless we are using the system page size, in which case
     * anonymous memory is OK.
     */
    if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size()) {
        fd = -1;
        flags |= MAP_ANONYMOUS;
    } else {
        flags |= MAP_NORESERVE;
    }
#else
    fd = -1;
    flags |= MAP_ANONYMOUS;
#endif

    return mmap(0, size, PROT_NONE, flags, fd, 0);
}

/*
 * Activate memory in a reserved region from the given fd (if any), to make
 * it accessible.
 */
static void *mmap_activate(void *ptr, size_t size, int fd,
                           uint32_t qemu_map_flags, off_t map_offset)
{
    const bool noreserve = qemu_map_flags & QEMU_MAP_NORESERVE;
    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
    const bool sync = qemu_map_flags & QEMU_MAP_SYNC;
    const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
    int map_sync_flags = 0;
    int flags = MAP_FIXED;
    void *activated_ptr;

    if (noreserve && !map_noreserve_effective(fd, qemu_map_flags)) {
        return MAP_FAILED;
    }

    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
    flags |= noreserve ? MAP_NORESERVE : 0;
    if (shared && sync) {
        map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
    }

    activated_ptr = mmap(ptr, size, prot, flags | map_sync_flags, fd,
                         map_offset);
    if (activated_ptr == MAP_FAILED && map_sync_flags) {
        if (errno == ENOTSUP) {
            char *proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
            char *file_name = g_malloc0(PATH_MAX);
            int len = readlink(proc_link, file_name, PATH_MAX - 1);

            if (len < 0) {
                len = 0;
            }
            file_name[len] = '\0';
            fprintf(stderr, "Warning: requesting persistence across crashes "
                    "for backend file %s failed. Proceeding without "
                    "persistence; data might become corrupted in case of a "
                    "host crash.\n", file_name);
            g_free(proc_link);
            g_free(file_name);
            warn_report("Using a non-DAX backing file with the 'pmem=on'"
                        " option is deprecated");
        }
        /*
         * If mmap failed with MAP_SHARED_VALIDATE | MAP_SYNC, we will try
         * again without these flags to handle backwards compatibility.
         */
        activated_ptr = mmap(ptr, size, prot, flags, fd, map_offset);
    }
    return activated_ptr;
}

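/*
 * Size of the PROT_NONE guard page that qemu_ram_mmap() leaves mapped after
 * each RAM block.
 */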
static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
    /* Mappings in the same segment must share the same page size */
    return qemu_fd_getpagesize(fd);
#else
    return qemu_real_host_page_size();
#endif
}

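/*
 * Allocate RAM in two steps: mmap_reserve() a PROT_NONE region large enough
 * to guarantee the requested alignment, then mmap_activate() the aligned
 * part with the requested protection and mapping flags. The surplus
 * reservation is unmapped again, except for a trailing guard page.
 */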
void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    uint32_t qemu_map_flags,
                    off_t map_offset)
{
    const size_t guard_pagesize = mmap_guard_pagesize(fd);
    size_t offset, total;
    void *ptr, *guardptr;

    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    total = size + align;

    guardptr = mmap_reserve(total, fd);
    if (guardptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= guard_pagesize);

    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;

    ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                        map_offset);
    if (ptr == MAP_FAILED) {
        munmap(guardptr, total);
        return MAP_FAILED;
    }

    if (offset > 0) {
        munmap(guardptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to serve
     * as a guard page against potential buffer overflows.
     */
    total -= offset;
    if (total > size + guard_pagesize) {
        munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
    }

    return ptr;
}

void qemu_ram_munmap(int fd, void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + mmap_guard_pagesize(fd));
    }
}
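
/*
 * A minimal usage sketch (illustrative only; the backing fd, its lifetime,
 * and the choice of QEMU_MAP_* flags are up to the caller):
 *
 *     size_t align = qemu_fd_getpagesize(fd);  // power of 2, >= guard page
 *     void *block = qemu_ram_mmap(fd, size, align, QEMU_MAP_SHARED, 0);
 *
 *     if (block == MAP_FAILED) {
 *         // handle the allocation failure
 *     }
 *     // ... use the block ...
 *     qemu_ram_munmap(fd, block, size);
 *
 * The same fd and size must be passed to qemu_ram_munmap() so that the
 * guard page size is computed consistently with the allocation.
 */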