]> git.proxmox.com Git - mirror_qemu.git/blame - util/cacheinfo.c
migration: report SaveStateEntry id and name on failure
[mirror_qemu.git] / util / cacheinfo.c
CommitLineData
b255b2c8
EC
1/*
2 * cacheinfo.c - helpers to query the host about its caches
3 *
4 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
5 * License: GNU GPL, version 2 or later.
6 * See the COPYING file in the top-level directory.
7 */
8
9#include "qemu/osdep.h"
5fe21034 10#include "qemu/host-utils.h"
782da5b2 11#include "qemu/atomic.h"
b255b2c8
EC
12
/* Host L1 cache line sizes in bytes, filled in at startup by the
   init_cache_info() constructor below.  After initialization these are
   non-zero (fallback_cache_info() always supplies a default) and are
   asserted to be powers of two. */
int qemu_icache_linesize = 0;
/* log2 of qemu_icache_linesize, for shift-based arithmetic. */
int qemu_icache_linesize_log;
int qemu_dcache_linesize = 0;
/* log2 of qemu_dcache_linesize. */
int qemu_dcache_linesize_log;
b255b2c8
EC
17
18/*
19 * Operating system specific detection mechanisms.
20 */
21
78723752 22#if defined(_WIN32)
b255b2c8
EC
23
24static void sys_cache_info(int *isize, int *dsize)
25{
26 SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
27 DWORD size = 0;
28 BOOL success;
29 size_t i, n;
30
31 /* Check for the required buffer size first. Note that if the zero
32 size we use for the probe results in success, then there is no
33 data available; fail in that case. */
34 success = GetLogicalProcessorInformation(0, &size);
35 if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
36 return;
37 }
38
39 n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
40 size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
41 buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
42 if (!GetLogicalProcessorInformation(buf, &size)) {
43 goto fail;
44 }
45
46 for (i = 0; i < n; i++) {
47 if (buf[i].Relationship == RelationCache
48 && buf[i].Cache.Level == 1) {
49 switch (buf[i].Cache.Type) {
50 case CacheUnified:
51 *isize = *dsize = buf[i].Cache.LineSize;
52 break;
53 case CacheInstruction:
54 *isize = buf[i].Cache.LineSize;
55 break;
56 case CacheData:
57 *dsize = buf[i].Cache.LineSize;
58 break;
59 default:
60 break;
61 }
62 }
63 }
64 fail:
65 g_free(buf);
66}
67
5ca156cf 68#elif defined(__APPLE__)
b255b2c8 69# include <sys/sysctl.h>
b255b2c8
EC
70static void sys_cache_info(int *isize, int *dsize)
71{
72 /* There's only a single sysctl for both I/D cache line sizes. */
73 long size;
74 size_t len = sizeof(size);
5ca156cf
JH
75 if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) {
76 *isize = *dsize = size;
77 }
78}
79#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
80# include <sys/sysctl.h>
81static void sys_cache_info(int *isize, int *dsize)
82{
83 /* There's only a single sysctl for both I/D cache line sizes. */
84 int size;
85 size_t len = sizeof(size);
86 if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) {
b255b2c8
EC
87 *isize = *dsize = size;
88 }
89}
b255b2c8
EC
90#else
91/* POSIX */
92
static void sys_cache_info(int *isize, int *dsize)
{
    /*
     * sysconf() returns -1 when the key is unsupported and may return 0
     * when the value is unknown (e.g. uClibc, or glibc inside some
     * container environments).  Accept only strictly positive results;
     * otherwise leave the output untouched so fallback_cache_info() can
     * supply a sane default.  Propagating -1 here would later trip the
     * power-of-two assertions in init_cache_info().
     */
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
    long tmp_isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
    if (tmp_isize > 0) {
        *isize = tmp_isize;
    }
# endif
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
    long tmp_dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    if (tmp_dsize > 0) {
        *dsize = tmp_dsize;
    }
# endif
}
102#endif /* sys_cache_info */
103
104/*
105 * Architecture (+ OS) specific detection mechanisms.
106 */
107
#if defined(__aarch64__)

static void arch_cache_info(int *isize, int *dsize)
{
    if (*isize != 0 && *dsize != 0) {
        return;
    }

    /*
     * The detailed cache geometry lives in CCSIDR_EL1/CLIDR_EL1/
     * CSSELR_EL1, but (at least under Linux) those registers are not
     * readable from EL0.  CTR_EL0 is readable, and it carries the
     * minimum line size of the entire hierarchy, which is exactly what
     * userspace cache maintenance needs.
     */
    uint64_t ctr_el0;
    asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr_el0));
    if (*isize == 0) {
        *isize = 4 << (ctr_el0 & 0xf);
    }
    if (*dsize == 0) {
        *dsize = 4 << ((ctr_el0 >> 16) & 0xf);
    }
}

#elif defined(_ARCH_PPC) && defined(__linux__)
# include "elf.h"

static void arch_cache_info(int *isize, int *dsize)
{
    /* Linux/PPC publishes the cache block sizes in the ELF auxv. */
    if (*isize == 0) {
        *isize = qemu_getauxval(AT_ICACHEBSIZE);
    }
    if (*dsize == 0) {
        *dsize = qemu_getauxval(AT_DCACHEBSIZE);
    }
}

#else
/* No architecture-specific source of cache line information. */
static void arch_cache_info(int *isize, int *dsize) { }
#endif /* arch_cache_info */
145
146/*
147 * ... and if all else fails ...
148 */
149
static void fallback_cache_info(int *isize, int *dsize)
{
    /* If exactly one of the two sizes is known, mirror it to the other. */
    if (*isize && !*dsize) {
        *dsize = *isize;
        return;
    }
    if (*dsize && !*isize) {
        *isize = *dsize;
        return;
    }
    if (*isize && *dsize) {
        /* Both already known -- nothing to do. */
        return;
    }

    /* Neither detected: fall back to a hard-coded default. */
#if defined(_ARCH_PPC)
    /*
     * On PPC the icache size computed for flush_icache_range is reused,
     * so we must assume the architectural minimum here.
     */
    *isize = *dsize = 16;
#else
    /* 64-byte lines are a common default on modern hosts. */
    *isize = *dsize = 64;
#endif
}
173
174static void __attribute__((constructor)) init_cache_info(void)
175{
176 int isize = 0, dsize = 0;
177
178 sys_cache_info(&isize, &dsize);
179 arch_cache_info(&isize, &dsize);
180 fallback_cache_info(&isize, &dsize);
181
5fe21034
EC
182 assert((isize & (isize - 1)) == 0);
183 assert((dsize & (dsize - 1)) == 0);
184
b255b2c8 185 qemu_icache_linesize = isize;
5fe21034 186 qemu_icache_linesize_log = ctz32(isize);
b255b2c8 187 qemu_dcache_linesize = dsize;
5fe21034 188 qemu_dcache_linesize_log = ctz32(dsize);
782da5b2
EC
189
190 atomic64_init();
b255b2c8 191}