* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
    }
}
-#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
-/* Do not use push_options pragmas unnecessarily, because clang
- * does not support them.
- */
-#ifdef CONFIG_AVX2_OPT
-#pragma GCC push_options
-#pragma GCC target("sse2")
-#endif
-#include <emmintrin.h>
+#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
+#include <immintrin.h>
/* Note that each of these vectorized functions requires len >= 64.  */

-static bool
+static bool __attribute__((target("sse2")))
buffer_zero_sse2(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
}
-#ifdef CONFIG_AVX2_OPT
-#pragma GCC pop_options
-#endif
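
/*
 * An illustrative aside, not part of this patch: <immintrin.h> is the
 * umbrella header for all of the SSE/AVX/AVX512 intrinsics, which is
 * why the single include above can replace the per-ISA <emmintrin.h>
 * and <smmintrin.h> includes.  The per-function target attribute that
 * replaces the push_options/pop_options pragmas is accepted by both
 * GCC and clang.  A minimal standalone sketch of the pattern, where
 * all_zero16() is a hypothetical name:
 *
 *     #include <stdbool.h>
 *     #include <immintrin.h>
 *
 *     static bool __attribute__((target("sse2")))
 *     all_zero16(const void *buf)
 *     {
 *         __m128i v = _mm_loadu_si128(buf);
 *         __m128i z = _mm_setzero_si128();
 *         return _mm_movemask_epi8(_mm_cmpeq_epi8(v, z)) == 0xFFFF;
 *     }
 */
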
#ifdef CONFIG_AVX2_OPT
-/* Note that due to restrictions/bugs wrt __builtin functions in gcc <= 4.8,
- * the includes have to be within the corresponding push_options region, and
- * therefore the regions themselves have to be ordered with increasing ISA.
- */
-#pragma GCC push_options
-#pragma GCC target("sse4")
-#include <smmintrin.h>
-
-static bool
+static bool __attribute__((target("sse4")))
buffer_zero_sse4(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    return _mm_testz_si128(t, t);
}
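
/*
 * An illustrative aside, not part of this patch: SSE4.1's PTEST makes
 * the zero test cheaper than the SSE2 compare-and-movemask sequence.
 * For any __m128i value v the following two expressions agree:
 *
 *     _mm_movemask_epi8(_mm_cmpeq_epi8(v, _mm_setzero_si128())) == 0xFFFF
 *     _mm_testz_si128(v, v)
 *
 * PTEST sets ZF when (v AND v) == 0, i.e. when every bit of v is
 * clear, folding the byte compare and the mask extraction into a
 * single flag-setting instruction.
 */
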
-#pragma GCC pop_options
-#pragma GCC push_options
-#pragma GCC target("avx2")
-#include <immintrin.h>
-
-static bool
+static bool __attribute__((target("avx2")))
buffer_zero_avx2(const void *buf, size_t len)
{
    /* Begin with an unaligned head of 32 bytes.  */
    __m256i t = _mm256_loadu_si256(buf);
    __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
    __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);

-    if (likely(p <= e)) {
-        /* Loop over 32-byte aligned blocks of 128.  */
-        do {
-            __builtin_prefetch(p);
-            if (unlikely(!_mm256_testz_si256(t, t))) {
-                return false;
-            }
-            t = p[-4] | p[-3] | p[-2] | p[-1];
-            p += 4;
-        } while (p <= e);
-    } else {
-        t |= _mm256_loadu_si256(buf + 32);
-        if (len <= 128) {
-            goto last2;
+    /* Loop over 32-byte aligned blocks of 128.  */
+    while (p <= e) {
+        __builtin_prefetch(p);
+        if (unlikely(!_mm256_testz_si256(t, t))) {
+            return false;
        }
-    }
+        t = p[-4] | p[-3] | p[-2] | p[-1];
+        p += 4;
+    }

    /* Finish the last block of 128 unaligned.  */
    t |= _mm256_loadu_si256(buf + len - 4 * 32);
    t |= _mm256_loadu_si256(buf + len - 3 * 32);
-last2:
    t |= _mm256_loadu_si256(buf + len - 2 * 32);
    t |= _mm256_loadu_si256(buf + len - 1 * 32);
    return _mm256_testz_si256(t, t);
}
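
/*
 * A worked example of the alignment arithmetic above, with the
 * hypothetical values buf = 0x1001 and len = 150:
 *
 *     head load covers [0x1001, 0x1021)
 *     p = (0x1001 + 5 * 32) & -32 = 0x10a0
 *     e = (0x1001 + 150) & -32    = 0x1080
 *
 * Here p > e, so the loop body never runs, and the four unaligned
 * tail loads cover [buf + len - 128, buf + len) = [0x1017, 0x1097),
 * which together with the head spans the whole buffer.  In general,
 * rounding buf + 5 * 32 down to a 32-byte boundary puts the first
 * aligned block p[-4] no later than buf + 32, so no byte between the
 * head and the aligned blocks is ever skipped; overlapping loads are
 * harmless because OR-ing the same zero bytes twice changes nothing.
 */
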
-#pragma GCC pop_options
#endif /* CONFIG_AVX2_OPT */
+#ifdef CONFIG_AVX512F_OPT
+static bool __attribute__((target("avx512f")))
+buffer_zero_avx512(const void *buf, size_t len)
+{
+    /* Begin with an unaligned head of 64 bytes.  */
+    __m512i t = _mm512_loadu_si512(buf);
+    __m512i *p = (__m512i *)(((uintptr_t)buf + 5 * 64) & -64);
+    __m512i *e = (__m512i *)(((uintptr_t)buf + len) & -64);
+
+    /* Loop over 64-byte aligned blocks of 256.  */
+    while (p <= e) {
+        __builtin_prefetch(p);
+        if (unlikely(_mm512_test_epi64_mask(t, t))) {
+            return false;
+        }
+        t = p[-4] | p[-3] | p[-2] | p[-1];
+        p += 4;
+    }
+
+    /* Finish the last block of 256 unaligned.  */
+    t |= _mm512_loadu_si512(buf + len - 4 * 64);
+    t |= _mm512_loadu_si512(buf + len - 3 * 64);
+    t |= _mm512_loadu_si512(buf + len - 2 * 64);
+    t |= _mm512_loadu_si512(buf + len - 1 * 64);
+
+    return !_mm512_test_epi64_mask(t, t);
+}
+#endif /* CONFIG_AVX512F_OPT */
+
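/*
 * An illustrative aside, not part of this patch: AVX512F has no
 * 512-bit PTEST that sets the flags directly, so the code above uses
 * VPTESTMQ.  _mm512_test_epi64_mask(t, t) returns a __mmask8 in which
 * bit i is set iff 64-bit lane i of (t & t) is nonzero; the mask is
 * zero exactly when all 512 bits of t are zero, hence the logical
 * negation in the final return.
 */
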
/* Note that for test_buffer_is_zero_next_accel, the most preferred
 * ISA must have the least significant bit.
 */
-#define CACHE_AVX2    1
-#define CACHE_SSE4    2
-#define CACHE_SSE2    4
+#define CACHE_AVX512F 1
+#define CACHE_AVX2    2
+#define CACHE_SSE4    4
+#define CACHE_SSE2    8
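
/*
 * A sketch, not part of this patch, of how that bit order can be
 * consumed: clearing the lowest set bit of cpuid_cache steps from the
 * most preferred ISA to the next one, which is what the
 * test_buffer_is_zero_next_accel() contract mentioned above relies
 * on.  A plausible implementation:
 *
 *     bool test_buffer_is_zero_next_accel(void)
 *     {
 *         if (cpuid_cache == 0) {
 *             return false;     // only buffer_zero_int is left
 *         }
 *         cpuid_cache &= cpuid_cache - 1;  // drop the preferred ISA
 *         init_accel(cpuid_cache);
 *         return true;
 *     }
 */
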
/* Make sure that these variables are appropriately initialized when
 * SSE2 is enabled on the compiler command-line, but the compiler is
 * too old to support CONFIG_AVX2_OPT or CONFIG_AVX512F_OPT.
 */
-#ifdef CONFIG_AVX2_OPT
+#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
# define INIT_CACHE 0
# define INIT_ACCEL buffer_zero_int
#else
static unsigned cpuid_cache = INIT_CACHE;
static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;
+static int length_to_accel = 64;

static void init_accel(unsigned cache)
{
    bool (*fn)(const void *, size_t) = buffer_zero_int;
    if (cache & CACHE_SSE2) {
        fn = buffer_zero_sse2;
+        length_to_accel = 64;
    }
#ifdef CONFIG_AVX2_OPT
    if (cache & CACHE_SSE4) {
        fn = buffer_zero_sse4;
+        length_to_accel = 64;
    }
    if (cache & CACHE_AVX2) {
        fn = buffer_zero_avx2;
+        length_to_accel = 128;
+    }
+#endif
+#ifdef CONFIG_AVX512F_OPT
+    if (cache & CACHE_AVX512F) {
+        fn = buffer_zero_avx512;
+        length_to_accel = 256;
    }
#endif
    buffer_accel = fn;
}
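
/*
 * Why these thresholds: each accelerator begins with one unaligned
 * vector load at buf and (in the AVX2/AVX512 variants above) finishes
 * with four unaligned vector loads ending at buf + len, so len must
 * be at least four vector widths: 4 * 16 = 64 bytes for SSE2/SSE4,
 * 4 * 32 = 128 for AVX2 and 4 * 64 = 256 for AVX512F.  Shorter
 * buffers take the buffer_zero_int path in select_accel_fn().
 */
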
-#ifdef CONFIG_AVX2_OPT
+#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
#include "qemu/cpuid.h"
static void __attribute__((constructor)) init_cpuid_cache(void)
{
-    int max = __get_cpuid_max(0, NULL);
+    unsigned max = __get_cpuid_max(0, NULL);
    int a, b, c, d;
    unsigned cache = 0;
        /* We must check that AVX is not just available, but usable.  */
        if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) {
-            int bv;
-            __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0));
+            unsigned bv = xgetbv_low(0);
            __cpuid_count(7, 0, a, b, c, d);
-            if ((bv & 6) == 6 && (b & bit_AVX2)) {
+            if ((bv & 0x6) == 0x6 && (b & bit_AVX2)) {
                cache |= CACHE_AVX2;
            }
+            /* 0xe6:
+             *  XCR0[7:5] = 111b (OPMASK state, the upper 256 bits of
+             *                    ZMM0-ZMM15, and ZMM16-ZMM31 state are
+             *                    enabled by the OS)
+             *  XCR0[2:1] = 11b (XMM state and YMM state are enabled by
+             *                   the OS)
+             */
+            if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512F)) {
+                cache |= CACHE_AVX512F;
+            }
        }
    }
    cpuid_cache = cache;
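
/*
 * An illustrative aside, not part of this patch: xgetbv_low() from
 * "qemu/cpuid.h" is presumably a wrapper around the same XGETBV
 * instruction that the removed inline asm above invoked directly,
 * returning the low 32 bits of the selected extended control
 * register, along the lines of:
 *
 *     static inline unsigned xgetbv_low(unsigned c)
 *     {
 *         unsigned a, d;
 *         asm("xgetbv" : "=a"(a), "=d"(d) : "c"(c));
 *         return a;
 *     }
 */
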
static bool select_accel_fn(const void *buf, size_t len)
{
-    if (likely(len >= 64)) {
+    if (likely(len >= length_to_accel)) {
        return buffer_accel(buf, len);
    }
    return buffer_zero_int(buf, len);