/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
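/*
 * Worked example (illustrative, derived from the code above): with
 * exclude = 0x0003 (tags 0 and 1 excluded), tag = 0 and offset = 2,
 * the inner loop first advances 0 -> 1 -> 2 (the first non-excluded
 * tag), the outer loop then advances 2 -> 3, and the function
 * returns 3.
 */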
/**
 * allocation_tag_mem_probe:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @probe: true to merely probe, never taking an exception
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 *
 * If the page is inaccessible for @ptr_access, or has a watchpoint, there are
 * three options:
 * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not
 *     accessible, and do not take watchpoint traps.  The calling code must
 *     handle those cases in the right priority compared to MTE traps.
 * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees
 *     that the page is going to be accessible.  We will take watchpoint traps.
 * (3) probe = false, ra != 0 : non-probe -- we will take both memory access
 *     traps and watchpoint traps.
 * (probe = true, ra != 0 is invalid and will assert.)
 */
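/*
 * Example of the tag layout described above: with TAG_GRANULE = 16
 * (LOG2_TAG_GRANULE = 4), the granules at 0x1000 and 0x1010 share the
 * tag byte at offset (0x1000 >> 5) = 0x80 in tag storage; the tag for
 * 0x1000 lives in bits [3:0] and the tag for 0x1010 in bits [7:4].
 */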
static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                         uint64_t ptr, MMUAccessType ptr_access,
                                         int ptr_size, MMUAccessType tag_access,
                                         bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    assert(!(probe && ra));

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
     * Indicate to probe_access_flags no-fault, then either return NULL
     * for the pure probe, or assert that we received a valid page for the
     * no-fault-expected probe.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    if (probe && (flags & TLB_INVALID_MASK)) {
        return NULL;
    }
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->extra.arm.pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }
    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
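    /*
     * Note: because TARGET_PAGE_MASK has all bits above the page offset
     * set, (ptr | TARGET_PAGE_MASK) is a negative value whose low bits
     * are the in-page offset; negating it yields the number of bytes
     * from ptr to the end of the page.  E.g. with 4KiB pages and a page
     * offset of 0xff8, in_page is 8.
     */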
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }
    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   uintptr_t ra)
{
    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
                                    ptr_size, tag_access, false, ra);
}
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}
static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}
uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}
static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}
/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}
/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
typedef void stg_store1(uint64_t, uint8_t *, int);
static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}
void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}
void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}
void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}
void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}
void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    uint64_t ret;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    /*
     * The ordering of elements within the word corresponds to
     * a little-endian operation.  Computation of shift comes from
     *
     *     index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
     *     data<index*4+3:index*4> = tag
     *
     * Because of the alignment of ptr above, BS=6 has shift=0.
     * All memory operations are aligned.  Defer support for BS=2,
     * requiring insertion or extraction of a nibble, until we
     * support a cpu that requires it.
     */
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        ret = *(uint8_t *)tag_mem;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        ret = cpu_to_le16(*(uint16_t *)tag_mem);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        ret = cpu_to_le32(*(uint32_t *)tag_mem);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        return cpu_to_le64(*(uint64_t *)tag_mem);
    default:
        /*
         * CPU configured with unsupported/invalid gm blocksize.
         * This is detected early in arm_cpu_realizefn.
         */
        g_assert_not_reached();
    }
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    return ret << shift;
}
void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /*
     * Tag store only happens if the page support tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    /* See LDGM for comments on BS and on shift. */
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    val >>= shift;
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        *(uint8_t *)tag_mem = val;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        *(uint16_t *)tag_mem = cpu_to_le16(val);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        *(uint32_t *)tag_mem = cpu_to_le32(val);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        *(uint64_t *)tag_mem = cpu_to_le64(val);
        break;
    default:
        /* cpu configured with unsupported gm blocksize. */
        g_assert_not_reached();
    }
}
void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}
static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}
/* Record a tag check failure. */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}
/**
 * checkN:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
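/*
 * For example, checkN(mem, 0, tag, 3) compares, in order, the low nibble
 * of mem[0], the high nibble of mem[0] and the low nibble of mem[1]
 * against the replicated tag, returning how many matched before the
 * first mismatch; a non-zero @odd starts at the high nibble instead.
 */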
/**
 * checkNrev:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * This is like checkN, but it runs backwards, checking the
 * tags starting with @tag and then the tags preceding it.
 * This is needed by the backwards-memory-copying operations.
 */
static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem-- ^ cmp;

    if (!odd) {
        goto start_even;
    }

    while (1) {
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_even:
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem-- ^ cmp;
    }
    return n;
}
/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint32_t sizem1, tag_count, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
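    /*
     * For example, a 16-byte access at ptr = 0x1008 has sizem1 = 15 and
     * ptr_last = 0x1017; tag_first = 0x1000 and tag_last = 0x1010, so
     * tag_count = 2 even though only one granule is fully covered.
     */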
    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, ra);

        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}
uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /*
     * R_XCHFJ: Alignment check not caused by memory type is priority 1,
     * higher than any translation fault.  When MTE is disabled, tcg
     * performs the alignment check during the code generated for the
     * memory access.  With MTE enabled, we must check this here before
     * raising any translation fault in allocation_tag_mem.
     */
    unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
    if (unlikely(align)) {
        align = (1u << align) - 1;
        if (unlikely(ptr & align)) {
            int idx = FIELD_EX32(desc, MTEDESC, MIDX);
            bool w = FIELD_EX32(desc, MTEDESC, WRITE);
            MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
            arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
        }
    }

    return mte_check(env, desc, ptr, GETPC());
}
/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}
/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkN() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *       do direct loads of 64 tag bits at a time;
     *   } else {
     *       call checkN()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the first byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return n * TAG_GRANULE - (ptr - tag_first);
    }
}
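/*
 * Example for the calculation above: with TAG_GRANULE = 16, ptr = 0x1008
 * and a mismatch first found in granule n = 1 (the granule at 0x1010),
 * the result is 1 * 16 - (0x1008 - 0x1000) = 8 bytes that can be
 * accessed before reaching the failing granule.
 */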
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /*
     * True probe; this will never fault.  Note that our caller passes
     * us a pointer to the end of the region, but allocation_tag_mem_probe()
     * wants a pointer to the start.  Because we know we don't span a page
     * boundary and that allocation_tag_mem_probe() doesn't otherwise care
     * about the size, pass in a size of 1 byte.  This is simpler than
     * adjusting the ptr to point to the start of the region and then having
     * to adjust the returned 'mem' to get the end of the tag memory.
     */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   1, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkNrev() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *       do direct loads of 64 tag bits at a time;
     *   } else {
     *       call checkNrev()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the last byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
    }
}
void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
                       uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag;
    void *mem;

    if (!desc) {
        /* Tags not actually enabled */
        return;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe: this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
                                   MMU_DATA_STORE, true, 0);
    if (!mem) {
        /* Tags not actually enabled for this page */
        return;
    }

    /*
     * We know that ptr and size are both TAG_GRANULE aligned; store
     * the tag from the pointer value into the tag memory.
     */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_count = size / TAG_GRANULE;
    if (ptr & TAG_GRANULE) {
        /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
        store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
        mem++;
        tag_count--;
    }
    memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
    if (tag_count & 1) {
        /* Final trailing unaligned nibble */
        mem += tag_count / 2;
        store_tag1_parallel(0, mem, ptr_tag);
    }
}