/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }

    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }

    return tag;
}
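
/*
 * Worked example (illustrative only, not from the spec): with
 * exclude == 0x0006 (tags 1 and 2 excluded), tag == 0 and offset == 2,
 * the outer loop advances past two non-excluded tags: 0 -> 3 (skipping
 * the excluded 1 and 2), then 3 -> 4, so the function returns 4.
 */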

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe.  The page is expected to be
     * valid.  Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
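
/*
 * Worked example of the tag-memory layout described above (illustrative
 * only): with LOG2_TAG_GRANULE == 4, the granules at 0x1000..0x100f and
 * 0x1010..0x101f share the tag byte at offset (0x1000 >> 5) == 0x80 in
 * tag memory; the first granule's tag is in bits [3:0] of that byte and
 * the second granule's tag is in bits [7:4].
 */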

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
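
/*
 * The loop above steps a 16-bit linear feedback shift register: each
 * iteration feeds the xor of seed bits 5, 3, 2 and 0 back into bit 15
 * and shifts right by one.  For instance (illustrative only), seed ==
 * 0x0001 produces top == 1, giving a new seed of 0x8000 and setting
 * bit 0 of the tag offset.
 */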

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
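
/*
 * Illustration of the retry loop above (not part of the build): two
 * vcpus updating the two nibbles of the same tag byte can race.  If
 * vcpu A reads old == 0x00 and prepares new == 0x05, but vcpu B
 * meanwhile stores 0x30, A's cmpxchg returns 0x30 != 0x00, so A
 * retries with old == 0x30 and eventually stores 0x35.  Neither
 * vcpu's nibble update is lost.
 */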

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
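
/*
 * Layout note with a small example (illustrative only): ST2G covers two
 * adjacent granules.  When ptr is odd mod 2*TAG_GRANULE, those granules'
 * tags live in the high nibble of one tag byte and the low nibble of the
 * next, hence the two store1() calls above.  When aligned, both nibbles
 * sit in a single byte, so e.g. tag == 0x7 is replicated to 0x77 and
 * written with one atomic byte store.
 */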

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page support tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}
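
/*
 * Size arithmetic for LDGM/STGM, worked through (illustrative only,
 * assuming TAG_GRANULE == 16): with GMID_EL1_BS == 6, LDGM_STGM_SIZE is
 * 4 << 6 == 256 bytes, i.e. 16 granules.  At two 4-bit tags per tag byte
 * that is 256 / 32 == 8 tag bytes, exactly the 64 bits moved by the
 * ldq_le_p() and stq_le_p() calls above.
 */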

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
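
/*
 * The (val & 0xf) * 0x11 trick above replicates one 4-bit tag into both
 * nibbles of a byte, e.g. tag 0x9 becomes 0x99, so the memset() can fill
 * every granule pair in the block with the same tag in one call.
 */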

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure.  */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}
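
/*
 * Summary of the SCTLR_ELx.TCF/TCF0 encoding handled above, as reflected
 * in the code paths of this function: 0 = ignored (never reaches this
 * helper), 1 = synchronous Data Abort, 2 = asynchronous TFSR flag set,
 * 3 = asynchronous for stores but synchronous for loads.  TCF0
 * (bits [39:38]) applies to the EL0 regimes; TCF (bits [41:40]) otherwise.
 */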

/**
 * checkN:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
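
/*
 * Worked example (illustrative only): checking 3 granules starting at an
 * even nibble against cmp == 0x3, with tag memory bytes {0x33, 0x53}.
 * The first byte matches in both nibbles (n becomes 2); in the second
 * byte the low nibble matches (n becomes 3 == count) and the loop stops
 * before looking at the mismatching 0x5, so the result is success.
 */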

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;
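
    /*
     * Worked example of the rounding above (illustrative only, assuming
     * TAG_GRANULE == 16): a 16-byte access at ptr == 0x1018 has ptr_last
     * == 0x1027, so tag_first == 0x1010 and tag_last == 0x1020, giving
     * tag_count == 2; tag_byte_first == 0x1000 and tag_byte_last ==
     * 0x1020, giving tag_size == 2 on the one-page path below.
     */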

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}
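
/*
 * Note on the comparison strategy above: because ptr_tag is replicated
 * into every nibble (e.g. tag 0x3 widened to 0x3333 for a 64-byte block),
 * a whole block of tags is validated with a single integer compare, and
 * ctz64() of the xor then locates the first differing position for the
 * failure report.
 */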