target/arm/tcg/mte_helper.c
/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"


static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
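
/*
 * Worked example (illustrative, not from the original source): with
 * tag = 1, offset = 2 and exclude = 0x000c (tags 2 and 3 excluded),
 * the outer loop runs twice. The first pass steps 1 -> 2 -> 3 -> 4,
 * skipping both excluded tags; the second pass steps 4 -> 5. The
 * result, 5, is the second non-excluded tag after the starting tag,
 * matching the intent of the Arm ChooseNonExcludedTag pseudocode.
 */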

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte. Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address. This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe. The page is expected to be
     * valid. Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page. E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL. If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region. For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}
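
/*
 * Worked example of the tag-address arithmetic above (illustrative):
 * with LOG2_TAG_GRANULE == 4, one tag byte covers two 16-byte granules.
 * For physical address 0x40001230 the tag byte therefore lives at
 * tag-space address 0x40001230 >> 5 == 0x02000091, and bit 4 of the
 * data address (set in this example) selects the high nibble of that
 * byte.
 */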

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm. Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function. So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
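
/*
 * Note (illustrative): the RandomTag loop above steps a 16-bit LFSR
 * four times, collecting one pseudo-random bit per step into the 4-bit
 * @offset, which choose_nonexcluded_tag() then uses to select a
 * non-excluded tag starting from RGSR_EL1.TAG. Given the same
 * RGSR_EL1.SEED, the generated tag sequence is fully deterministic.
 */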

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}
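
/*
 * Worked example (illustrative): bit LOG2_TAG_GRANULE (bit 4) of the
 * address picks the nibble, so for ptr = 0x...10 the tag is read from
 * bits [7:4] of the shared tag byte, while for ptr = 0x...00 bits
 * [3:0] are used.
 */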

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
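
/*
 * Design note (illustrative): there is no sub-byte atomic operation,
 * so the parallel variant merges the 4-bit tag with a compare-and-swap
 * loop on the whole byte, retrying if the neighbouring nibble was
 * updated concurrently, e.g. by another vcpu.
 */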

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
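
/*
 * Worked example (illustrative): for ptr = 0x...20 (aligned mod 32)
 * both granules share a single tag byte, so one byte store of
 * (tag | tag << 4) suffices and is naturally atomic. For ptr = 0x...30
 * the granule pair straddles a tag-byte boundary, so the high nibble
 * of one byte and the low nibble of the next are written separately.
 */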

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags. The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}
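
/*
 * Arithmetic check (illustrative): with GMID_EL1_BS == 6,
 * LDGM_STGM_SIZE is 4 << 6 == 256 bytes, i.e. 16 granules of 16 bytes.
 * Sixteen 4-bit tags occupy 256 / 32 == 8 tag bytes, exactly the
 * 64-bit quantity moved by ldq_le_p above and stq_le_p below.
 */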

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * The tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags. The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
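
/*
 * Worked example (illustrative): for a 64-byte DC ZVA block,
 * dcz_blocksize is 4 (log2 of the block size in 32-bit words), so
 * log2_dcz_bytes == 6 and tag_bytes == 2: the two tag bytes covering
 * the block are filled with the tag replicated into both nibbles
 * (e.g. tag 0x3 -> 0x33).
 */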

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal". But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

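    /*
     * For reference, the SCTLR_ELx.TCF/TCF0 encoding dispatched below:
     * 0 - tag check faults have no effect (never reaches this helper);
     * 1 - synchronous Data Abort; 2 - asynchronous accumulation into
     * TFSR_ELx; 3 - (FEAT_MTE3) asymmetric, synchronous for reads and
     * asynchronous for writes.
     */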
    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing with the tag in the odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags. SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair. An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
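
/*
 * Worked example (illustrative): with mem = {0x23, 0x33}, cmp = 3,
 * odd = 0 and count = 4, the replicated tag is 0x33 and the first
 * diff is 0x23 ^ 0x33 = 0x10. The even (low) nibble matches, so n
 * becomes 1; the odd (high) nibble differs, so the loop breaks and
 * returns 1, identifying granule 1 as the first failing granule.
 */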

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @ra: return address for exception handling, or 0 for mte_probe
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access. */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule. For the first granule, the
     * failure address is @ptr, the first byte accessed. Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}
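
/*
 * Worked example of the page-crossing path (illustrative, assuming
 * 4KiB target pages): an 8-byte access at ptr = 0xffc has ptr_last =
 * 0x1003, so tag_first = 0xff0, tag_last = 0x1000 and tag_count = 2.
 * With next_page = 0x1000, c = 1 tag is checked via mem1 on the first
 * page and the remaining tag_count - c = 1 tag via mem2 on the second.
 */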

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed. This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page. DC_ZVA requires that we supply
     * the original pointer for an invalid page. But watchpoints require
     * that we probe the actual space. So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128. For user-only, aarch64_max_initfn will set the
     * block size to 512. Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs; each nibble is 4 bits. */
    i = ctz64(mem_tag ^ ptr_tag) >> 2;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}