/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

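/*
 * Choose the tag @offset steps after @tag, skipping tags named in the
 * @exclude mask.  A worked example derived from the code below: with
 * exclude = 0x0006 (tags 1 and 2 excluded), tag = 1 and offset = 2,
 * the first step advances 1 -> 3 (skipping the excluded tag 2) and the
 * second 3 -> 4, so the result is 4.  If all sixteen tags are excluded,
 * the result is 0.
 */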
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

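    /*
     * Index of the tag byte within the page's tag block.  As a sketch,
     * assuming 4KiB pages and LOG2_TAG_GRANULE == 4 (16-byte granules),
     * this extracts ptr[11:5]: one tag byte covers two adjacent granules,
     * i.e. 32 bytes of data.
     */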
    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe.  The page is expected to be
     * valid.  Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

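    /*
     * A sketch of the scaling below, assuming LOG2_TAG_GRANULE == 4:
     * each tag byte covers 2 * 16 = 32 bytes of data, so Normal physical
     * address 0x40000020 maps to tag physical address
     * 0x40000020 >> 5 == 0x02000001.
     */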
    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
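    /*
     * The four iterations above step a 16-bit Fibonacci LFSR with taps
     * at bits 5, 3, 2 and 0 (the NextRandomTagBit pseudocode), collecting
     * one pseudo-random offset bit per step.
     */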
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
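/*
 * Serves ADDG and SUBG.  As an illustrative sketch (instruction operands
 * assumed): "addg x0, x1, #32, #2" reaches this helper with offset = 32
 * and tag_offset = 2, advancing the address by 32 bytes and the tag by
 * two steps past any GCR_EL1-excluded values.
 */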
uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}
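/*
 * Bit LOG2_TAG_GRANULE of the address picks the nibble within the tag
 * byte: e.g. with 16-byte granules, addresses 0x00..0x0f of a 32-byte
 * pair select bits [3:0] and addresses 0x10..0x1f select bits [7:4].
 */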
static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

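/*
 * Two tags share each tag byte, so a store in a parallel context can
 * race with a store to the adjacent granule's nibble in the same byte;
 * the cmpxchg retry loop below preserves both updates.
 */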
/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}
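/*
 * LDGM loads the tags for a naturally aligned block of gm_blocksize
 * granules as a packed bitfield.  As a sketch: with GMID_EL1.BS = 4 the
 * block is 4 << 4 = 64 bytes, i.e. four granules, whose four tags fill
 * 16 bits of the result.
 */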
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    uint64_t ret;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD,
                                 gm_bs_bytes / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    /*
     * The ordering of elements within the word corresponds to
     * a little-endian operation.  Computation of shift comes from
     *
     *   index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
     *   data<index*4+3:index*4> = tag
     *
     * Because of the alignment of ptr above, BS=6 has shift=0.
     * All memory operations are aligned.  Defer support for BS=2,
     * requiring insertion or extraction of a nibble, until we
     * support a cpu that requires it.
     */
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        ret = *(uint8_t *)tag_mem;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        ret = cpu_to_le16(*(uint16_t *)tag_mem);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        ret = cpu_to_le32(*(uint32_t *)tag_mem);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        return cpu_to_le64(*(uint64_t *)tag_mem);
    default:
        /*
         * CPU configured with unsupported/invalid gm blocksize.
         * This is detected early in arm_cpu_realizefn.
         */
        g_assert_not_reached();
    }
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    return ret << shift;
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD,
                                 gm_bs_bytes / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    /* See LDGM for comments on BS and on shift. */
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    val >>= shift;
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        *(uint8_t *)tag_mem = val;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        *(uint16_t *)tag_mem = cpu_to_le16(val);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        *(uint32_t *)tag_mem = cpu_to_le32(val);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        *(uint64_t *)tag_mem = cpu_to_le64(val);
        break;
    default:
        /* cpu configured with unsupported gm blocksize. */
        g_assert_not_reached();
    }
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE + 1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
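        /* Multiplying the 4-bit tag by 0x11 replicates it into both nibbles. */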
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

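    /* DFSC 0x11 is the architected code for a synchronous tag check fault. */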
    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
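
/*
 * Worked example for checkN: mem = { 0xf3, 0x33 }, odd = 0, cmp = 3,
 * count = 4.  After replication cmp = 0x33 and diff = 0xf3 ^ 0x33 = 0xc0:
 * the even nibble matches (n = 1) but the odd nibble differs, so checkN
 * returns 1 and the caller reports a failure in the second granule.
 */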

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @ra: the return address for exception handling
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access. */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

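    /*
     * Illustrative case: a 16-byte access at ptr = 0x...1008 has
     * ptr_last = 0x...1017, so tag_first = 0x...1000 and
     * tag_last = 0x...1010, giving tag_count = 2.
     */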
    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /*
     * R_XCHFJ: Alignment check not caused by memory type is priority 1,
     * higher than any translation fault.  When MTE is disabled, tcg
     * performs the alignment check during the code generated for the
     * memory access.  With MTE enabled, we must check this here before
     * raising any translation fault in allocation_tag_mem.
     */
    unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
    if (unlikely(align)) {
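        /* ALIGN stores log2 of the required alignment; convert to a mask. */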
        align = (1u << align) - 1;
        if (unlikely(ptr & align)) {
            int idx = FIELD_EX32(desc, MTEDESC, MIDX);
            bool w = FIELD_EX32(desc, MTEDESC, WRITE);
            MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
            arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
        }
    }

    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE + 1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
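    /*
     * In each case below, multiplying the 4-bit ptr_tag by a repeated-1
     * constant (0x11, 0x1111, ...) replicates it across every nibble of
     * the comparison word, matching the packed tag bytes read from mem.
     */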
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}