/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"


static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

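/*
 * Worked example for choose_nonexcluded_tag(), for illustration only:
 * with exclude = 0x0005 (tags 0 and 2 excluded) and tag = 0,
 * offset == 0 advances past the excluded tag 0 and returns 1, while
 * offset == 2 steps 0 -> 1 on the first iteration and 1 -> 3 on the
 * second (skipping the excluded tag 2) and returns 3.
 */
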
/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
    /* Tag storage not implemented. */
    return NULL;
}

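/*
 * For illustration, with the architectural TAG_GRANULE of 16 bytes
 * (LOG2_TAG_GRANULE == 4): the granules at 0x1000 and 0x1010 share the
 * tag byte at index 0x1000 >> 5 == 0x80, with the tag for 0x1000 in
 * bits [3:0] and the tag for 0x1010 in bits [7:4].
 */
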
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    int rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to behave as if
     * GCR_EL1.RRND==0, always producing deterministic results.
     */
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i;

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
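
/*
 * One NextRandomTagBit step, for illustration: with seed = 0x0001,
 * top = bit5 ^ bit3 ^ bit2 ^ bit0 = 1, so the new seed is
 * (1 << 15) | (0x0001 >> 1) = 0x8000 and bit 0 of offset is set.
 * Four such steps yield the 4-bit offset fed to
 * choose_nonexcluded_tag() above.
 */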

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}
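
/*
 * Example, for illustration: for an ADDG source pointer whose
 * allocation tag (bits [59:56]) is 5, with tag_offset = 2 and no tags
 * excluded by GCR_EL1, choose_nonexcluded_tag(5, 2, 0) steps
 * 5 -> 6 -> 7, so the result is ptr + offset with allocation tag 7.
 */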

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = atomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = atomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

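/*
 * The compare-and-swap loop above is the usual lock-free
 * read-modify-write pattern: if another vCPU updates the other nibble
 * of the same tag byte between the atomic_read and the atomic_cmpxchg,
 * the cmpxchg returns the byte's new value and the loop retries with
 * it, so neither store is lost.
 */
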
typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

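/*
 * Note that even the stub form of STG performs the alignment check and
 * probes the address for writability, so a store to an unmapped or
 * read-only page still faults when no tag is actually written.
 */
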
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            atomic_set(mem1, tag);
        }
    }
}

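/*
 * Illustrative layouts, assuming the architectural TAG_GRANULE of 16:
 * an ST2G at ptr = 0x1010 is unaligned mod 32 and spans two tag bytes
 * -- the high nibble of the byte covering 0x1000/0x1010 and the low
 * nibble of the byte covering 0x1020/0x1030.  An ST2G at ptr = 0x1000
 * is aligned mod 32 and rewrites a single tag byte; e.g. tag 0xa
 * becomes the byte value 0xaa via tag |= tag << 4.
 */
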
void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}
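
/*
 * The in_page computation relies on TARGET_PAGE_MASK being a negative
 * mask.  For illustration, with 4KiB pages, ptr = 0x1000fff0 gives
 * ptr | TARGET_PAGE_MASK = 0xfffffffffffffff0, whose negation is 16,
 * the number of bytes left in the page; so the two granules are probed
 * separately when they straddle a page boundary.
 */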

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)

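/*
 * With the GMID_EL1_BS value of 6 asserted below, LDGM_STGM_SIZE is
 * 4 << 6 = 256 bytes, i.e. 16 granules of 16 bytes.  Their 16 4-bit
 * tags occupy 256 / (2 * TAG_GRANULE) = 8 tag bytes, which is why the
 * helpers below transfer exactly one little-endian 64-bit value.
 */
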
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
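
/*
 * Worked example, for illustration: for the common dcz_blocksize of 4
 * (2^4 words = 64 bytes), log2_dcz_bytes = 6 and log2_tag_bytes = 1,
 * so a DC GZVA block covers 4 granules whose 4 tag nibbles fill
 * tag_bytes = 2 tag bytes; with val & 0xf == 0x3, tag_pair = 0x33 and
 * the memset writes 0x33 to both bytes.
 */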

/*
 * Perform an MTE checked access for a single logical or atomic access.
 */
uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return ptr;
}

/*
 * Perform an MTE checked access for multiple logical accesses.
 */
uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return ptr;
}