/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

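/*
 * This file is a template, #included with different macro definitions
 * rather than compiled on its own.  The includer must define:
 *
 *   ARG1_DECL, ARG1   declaration and name of the leading parameter,
 *                     e.g. an AddressSpace * or a MemoryRegionCache *
 *   SUFFIX            token glue()d onto each function name
 *   TRANSLATE         maps (addr, &addr1, &l, is_write, attrs) to the
 *                     MemoryRegion containing the access
 *   RCU_READ_LOCK, RCU_READ_UNLOCK
 *                     enter/leave the RCU critical section
 *
 * All six macros are #undef'ed at the end of this file so that it can
 * be included more than once.  (The AddressSpace/MemoryRegionCache
 * examples above are illustrative of typical includers, not exhaustive.)
 */
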
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 4, mr);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

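/*
 * Every helper below follows the shape of address_space_ldl_internal()
 * above: translate the address under the RCU read lock, then either
 * access guest RAM directly (when the whole access fits in a directly
 * accessible region) or dispatch to the region's MMIO callbacks,
 * taking the iothread lock first via prepare_mmio_access() if needed.
 */
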
uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

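/*
 * glue() pastes SUFFIX onto the wrapper names above.  With an empty
 * SUFFIX these are the plain address_space_ldl/_le/_be(); a non-empty
 * SUFFIX (for example _cached_slow, used here purely as an
 * illustration) would yield address_space_ldl_cached_slow() and so on.
 */
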
/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 8, mr);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 1, mr);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

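/*
 * The byte load above needs no _internal/_le/_be variants: a single
 * byte has no byte order, cannot be misaligned, and cannot straddle a
 * region boundary, so neither the endian switch nor the "l < n" check
 * applies.
 */
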
/* warning: addr must be aligned */
static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 2, mr);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint16_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

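/*
 * Illustrative use of the _notdirty store above (the actual callers
 * live outside this file): target code that updates accessed/dirty
 * bits in a guest page table entry can store through it so that the
 * bookkeeping write to the PTE does not itself disturb the page's
 * dirty tracking, per the warning comment above.
 */
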
/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint16_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK
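
/*
 * Sketch of a typical instantiation (illustrative; the real macro
 * definitions live in the including file, e.g. QEMU's memory.c):
 *
 *   #define ARG1_DECL              AddressSpace *as
 *   #define ARG1                   as
 *   #define SUFFIX
 *   #define TRANSLATE(...)         address_space_translate(as, __VA_ARGS__)
 *   #define RCU_READ_LOCK(...)     rcu_read_lock()
 *   #define RCU_READ_UNLOCK(...)   rcu_read_unlock()
 *   #include "memory_ldst.c.inc"
 *
 * With SUFFIX empty, glue(address_space_ldl, SUFFIX) expands to the
 * public address_space_ldl(), and likewise for the other helpers.
 */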