/* memory_ldst.inc.c */
/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* warning: addr must be aligned */
/*
 * Common worker for the 32-bit load variants below.
 *
 * Translates @addr, then either dispatches an MMIO read or loads directly
 * from RAM, byte-swapping as needed to honour @endian.  When @result is
 * non-NULL the transaction result is stored through it.
 *
 * TRANSLATE/IS_DIRECT/MAP_RAM/RCU_READ_LOCK/... are template parameters
 * supplied by the including file (see the #undef block at the end).
 */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;               /* bytes the translation must cover */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 4 || !IS_DIRECT(mr, false)) {
        /* Window too small or region not directly mapped RAM: use MMIO. */
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
        /*
         * Dispatch returns the value in target-native order; swap when the
         * caller requested the opposite endianness.
         * NOTE(review): on a failed dispatch val may be left uninitialized
         * here; r carries the failure back to the caller.
         */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);   /* target-native order */
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
76
77uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
78 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
79{
80 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
81 DEVICE_NATIVE_ENDIAN);
82}
83
84uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
85 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
86{
87 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
88 DEVICE_LITTLE_ENDIAN);
89}
90
91uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
92 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
93{
94 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
95 DEVICE_BIG_ENDIAN);
96}
97
98uint32_t glue(ldl_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
99{
100 return glue(address_space_ldl, SUFFIX)(ARG1, addr,
101 MEMTXATTRS_UNSPECIFIED, NULL);
102}
103
104uint32_t glue(ldl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
105{
106 return glue(address_space_ldl_le, SUFFIX)(ARG1, addr,
107 MEMTXATTRS_UNSPECIFIED, NULL);
108}
109
110uint32_t glue(ldl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
111{
112 return glue(address_space_ldl_be, SUFFIX)(ARG1, addr,
113 MEMTXATTRS_UNSPECIFIED, NULL);
114}
115
/* warning: addr must be aligned */
/*
 * Common worker for the 64-bit load variants below: translate @addr, then
 * read via MMIO dispatch or directly from RAM, honouring @endian.  The
 * transaction result is stored through @result when it is non-NULL.
 */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;               /* bytes the translation must cover */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 8 || !IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
        /* Dispatch yields target-native order; swap if the caller wants
         * the opposite endianness. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);   /* target-native order */
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
170
171uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
172 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
173{
174 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
175 DEVICE_NATIVE_ENDIAN);
176}
177
178uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
179 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
180{
181 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
182 DEVICE_LITTLE_ENDIAN);
183}
184
185uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
186 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
187{
188 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
189 DEVICE_BIG_ENDIAN);
190}
191
192uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
193{
194 return glue(address_space_ldq, SUFFIX)(ARG1, addr,
195 MEMTXATTRS_UNSPECIFIED, NULL);
196}
197
198uint64_t glue(ldq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
199{
200 return glue(address_space_ldq_le, SUFFIX)(ARG1, addr,
201 MEMTXATTRS_UNSPECIFIED, NULL);
202}
203
204uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
205{
206 return glue(address_space_ldq_be, SUFFIX)(ARG1, addr,
207 MEMTXATTRS_UNSPECIFIED, NULL);
208}
209
/*
 * Single-byte load: no alignment constraint and no endianness handling.
 * The byte is returned zero-extended in a uint32_t; the transaction result
 * is stored through @result when non-NULL.
 */
uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;               /* a one-byte window always suffices */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (!IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
243
244uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
245{
246 return glue(address_space_ldub, SUFFIX)(ARG1, addr,
247 MEMTXATTRS_UNSPECIFIED, NULL);
248}
249
/* warning: addr must be aligned */
/*
 * Common worker for the 16-bit load variants below: translate @addr, then
 * read via MMIO dispatch or directly from RAM, honouring @endian.  The
 * value is returned zero-extended; the transaction result is stored
 * through @result when non-NULL.
 */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;               /* bytes the translation must cover */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 2 || !IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
        /* Dispatch yields target-native order; swap if the caller wants
         * the opposite endianness. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);  /* target-native order */
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
304
305uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
306 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
307{
308 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
309 DEVICE_NATIVE_ENDIAN);
310}
311
312uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
313 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
314{
315 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
316 DEVICE_LITTLE_ENDIAN);
317}
318
319uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
320 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
321{
322 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
323 DEVICE_BIG_ENDIAN);
324}
325
326uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
327{
328 return glue(address_space_lduw, SUFFIX)(ARG1, addr,
329 MEMTXATTRS_UNSPECIFIED, NULL);
330}
331
332uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
333{
334 return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
335 MEMTXATTRS_UNSPECIFIED, NULL);
336}
337
338uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
339{
340 return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
341 MEMTXATTRS_UNSPECIFIED, NULL);
342}
343
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;               /* bytes the translation must cover */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 4 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = MAP_RAM(mr, addr1);
        stl_p(ptr, val);        /* target-native order, no INVALIDATE() */

        /* Record the write in the dirty bitmap, but mask out
         * DIRTY_MEMORY_CODE so translated code is NOT invalidated. */
        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        /* NOTE(review): the dirty range is computed from 'addr', while the
         * RAM pointer above is obtained via the translated offset 'addr1'.
         * Every other RAM access in this file indexes via addr1 — confirm
         * that 'addr' is the correct offset into the region's ram block. */
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
382
383void glue(stl_phys_notdirty, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
384{
385 glue(address_space_stl_notdirty, SUFFIX)(ARG1, addr, val,
386 MEMTXATTRS_UNSPECIFIED, NULL);
387}
388
/* warning: addr must be aligned */
/*
 * Common worker for the 32-bit store variants below: translate @addr, then
 * write via MMIO dispatch or directly into RAM, honouring @endian.  Unlike
 * the load path, the byte swap happens BEFORE the dispatch, so the device
 * sees the value in target-native order.  RAM writes are followed by
 * INVALIDATE() so translated code covering the range is flushed.
 */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;               /* bytes the translation must cover */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 4 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        /* Pre-swap so the dispatch receives target-native order. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);    /* target-native order */
            break;
        }
        INVALIDATE(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
441
442void glue(address_space_stl, SUFFIX)(ARG1_DECL,
443 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
444{
445 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
446 result, DEVICE_NATIVE_ENDIAN);
447}
448
449void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
450 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
451{
452 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
453 result, DEVICE_LITTLE_ENDIAN);
454}
455
456void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
457 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
458{
459 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
460 result, DEVICE_BIG_ENDIAN);
461}
462
463void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
464{
465 glue(address_space_stl, SUFFIX)(ARG1, addr, val,
466 MEMTXATTRS_UNSPECIFIED, NULL);
467}
468
469void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
470{
471 glue(address_space_stl_le, SUFFIX)(ARG1, addr, val,
472 MEMTXATTRS_UNSPECIFIED, NULL);
473}
474
475void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
476{
477 glue(address_space_stl_be, SUFFIX)(ARG1, addr, val,
478 MEMTXATTRS_UNSPECIFIED, NULL);
479}
480
/*
 * Single-byte store: no alignment constraint and no endianness handling.
 * Only the low 8 bits of @val are stored.  RAM writes are followed by
 * INVALIDATE() so translated code covering the byte is flushed.
 */
void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;               /* a one-byte window always suffices */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (!IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        stb_p(ptr, val);
        INVALIDATE(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
511
512void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
513{
514 glue(address_space_stb, SUFFIX)(ARG1, addr, val,
515 MEMTXATTRS_UNSPECIFIED, NULL);
516}
517
/* warning: addr must be aligned */
/*
 * Common worker for the 16-bit store variants below: translate @addr, then
 * write via MMIO dispatch or directly into RAM, honouring @endian.  The
 * byte swap happens BEFORE the dispatch; RAM writes are followed by
 * INVALIDATE() so translated code covering the range is flushed.
 */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;               /* bytes the translation must cover */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 2 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        /* Pre-swap so the dispatch receives target-native order. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);    /* target-native order */
            break;
        }
        INVALIDATE(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
570
571void glue(address_space_stw, SUFFIX)(ARG1_DECL,
572 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
573{
574 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
575 DEVICE_NATIVE_ENDIAN);
576}
577
578void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
579 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
580{
581 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
582 DEVICE_LITTLE_ENDIAN);
583}
584
585void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
586 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
587{
588 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
589 DEVICE_BIG_ENDIAN);
590}
591
592void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
593{
594 glue(address_space_stw, SUFFIX)(ARG1, addr, val,
595 MEMTXATTRS_UNSPECIFIED, NULL);
596}
597
598void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
599{
600 glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
601 MEMTXATTRS_UNSPECIFIED, NULL);
602}
603
604void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
605{
606 glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
607 MEMTXATTRS_UNSPECIFIED, NULL);
608}
609
/*
 * Common worker for the 64-bit store variants below: translate @addr, then
 * write via MMIO dispatch or directly into RAM, honouring @endian.  The
 * byte swap happens BEFORE the dispatch; RAM writes are followed by
 * INVALIDATE() so translated code covering the range is flushed.
 * NOTE(review): unlike its 16/32-bit siblings this one carries no
 * "addr must be aligned" warning and is not marked inline — presumably
 * the same alignment requirement applies; confirm.
 */
static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;               /* bytes the translation must cover */
    hwaddr addr1;               /* offset of addr inside the returned region */
    MemTxResult r;
    bool release_lock = false;  /* set if prepare_mmio_access took the BQL */

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 8 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        /* Pre-swap so the dispatch receives target-native order. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);    /* target-native order */
            break;
        }
        INVALIDATE(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
661
662void glue(address_space_stq, SUFFIX)(ARG1_DECL,
663 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
664{
665 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
666 DEVICE_NATIVE_ENDIAN);
667}
668
669void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
670 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
671{
672 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
673 DEVICE_LITTLE_ENDIAN);
674}
675
676void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
677 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
678{
679 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
680 DEVICE_BIG_ENDIAN);
681}
682
683void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
684{
685 glue(address_space_stq, SUFFIX)(ARG1, addr, val,
686 MEMTXATTRS_UNSPECIFIED, NULL);
687}
688
689void glue(stq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
690{
691 glue(address_space_stq_le, SUFFIX)(ARG1, addr, val,
692 MEMTXATTRS_UNSPECIFIED, NULL);
693}
694
695void glue(stq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
696{
697 glue(address_space_stq_be, SUFFIX)(ARG1, addr, val,
698 MEMTXATTRS_UNSPECIFIED, NULL);
699}
700
/*
 * Tear down the template parameters so this file can be included again
 * with a different SUFFIX / translation configuration.
 */
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef IS_DIRECT
#undef MAP_RAM
#undef INVALIDATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK