2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
5 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
6 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
27 #include "../tcg-ldst.c.inc"
35 #if TCG_TARGET_REG_BITS == 32
36 # define LO_OFF (MIPS_BE * 4)
37 # define HI_OFF (4 - LO_OFF)
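/* MIPS_BE is 1 on a big-endian host, so the low half of a 64-bit value
   lives at offset 4 there and at offset 0 on a little-endian host. */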
39 /* To assert at compile-time that these values are never used
40 for TCG_TARGET_REG_BITS == 64. */
42 # define LO_OFF link_error()
43 # define HI_OFF link_error()
46 #ifdef CONFIG_DEBUG_TCG
47 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
83 #define TCG_TMP0 TCG_REG_AT
84 #define TCG_TMP1 TCG_REG_T9
85 #define TCG_TMP2 TCG_REG_T8
86 #define TCG_TMP3 TCG_REG_T7
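/* Internal temporaries for the code generator, used freely by the
   emit routines below. */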
88 #ifndef CONFIG_SOFTMMU
89 #define TCG_GUEST_BASE_REG TCG_REG_S1
92 /* check if we really need so many registers :P */
93 static const int tcg_target_reg_alloc_order[] = {
94 /* Call saved registers. */
105 /* Call clobbered registers. */
115 /* Argument registers, opposite order of allocation. */
126 static const TCGReg tcg_target_call_iarg_regs[] = {
131 #if _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
139 static const TCGReg tcg_target_call_oarg_regs[2] = {
144 static const tcg_insn_unit *tb_ret_addr;
145 static const tcg_insn_unit *bswap32_addr;
146 static const tcg_insn_unit *bswap32u_addr;
147 static const tcg_insn_unit *bswap64_addr;
149 static bool reloc_pc16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
151 /* Let the compiler perform the right-shift as part of the arithmetic. */
152 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
153 ptrdiff_t disp = target - (src_rx + 1);
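/* The displacement is counted in instructions relative to the delay slot
   and must fit in the signed 16-bit branch offset field. */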
154 if (disp == (int16_t)disp) {
155 *src_rw = deposit32(*src_rw, 0, 16, disp);
161 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
162 intptr_t value, intptr_t addend)
164 tcg_debug_assert(type == R_MIPS_PC16);
165 tcg_debug_assert(addend == 0);
166 return reloc_pc16(code_ptr, (const tcg_insn_unit *)value);
169 #define TCG_CT_CONST_ZERO 0x100
170 #define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */
171 #define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */
172 #define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. */
173 #define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */
174 #define TCG_CT_CONST_WSZ 0x2000 /* word size */
176 #define ALL_GENERAL_REGS 0xffffffffu
177 #define NOA0_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_A0))
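/* A0 is used below as the scratch base register for qemu_ld/st,
   so keep it out of the address/data operand constraints. */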
179 #ifdef CONFIG_SOFTMMU
180 #define ALL_QLOAD_REGS \
181 (NOA0_REGS & ~((TCG_TARGET_REG_BITS < TARGET_LONG_BITS) << TCG_REG_A2))
182 #define ALL_QSTORE_REGS \
183 (NOA0_REGS & ~(TCG_TARGET_REG_BITS < TARGET_LONG_BITS \
184 ? (1 << TCG_REG_A2) | (1 << TCG_REG_A3) \
185 : (1 << TCG_REG_A1)))
187 #define ALL_QLOAD_REGS NOA0_REGS
188 #define ALL_QSTORE_REGS NOA0_REGS
192 static bool is_p2m1(tcg_target_long val)
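/* A non-zero value of the form 2**n - 1 has all bits below bit n set,
   so adding 1 clears every one of them. */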
194 return val && ((val + 1) & val) == 0;
197 /* test if a constant matches the constraint */
198 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
200 if (ct & TCG_CT_CONST) {
202 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
204 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
206 } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
208 } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
210 } else if ((ct & TCG_CT_CONST_P2M1)
211 && use_mips32r2_instructions && is_p2m1(val)) {
213 } else if ((ct & TCG_CT_CONST_WSZ)
214 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
220 /* instruction opcodes */
226 OPC_BLEZ = 006 << 26,
227 OPC_BGTZ = 007 << 26,
228 OPC_ADDIU = 011 << 26,
229 OPC_SLTI = 012 << 26,
230 OPC_SLTIU = 013 << 26,
231 OPC_ANDI = 014 << 26,
233 OPC_XORI = 016 << 26,
235 OPC_BNEL = 025 << 26,
236 OPC_BNEZALC_R6 = 030 << 26,
237 OPC_DADDIU = 031 << 26,
258 OPC_SPECIAL = 000 << 26,
259 OPC_SLL = OPC_SPECIAL | 000,
260 OPC_SRL = OPC_SPECIAL | 002,
261 OPC_ROTR = OPC_SPECIAL | 002 | (1 << 21),
262 OPC_SRA = OPC_SPECIAL | 003,
263 OPC_SLLV = OPC_SPECIAL | 004,
264 OPC_SRLV = OPC_SPECIAL | 006,
265 OPC_ROTRV = OPC_SPECIAL | 006 | 0100,
266 OPC_SRAV = OPC_SPECIAL | 007,
267 OPC_JR_R5 = OPC_SPECIAL | 010,
268 OPC_JALR = OPC_SPECIAL | 011,
269 OPC_MOVZ = OPC_SPECIAL | 012,
270 OPC_MOVN = OPC_SPECIAL | 013,
271 OPC_SYNC = OPC_SPECIAL | 017,
272 OPC_MFHI = OPC_SPECIAL | 020,
273 OPC_MFLO = OPC_SPECIAL | 022,
274 OPC_DSLLV = OPC_SPECIAL | 024,
275 OPC_DSRLV = OPC_SPECIAL | 026,
276 OPC_DROTRV = OPC_SPECIAL | 026 | 0100,
277 OPC_DSRAV = OPC_SPECIAL | 027,
278 OPC_MULT = OPC_SPECIAL | 030,
279 OPC_MUL_R6 = OPC_SPECIAL | 030 | 0200,
280 OPC_MUH = OPC_SPECIAL | 030 | 0300,
281 OPC_MULTU = OPC_SPECIAL | 031,
282 OPC_MULU = OPC_SPECIAL | 031 | 0200,
283 OPC_MUHU = OPC_SPECIAL | 031 | 0300,
284 OPC_DIV = OPC_SPECIAL | 032,
285 OPC_DIV_R6 = OPC_SPECIAL | 032 | 0200,
286 OPC_MOD = OPC_SPECIAL | 032 | 0300,
287 OPC_DIVU = OPC_SPECIAL | 033,
288 OPC_DIVU_R6 = OPC_SPECIAL | 033 | 0200,
289 OPC_MODU = OPC_SPECIAL | 033 | 0300,
290 OPC_DMULT = OPC_SPECIAL | 034,
291 OPC_DMUL = OPC_SPECIAL | 034 | 0200,
292 OPC_DMUH = OPC_SPECIAL | 034 | 0300,
293 OPC_DMULTU = OPC_SPECIAL | 035,
294 OPC_DMULU = OPC_SPECIAL | 035 | 0200,
295 OPC_DMUHU = OPC_SPECIAL | 035 | 0300,
296 OPC_DDIV = OPC_SPECIAL | 036,
297 OPC_DDIV_R6 = OPC_SPECIAL | 036 | 0200,
298 OPC_DMOD = OPC_SPECIAL | 036 | 0300,
299 OPC_DDIVU = OPC_SPECIAL | 037,
300 OPC_DDIVU_R6 = OPC_SPECIAL | 037 | 0200,
301 OPC_DMODU = OPC_SPECIAL | 037 | 0300,
302 OPC_ADDU = OPC_SPECIAL | 041,
303 OPC_SUBU = OPC_SPECIAL | 043,
304 OPC_AND = OPC_SPECIAL | 044,
305 OPC_OR = OPC_SPECIAL | 045,
306 OPC_XOR = OPC_SPECIAL | 046,
307 OPC_NOR = OPC_SPECIAL | 047,
308 OPC_SLT = OPC_SPECIAL | 052,
309 OPC_SLTU = OPC_SPECIAL | 053,
310 OPC_DADDU = OPC_SPECIAL | 055,
311 OPC_DSUBU = OPC_SPECIAL | 057,
312 OPC_SELEQZ = OPC_SPECIAL | 065,
313 OPC_SELNEZ = OPC_SPECIAL | 067,
314 OPC_DSLL = OPC_SPECIAL | 070,
315 OPC_DSRL = OPC_SPECIAL | 072,
316 OPC_DROTR = OPC_SPECIAL | 072 | (1 << 21),
317 OPC_DSRA = OPC_SPECIAL | 073,
318 OPC_DSLL32 = OPC_SPECIAL | 074,
319 OPC_DSRL32 = OPC_SPECIAL | 076,
320 OPC_DROTR32 = OPC_SPECIAL | 076 | (1 << 21),
321 OPC_DSRA32 = OPC_SPECIAL | 077,
322 OPC_CLZ_R6 = OPC_SPECIAL | 0120,
323 OPC_DCLZ_R6 = OPC_SPECIAL | 0122,
325 OPC_REGIMM = 001 << 26,
326 OPC_BLTZ = OPC_REGIMM | (000 << 16),
327 OPC_BGEZ = OPC_REGIMM | (001 << 16),
329 OPC_SPECIAL2 = 034 << 26,
330 OPC_MUL_R5 = OPC_SPECIAL2 | 002,
331 OPC_CLZ = OPC_SPECIAL2 | 040,
332 OPC_DCLZ = OPC_SPECIAL2 | 044,
334 OPC_SPECIAL3 = 037 << 26,
335 OPC_EXT = OPC_SPECIAL3 | 000,
336 OPC_DEXTM = OPC_SPECIAL3 | 001,
337 OPC_DEXTU = OPC_SPECIAL3 | 002,
338 OPC_DEXT = OPC_SPECIAL3 | 003,
339 OPC_INS = OPC_SPECIAL3 | 004,
340 OPC_DINSM = OPC_SPECIAL3 | 005,
341 OPC_DINSU = OPC_SPECIAL3 | 006,
342 OPC_DINS = OPC_SPECIAL3 | 007,
343 OPC_WSBH = OPC_SPECIAL3 | 00240,
344 OPC_DSBH = OPC_SPECIAL3 | 00244,
345 OPC_DSHD = OPC_SPECIAL3 | 00544,
346 OPC_SEB = OPC_SPECIAL3 | 02040,
347 OPC_SEH = OPC_SPECIAL3 | 03040,
349 /* MIPS r6 doesn't have JR; JALR should be used instead. */
350 OPC_JR = use_mips32r6_instructions ? OPC_JALR : OPC_JR_R5,
353 * MIPS r6 replaces MUL with an alternative encoding which is
354 * backwards-compatible at the assembly level.
356 OPC_MUL = use_mips32r6_instructions ? OPC_MUL_R6 : OPC_MUL_R5,
358 /* MIPS r6 introduced names for weaker variants of SYNC. These are
359 backward-compatible with previous architecture revisions. */
360 OPC_SYNC_WMB = OPC_SYNC | 0x04 << 6,
361 OPC_SYNC_MB = OPC_SYNC | 0x10 << 6,
362 OPC_SYNC_ACQUIRE = OPC_SYNC | 0x11 << 6,
363 OPC_SYNC_RELEASE = OPC_SYNC | 0x12 << 6,
364 OPC_SYNC_RMB = OPC_SYNC | 0x13 << 6,
366 /* Aliases for convenience. */
367 ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU,
368 ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU,
369 ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32
370 ? OPC_SRL : OPC_DSRL,
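/* ALIAS_TSRL shifts guest addresses, so the 64-bit form is only needed
   when both the host registers and the guest addresses are 64-bit. */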
376 static void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc,
377 TCGReg rd, TCGReg rs, TCGReg rt)
382 inst |= (rs & 0x1F) << 21;
383 inst |= (rt & 0x1F) << 16;
384 inst |= (rd & 0x1F) << 11;
391 static void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc,
392 TCGReg rt, TCGReg rs, TCGArg imm)
397 inst |= (rs & 0x1F) << 21;
398 inst |= (rt & 0x1F) << 16;
399 inst |= (imm & 0xffff);
406 static void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
407 TCGReg rs, int msb, int lsb)
412 inst |= (rs & 0x1F) << 21;
413 inst |= (rt & 0x1F) << 16;
414 inst |= (msb & 0x1F) << 11;
415 inst |= (lsb & 0x1F) << 6;
419 static void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm,
420 MIPSInsn oph, TCGReg rt, TCGReg rs,
427 } else if (msb >= 32) {
431 tcg_out_opc_bf(s, opc, rt, rs, msb, lsb);
437 static void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, TCGReg rt, TCGReg rs)
439 tcg_out_opc_imm(s, opc, rt, rs, 0);
445 static void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
446 TCGReg rd, TCGReg rt, TCGArg sa)
451 inst |= (rt & 0x1F) << 16;
452 inst |= (rd & 0x1F) << 11;
453 inst |= (sa & 0x1F) << 6;
458 static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2,
459 TCGReg rd, TCGReg rt, TCGArg sa)
463 inst = (sa & 32 ? opc2 : opc1);
464 inst |= (rt & 0x1F) << 16;
465 inst |= (rd & 0x1F) << 11;
466 inst |= (sa & 0x1F) << 6;
472 * Returns true if the branch was in range and the insn was emitted.
474 static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, const void *target)
476 uintptr_t dest = (uintptr_t)target;
477 uintptr_t from = (uintptr_t)tcg_splitwx_to_rx(s->code_ptr) + 4;
480 /* The pc-region branch happens within the 256MB region of
481 the delay slot (thus the +4). */
482 if ((from ^ dest) & -(1 << 28)) {
485 tcg_debug_assert((dest & 3) == 0);
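/* J/JAL encode a 26-bit index of 32-bit words within the current
   256MB region. */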
488 inst |= (dest >> 2) & 0x3ffffff;
493 static void tcg_out_nop(TCGContext *s)
498 static void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
500 tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa);
503 static void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
505 tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa);
508 static void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
510 tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa);
513 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
515 /* Simple reg-reg move, optimising out the 'do nothing' case */
517 tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO);
522 static void tcg_out_movi(TCGContext *s, TCGType type,
523 TCGReg ret, tcg_target_long arg)
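/* Use the shortest sequence that produces the constant: ADDIU for signed
   16-bit values, ORI for unsigned 16-bit, LUI (+ ORI) for 32-bit values,
   otherwise build the value in 16-bit pieces with DSLL/ORI. */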
525 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
528 if (arg == (int16_t)arg) {
529 tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg);
532 if (arg == (uint16_t)arg) {
533 tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg);
536 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
537 tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
539 tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1);
540 if (arg & 0xffff0000ull) {
541 tcg_out_dsll(s, ret, ret, 16);
542 tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16);
543 tcg_out_dsll(s, ret, ret, 16);
545 tcg_out_dsll(s, ret, ret, 32);
549 tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff);
553 static void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg, int flags)
555 /* ret and arg can't be register tmp0 */
556 tcg_debug_assert(ret != TCG_TMP0);
557 tcg_debug_assert(arg != TCG_TMP0);
559 /* With arg = abcd: */
560 if (use_mips32r2_instructions) {
561 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); /* badc */
562 if (flags & TCG_BSWAP_OS) {
563 tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); /* ssdc */
564 } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
565 tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff); /* 00dc */
570 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); /* 0abc */
571 if (!(flags & TCG_BSWAP_IZ)) {
572 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff); /* 000c */
574 if (flags & TCG_BSWAP_OS) {
575 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); /* d000 */
576 tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); /* ssd0 */
578 tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); /* bcd0 */
579 if (flags & TCG_BSWAP_OZ) {
580 tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); /* 00d0 */
583 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); /* ssdc */
586 static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
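/* Call one of the out-of-line bswap helpers: the operand is expected in
   TMP0 and the result comes back in TMP3; callers fill the delay slot
   of this call with the load or move into TMP0. */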
588 if (!tcg_out_opc_jmp(s, OPC_JAL, sub)) {
589 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP1, (uintptr_t)sub);
590 tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_TMP1, 0);
594 static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg, int flags)
596 if (use_mips32r2_instructions) {
597 tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
598 tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
599 if (flags & TCG_BSWAP_OZ) {
600 tcg_out_opc_bf(s, OPC_DEXT, ret, ret, 31, 0);
603 if (flags & TCG_BSWAP_OZ) {
604 tcg_out_bswap_subr(s, bswap32u_addr);
606 tcg_out_bswap_subr(s, bswap32_addr);
608 /* delay slot -- never omit the insn, like tcg_out_mov might. */
609 tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
610 tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
614 static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
616 if (use_mips32r2_instructions) {
617 tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
618 tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
620 tcg_out_bswap_subr(s, bswap64_addr);
621 /* delay slot -- never omit the insn, like tcg_out_mov might. */
622 tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
623 tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
627 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
629 if (use_mips32r2_instructions) {
630 tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0);
632 tcg_out_dsll(s, ret, arg, 32);
633 tcg_out_dsrl(s, ret, ret, 32);
637 static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
638 TCGReg addr, intptr_t ofs)
642 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
643 if (addr != TCG_REG_ZERO) {
644 tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP0, TCG_TMP0, addr);
648 tcg_out_opc_imm(s, opc, data, addr, lo);
651 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
652 TCGReg arg1, intptr_t arg2)
654 MIPSInsn opc = OPC_LD;
655 if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
658 tcg_out_ldst(s, opc, arg, arg1, arg2);
661 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
662 TCGReg arg1, intptr_t arg2)
664 MIPSInsn opc = OPC_SD;
665 if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
668 tcg_out_ldst(s, opc, arg, arg1, arg2);
671 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
672 TCGReg base, intptr_t ofs)
675 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
681 static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
682 TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
683 bool cbh, bool is_sub)
685 TCGReg th = TCG_TMP1;
687 /* If we have a negative constant such that negating it would
688 make the high part zero, we can (usually) eliminate one insn. */
689 if (cbl && cbh && bh == -1 && bl != 0) {
695 /* By operating on the high part first, we get to use the final
696 carry operation to move back from the temporary. */
698 tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
699 } else if (bh != 0 || ah == rl) {
700 tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
705 /* Note that tcg optimization should eliminate the bl == 0 case. */
708 tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
709 tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
711 tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
712 tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
714 tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
717 tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
718 tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
719 } else if (rl == al && rl == bl) {
720 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, TCG_TARGET_REG_BITS - 1);
721 tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
723 tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
724 tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
726 tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
730 /* Bit 0 set if inversion required; bit 1 set if swapping required. */
731 #define MIPS_CMP_INV 1
732 #define MIPS_CMP_SWAP 2
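/* For example, LE is SWAP|INV: swap the operands, compute SLT, then
   invert, giving !(arg2 < arg1) == (arg1 <= arg2). */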
734 static const uint8_t mips_cmp_map[16] = {
737 [TCG_COND_GE] = MIPS_CMP_INV,
738 [TCG_COND_GEU] = MIPS_CMP_INV,
739 [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP,
740 [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP,
741 [TCG_COND_GT] = MIPS_CMP_SWAP,
742 [TCG_COND_GTU] = MIPS_CMP_SWAP,
745 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
746 TCGReg arg1, TCGReg arg2)
748 MIPSInsn s_opc = OPC_SLTU;
754 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
757 tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1);
762 tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
765 tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1);
779 cmp_map = mips_cmp_map[cond];
780 if (cmp_map & MIPS_CMP_SWAP) {
785 tcg_out_opc_reg(s, s_opc, ret, arg1, arg2);
786 if (cmp_map & MIPS_CMP_INV) {
787 tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
797 static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
798 TCGReg arg2, TCGLabel *l)
800 static const MIPSInsn b_zero[16] = {
801 [TCG_COND_LT] = OPC_BLTZ,
802 [TCG_COND_GT] = OPC_BGTZ,
803 [TCG_COND_LE] = OPC_BLEZ,
804 [TCG_COND_GE] = OPC_BGEZ,
807 MIPSInsn s_opc = OPC_SLTU;
824 b_opc = b_zero[cond];
836 cmp_map = mips_cmp_map[cond];
837 if (cmp_map & MIPS_CMP_SWAP) {
842 tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2);
843 b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE);
853 tcg_out_opc_br(s, b_opc, arg1, arg2);
854 tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
858 static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1,
859 TCGReg al, TCGReg ah,
860 TCGReg bl, TCGReg bh)
862 /* Merge highpart comparison into AH. */
865 tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh);
871 /* Merge lowpart comparison into AL. */
874 tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl);
880 /* Merge high and low part comparisons into AL. */
883 tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al);
892 static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
893 TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
895 TCGReg tmp0 = TCG_TMP0;
898 tcg_debug_assert(ret != TCG_TMP0);
899 if (ret == ah || ret == bh) {
900 tcg_debug_assert(ret != TCG_TMP1);
907 tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh);
908 tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO);
912 tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh);
913 tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl);
914 tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0);
915 tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh);
916 tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0);
921 static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
922 TCGReg bl, TCGReg bh, TCGLabel *l)
924 TCGCond b_cond = TCG_COND_NE;
925 TCGReg tmp = TCG_TMP1;
927 /* With branches, we emit between 4 and 9 insns with 2 or 3 branches.
928 With setcond, we emit between 3 and 10 insns and only 1 branch,
929 which ought to get better branch prediction. */
934 tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh);
938 /* Minimize code size by preferring a compare not requiring INV. */
939 if (mips_cmp_map[cond] & MIPS_CMP_INV) {
940 cond = tcg_invert_cond(cond);
941 b_cond = TCG_COND_EQ;
943 tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh);
947 tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, l);
950 static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
951 TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
955 /* If one of the values is zero, put it last to match SEL*Z instructions */
956 if (use_mips32r6_instructions && v1 == 0) {
959 cond = tcg_invert_cond(cond);
968 tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2);
974 /* Minimize code size by preferring a compare not requiring INV. */
975 if (mips_cmp_map[cond] & MIPS_CMP_INV) {
976 cond = tcg_invert_cond(cond);
979 tcg_out_setcond(s, cond, TCG_TMP0, c1, c2);
984 if (use_mips32r6_instructions) {
985 MIPSInsn m_opc_t = eqz ? OPC_SELEQZ : OPC_SELNEZ;
986 MIPSInsn m_opc_f = eqz ? OPC_SELNEZ : OPC_SELEQZ;
989 tcg_out_opc_reg(s, m_opc_f, TCG_TMP1, v2, c1);
991 tcg_out_opc_reg(s, m_opc_t, ret, v1, c1);
993 tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP1);
996 MIPSInsn m_opc = eqz ? OPC_MOVZ : OPC_MOVN;
998 tcg_out_opc_reg(s, m_opc, ret, v1, c1);
1000 /* This should be guaranteed via constraints */
1001 tcg_debug_assert(v2 == ret);
1005 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
1007 /* Note that the ABI requires the called function's address to be
1008 loaded into T9, even if a direct branch is in range. */
1009 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg);
1011 /* But do try a direct branch, allowing the cpu better insn prefetch. */
1013 if (!tcg_out_opc_jmp(s, OPC_J, arg)) {
1014 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0);
1017 if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) {
1018 tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0);
1023 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
1024 const TCGHelperInfo *info)
1026 tcg_out_call_int(s, arg, false);
1030 #if defined(CONFIG_SOFTMMU)
1031 static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
1032 [MO_UB] = helper_ret_ldub_mmu,
1033 [MO_SB] = helper_ret_ldsb_mmu,
1034 [MO_LEUW] = helper_le_lduw_mmu,
1035 [MO_LESW] = helper_le_ldsw_mmu,
1036 [MO_LEUL] = helper_le_ldul_mmu,
1037 [MO_LEUQ] = helper_le_ldq_mmu,
1038 [MO_BEUW] = helper_be_lduw_mmu,
1039 [MO_BESW] = helper_be_ldsw_mmu,
1040 [MO_BEUL] = helper_be_ldul_mmu,
1041 [MO_BEUQ] = helper_be_ldq_mmu,
1042 #if TCG_TARGET_REG_BITS == 64
1043 [MO_LESL] = helper_le_ldsl_mmu,
1044 [MO_BESL] = helper_be_ldsl_mmu,
1048 static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
1049 [MO_UB] = helper_ret_stb_mmu,
1050 [MO_LEUW] = helper_le_stw_mmu,
1051 [MO_LEUL] = helper_le_stl_mmu,
1052 [MO_LEUQ] = helper_le_stq_mmu,
1053 [MO_BEUW] = helper_be_stw_mmu,
1054 [MO_BEUL] = helper_be_stl_mmu,
1055 [MO_BEUQ] = helper_be_stq_mmu,
1058 /* Helper routines for marshalling helper function arguments into
1059 * the correct registers and stack.
1060 * I is where we want to put this argument, and is updated and returned
1061 * for the next call. ARG is the argument itself.
1063 * We provide routines for arguments which are: immediate, 32 bit
1064 * value in register, 16 and 8 bit values in register (which must be zero
1065 * extended before use) and 64 bit value in a lo:hi register pair.
1068 static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
1070 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1071 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
1073 /* For N32 and N64, the initial offset is different. But there
1074 we also have 8 argument registers, so we don't run out here. */
1075 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
1076 tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
1081 static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg)
1083 TCGReg tmp = TCG_TMP0;
1084 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1085 tmp = tcg_target_call_iarg_regs[i];
1087 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff);
1088 return tcg_out_call_iarg_reg(s, i, tmp);
1091 static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg)
1093 TCGReg tmp = TCG_TMP0;
1094 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1095 tmp = tcg_target_call_iarg_regs[i];
1097 tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff);
1098 return tcg_out_call_iarg_reg(s, i, tmp);
1101 static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
1103 TCGReg tmp = TCG_TMP0;
1107 if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
1108 tmp = tcg_target_call_iarg_regs[i];
1110 tcg_out_movi(s, TCG_TYPE_REG, tmp, arg);
1112 return tcg_out_call_iarg_reg(s, i, tmp);
1115 static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
1117 tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
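/* Pass the high half in the first (lower-numbered) argument slot on a
   big-endian host, and the low half first on a little-endian host. */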
1119 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
1120 i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
1124 /* We expect to use a 16-bit negative offset from ENV. */
1125 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1126 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1129 * Perform the tlb comparison operation.
1130 * The complete host address is placed in BASE.
1131 * Clobbers TMP0, TMP1, TMP2, TMP3.
1133 static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
1134 TCGReg addrh, MemOpIdx oi,
1135 tcg_insn_unit *label_ptr[2], bool is_load)
1137 MemOp opc = get_memop(oi);
1138 unsigned a_bits = get_alignment_bits(opc);
1139 unsigned s_bits = opc & MO_SIZE;
1140 unsigned a_mask = (1 << a_bits) - 1;
1141 unsigned s_mask = (1 << s_bits) - 1;
1142 int mem_index = get_mmuidx(oi);
1143 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1144 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1145 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1146 int add_off = offsetof(CPUTLBEntry, addend);
1147 int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
1148 : offsetof(CPUTLBEntry, addr_write));
1149 target_ulong tlb_mask;
1151 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
1152 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
1153 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off);
1155 /* Extract the TLB index from the address into TMP3. */
1156 tcg_out_opc_sa(s, ALIAS_TSRL, TCG_TMP3, addrl,
1157 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1158 tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
1160 /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
1161 tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
1163 /* Load the (low-half) tlb comparator. */
1164 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1165 tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
1167 tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD
1168 : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
1169 TCG_TMP0, TCG_TMP3, cmp_off);
1172 /* Zero extend a 32-bit guest address for a 64-bit host. */
1173 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
1174 tcg_out_ext32u(s, base, addrl);
1179 * Mask the page bits, keeping the alignment bits to compare against.
1180 * For unaligned accesses, compare against the end of the access to
1181 * verify that it does not cross a page boundary.
1183 tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
1184 tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask);
1185 if (a_mask >= s_mask) {
1186 tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
1188 tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrl, s_mask - a_mask);
1189 tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
1192 if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
1193 /* Load the tlb addend for the fast path. */
1194 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
1197 label_ptr[0] = s->code_ptr;
1198 tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
1200 /* Load and test the high half tlb comparator. */
1201 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1203 tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
1205 /* Load the tlb addend for the fast path. */
1206 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
1208 label_ptr[1] = s->code_ptr;
1209 tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
1213 tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl);
1216 static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
1218 TCGReg datalo, TCGReg datahi,
1219 TCGReg addrlo, TCGReg addrhi,
1220 void *raddr, tcg_insn_unit *label_ptr[2])
1222 TCGLabelQemuLdst *label = new_ldst_label(s);
1224 label->is_ld = is_ld;
1227 label->datalo_reg = datalo;
1228 label->datahi_reg = datahi;
1229 label->addrlo_reg = addrlo;
1230 label->addrhi_reg = addrhi;
1231 label->raddr = tcg_splitwx_to_rx(raddr);
1232 label->label_ptr[0] = label_ptr[0];
1233 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1234 label->label_ptr[1] = label_ptr[1];
1238 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1240 const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
1241 MemOpIdx oi = l->oi;
1242 MemOp opc = get_memop(oi);
1246 /* resolve label address */
1247 if (!reloc_pc16(l->label_ptr[0], tgt_rx)
1248 || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
1249 && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
1254 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1255 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1257 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
1259 i = tcg_out_call_iarg_imm(s, i, oi);
1260 i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
1261 tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false);
1263 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1266 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
1267 /* We eliminated V0 from the possible output registers, so it
1268 cannot be clobbered here. So we must move V1 first. */
1270 tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1);
1273 tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1);
1277 tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
1278 if (!reloc_pc16(s->code_ptr - 1, l->raddr)) {
1283 if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) {
1284 /* we always sign-extend 32-bit loads */
1285 tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0);
1287 tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
1292 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1294 const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
1295 MemOpIdx oi = l->oi;
1296 MemOp opc = get_memop(oi);
1297 MemOp s_bits = opc & MO_SIZE;
1300 /* resolve label address */
1301 if (!reloc_pc16(l->label_ptr[0], tgt_rx)
1302 || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
1303 && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
1308 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1309 i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
1311 i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
1315 i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg);
1318 i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg);
1321 i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
1324 if (TCG_TARGET_REG_BITS == 32) {
1325 i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
1327 i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
1333 i = tcg_out_call_iarg_imm(s, i, oi);
1335 /* Tail call to the store helper. Thus force the return address
1336 computation to take place in the return address register. */
1337 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr);
1338 i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA);
1339 tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
1341 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1347 static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
1348 TCGReg addrhi, unsigned a_bits)
1350 unsigned a_mask = (1 << a_bits) - 1;
1351 TCGLabelQemuLdst *l = new_ldst_label(s);
1354 l->addrlo_reg = addrlo;
1355 l->addrhi_reg = addrhi;
1357 /* We expect a_bits to max out at 7, well below the 16-bit ANDI immediate. */
1358 tcg_debug_assert(a_bits < 16);
1359 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
1361 l->label_ptr[0] = s->code_ptr;
1362 if (use_mips32r6_instructions) {
1363 tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
1365 tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
1369 l->raddr = tcg_splitwx_to_rx(s->code_ptr);
1372 static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
1376 if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1380 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1381 /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */
1382 TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg;
1383 TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg;
1385 if (a3 != TCG_REG_A2) {
1386 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
1387 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
1388 } else if (a2 != TCG_REG_A3) {
1389 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
1390 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
1392 tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2);
1393 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3);
1394 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0);
1397 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
1399 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
1402 * Tail call to the helper, with the return address back inline.
1403 * We have arrived here via BNEL, so $31 is already set.
1405 target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st);
1406 tcg_out_call_int(s, target, true);
1410 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1412 return tcg_out_fail_alignment(s, l);
1415 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1417 return tcg_out_fail_alignment(s, l);
1419 #endif /* SOFTMMU */
1421 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
1422 TCGReg base, MemOp opc, bool is_64)
1424 switch (opc & (MO_SSIZE | MO_BSWAP)) {
1426 tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
1429 tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
1431 case MO_UW | MO_BSWAP:
1432 tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
1433 tcg_out_bswap16(s, lo, TCG_TMP1, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
1436 tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
1438 case MO_SW | MO_BSWAP:
1439 tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
1440 tcg_out_bswap16(s, lo, TCG_TMP1, TCG_BSWAP_IZ | TCG_BSWAP_OS);
1443 tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
1445 case MO_UL | MO_BSWAP:
1446 if (TCG_TARGET_REG_BITS == 64 && is_64) {
1447 if (use_mips32r2_instructions) {
1448 tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
1449 tcg_out_bswap32(s, lo, lo, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
1451 tcg_out_bswap_subr(s, bswap32u_addr);
1453 tcg_out_opc_imm(s, OPC_LWU, TCG_TMP0, base, 0);
1454 tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
1459 case MO_SL | MO_BSWAP:
1460 if (use_mips32r2_instructions) {
1461 tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
1462 tcg_out_bswap32(s, lo, lo, 0);
1464 tcg_out_bswap_subr(s, bswap32_addr);
1466 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
1467 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_TMP3);
1471 if (TCG_TARGET_REG_BITS == 64 && is_64) {
1472 tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
1477 tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
1479 case MO_UQ | MO_BSWAP:
1480 if (TCG_TARGET_REG_BITS == 64) {
1481 if (use_mips32r2_instructions) {
1482 tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
1483 tcg_out_bswap64(s, lo, lo);
1485 tcg_out_bswap_subr(s, bswap64_addr);
1487 tcg_out_opc_imm(s, OPC_LD, TCG_TMP0, base, 0);
1488 tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
1490 } else if (use_mips32r2_instructions) {
1491 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
1492 tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 4);
1493 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
1494 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
1495 tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
1496 tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
1498 tcg_out_bswap_subr(s, bswap32_addr);
1500 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
1501 tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 4);
1502 tcg_out_bswap_subr(s, bswap32_addr);
1504 tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
1505 tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
1509 /* Prefer to load from offset 0 first, but allow for overlap. */
1510 if (TCG_TARGET_REG_BITS == 64) {
1511 tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
1512 } else if (MIPS_BE ? hi != base : lo == base) {
1513 tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
1514 tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
1516 tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
1517 tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
1525 static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
1526 TCGReg base, MemOp opc, bool is_64)
1528 const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
1529 const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
1530 const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR;
1531 const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL;
1533 bool sgn = (opc & MO_SIGN);
1535 switch (opc & (MO_SSIZE | MO_BSWAP)) {
1538 tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0);
1539 tcg_out_opc_imm(s, OPC_LBU, lo, base, 1);
1540 if (use_mips32r2_instructions) {
1541 tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
1543 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
1544 tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, lo);
1550 if (use_mips32r2_instructions && lo != base) {
1551 tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
1552 tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 1);
1553 tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
1555 tcg_out_opc_imm(s, OPC_LBU, TCG_TMP0, base, 0);
1556 tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP1, base, 1);
1557 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP1, TCG_TMP1, 8);
1558 tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1);
1564 tcg_out_opc_imm(s, lw1, lo, base, 0);
1565 tcg_out_opc_imm(s, lw2, lo, base, 3);
1566 if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) {
1567 tcg_out_ext32u(s, lo, lo);
1571 case MO_UL | MO_BSWAP:
1572 case MO_SL | MO_BSWAP:
1573 if (use_mips32r2_instructions) {
1574 tcg_out_opc_imm(s, lw1, lo, base, 0);
1575 tcg_out_opc_imm(s, lw2, lo, base, 3);
1576 tcg_out_bswap32(s, lo, lo,
1577 TCG_TARGET_REG_BITS == 64 && is_64
1578 ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0);
1580 const tcg_insn_unit *subr =
1581 (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn
1582 ? bswap32u_addr : bswap32_addr);
1584 tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0);
1585 tcg_out_bswap_subr(s, subr);
1587 tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3);
1588 tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3);
1593 if (TCG_TARGET_REG_BITS == 64) {
1594 tcg_out_opc_imm(s, ld1, lo, base, 0);
1595 tcg_out_opc_imm(s, ld2, lo, base, 7);
1597 tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0);
1598 tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3);
1599 tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0);
1600 tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3);
1604 case MO_UQ | MO_BSWAP:
1605 if (TCG_TARGET_REG_BITS == 64) {
1606 if (use_mips32r2_instructions) {
1607 tcg_out_opc_imm(s, ld1, lo, base, 0);
1608 tcg_out_opc_imm(s, ld2, lo, base, 7);
1609 tcg_out_bswap64(s, lo, lo);
1611 tcg_out_opc_imm(s, ld1, TCG_TMP0, base, 0);
1612 tcg_out_bswap_subr(s, bswap64_addr);
1614 tcg_out_opc_imm(s, ld2, TCG_TMP0, base, 7);
1615 tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
1617 } else if (use_mips32r2_instructions) {
1618 tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
1619 tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
1620 tcg_out_opc_imm(s, lw1, TCG_TMP1, base, 4 + 0);
1621 tcg_out_opc_imm(s, lw2, TCG_TMP1, base, 4 + 3);
1622 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
1623 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
1624 tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
1625 tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
1627 tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
1628 tcg_out_bswap_subr(s, bswap32_addr);
1630 tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
1631 tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 4 + 0);
1632 tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
1633 tcg_out_bswap_subr(s, bswap32_addr);
1635 tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 4 + 3);
1636 tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
1641 g_assert_not_reached();
1645 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
1647 TCGReg addr_regl, addr_regh __attribute__((unused));
1648 TCGReg data_regl, data_regh;
1651 #if defined(CONFIG_SOFTMMU)
1652 tcg_insn_unit *label_ptr[2];
1655 unsigned a_bits, s_bits;
1656 TCGReg base = TCG_REG_A0;
1658 data_regl = *args++;
1659 data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
1660 addr_regl = *args++;
1661 addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
1663 opc = get_memop(oi);
1664 a_bits = get_alignment_bits(opc);
1665 s_bits = opc & MO_SIZE;
1668 * R6 removes the left/right instructions but requires the
1669 * system to support misaligned memory accesses.
1671 #if defined(CONFIG_SOFTMMU)
1672 tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
1673 if (use_mips32r6_instructions || a_bits >= s_bits) {
1674 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
1676 tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
1678 add_qemu_ldst_label(s, 1, oi,
1679 (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1680 data_regl, data_regh, addr_regl, addr_regh,
1681 s->code_ptr, label_ptr);
1683 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
1684 tcg_out_ext32u(s, base, addr_regl);
1687 if (guest_base == 0 && data_regl != addr_regl) {
1689 } else if (guest_base == (int16_t)guest_base) {
1690 tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
1692 tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
1694 if (use_mips32r6_instructions) {
1696 tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
1698 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
1700 if (a_bits && a_bits != s_bits) {
1701 tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
1703 if (a_bits >= s_bits) {
1704 tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
1706 tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
1712 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
1713 TCGReg base, MemOp opc)
1715 /* Don't clutter the code below with checks to avoid bswapping ZERO. */
1716 if ((lo | hi) == 0) {
1720 switch (opc & (MO_SIZE | MO_BSWAP)) {
1722 tcg_out_opc_imm(s, OPC_SB, lo, base, 0);
1725 case MO_16 | MO_BSWAP:
1726 tcg_out_bswap16(s, TCG_TMP1, lo, 0);
1730 tcg_out_opc_imm(s, OPC_SH, lo, base, 0);
1733 case MO_32 | MO_BSWAP:
1734 tcg_out_bswap32(s, TCG_TMP3, lo, 0);
1738 tcg_out_opc_imm(s, OPC_SW, lo, base, 0);
1741 case MO_64 | MO_BSWAP:
1742 if (TCG_TARGET_REG_BITS == 64) {
1743 tcg_out_bswap64(s, TCG_TMP3, lo);
1744 tcg_out_opc_imm(s, OPC_SD, TCG_TMP3, base, 0);
1745 } else if (use_mips32r2_instructions) {
1746 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? lo : hi);
1747 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? hi : lo);
1748 tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16);
1749 tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16);
1750 tcg_out_opc_imm(s, OPC_SW, TCG_TMP0, base, 0);
1751 tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, 4);
1753 tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0);
1754 tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 0);
1755 tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0);
1756 tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 4);
1760 if (TCG_TARGET_REG_BITS == 64) {
1761 tcg_out_opc_imm(s, OPC_SD, lo, base, 0);
1763 tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? hi : lo, base, 0);
1764 tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? lo : hi, base, 4);
1773 static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
1774 TCGReg base, MemOp opc)
1776 const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
1777 const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL;
1778 const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR;
1779 const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL;
1781 /* Don't clutter the code below with checks to avoid bswapping ZERO. */
1782 if ((lo | hi) == 0) {
1786 switch (opc & (MO_SIZE | MO_BSWAP)) {
1788 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
1789 tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 0);
1790 tcg_out_opc_imm(s, OPC_SB, lo, base, 1);
1794 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
1795 tcg_out_opc_imm(s, OPC_SB, lo, base, 0);
1796 tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 1);
1799 case MO_32 | MO_BSWAP:
1800 tcg_out_bswap32(s, TCG_TMP3, lo, 0);
1804 tcg_out_opc_imm(s, sw1, lo, base, 0);
1805 tcg_out_opc_imm(s, sw2, lo, base, 3);
1808 case MO_64 | MO_BSWAP:
1809 if (TCG_TARGET_REG_BITS == 64) {
1810 tcg_out_bswap64(s, TCG_TMP3, lo);
1812 } else if (use_mips32r2_instructions) {
1813 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? lo : hi);
1814 tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? hi : lo);
1815 tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16);
1816 tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16);
1817 hi = MIPS_BE ? TCG_TMP0 : TCG_TMP1;
1818 lo = MIPS_BE ? TCG_TMP1 : TCG_TMP0;
1820 tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0);
1821 tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 0 + 0);
1822 tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 0 + 3);
1823 tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0);
1824 tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 4 + 0);
1825 tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 4 + 3);
1830 if (TCG_TARGET_REG_BITS == 64) {
1831 tcg_out_opc_imm(s, sd1, lo, base, 0);
1832 tcg_out_opc_imm(s, sd2, lo, base, 7);
1834 tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0);
1835 tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3);
1836 tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0);
1837 tcg_out_opc_imm(s, sw2, MIPS_BE ? lo : hi, base, 4 + 3);
1845 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
1847 TCGReg addr_regl, addr_regh __attribute__((unused));
1848 TCGReg data_regl, data_regh;
1851 #if defined(CONFIG_SOFTMMU)
1852 tcg_insn_unit *label_ptr[2];
1854 unsigned a_bits, s_bits;
1855 TCGReg base = TCG_REG_A0;
1857 data_regl = *args++;
1858 data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
1859 addr_regl = *args++;
1860 addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
1862 opc = get_memop(oi);
1863 a_bits = get_alignment_bits(opc);
1864 s_bits = opc & MO_SIZE;
1867 * R6 removes the left/right instructions but requires the
1868 * system to support misaligned memory accesses.
1870 #if defined(CONFIG_SOFTMMU)
1871 tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
1872 if (use_mips32r6_instructions || a_bits >= s_bits) {
1873 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1875 tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
1877 add_qemu_ldst_label(s, 0, oi,
1878 (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
1879 data_regl, data_regh, addr_regl, addr_regh,
1880 s->code_ptr, label_ptr);
1882 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
1883 tcg_out_ext32u(s, base, addr_regl);
1886 if (guest_base == 0) {
1888 } else if (guest_base == (int16_t)guest_base) {
1889 tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
1891 tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
1893 if (use_mips32r6_instructions) {
1895 tcg_out_test_alignment(s, false, addr_regl, addr_regh, a_bits);
1897 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1899 if (a_bits && a_bits != s_bits) {
1900 tcg_out_test_alignment(s, false, addr_regl, addr_regh, a_bits);
1902 if (a_bits >= s_bits) {
1903 tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
1905 tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
1911 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1913 static const MIPSInsn sync[] = {
1914 /* Note that SYNC_MB is slightly weaker than SYNC 0,
1915 as the former is an ordering barrier and the latter
1916 is a completion barrier. */
1917 [0 ... TCG_MO_ALL] = OPC_SYNC_MB,
1918 [TCG_MO_LD_LD] = OPC_SYNC_RMB,
1919 [TCG_MO_ST_ST] = OPC_SYNC_WMB,
1920 [TCG_MO_LD_ST] = OPC_SYNC_RELEASE,
1921 [TCG_MO_LD_ST | TCG_MO_ST_ST] = OPC_SYNC_RELEASE,
1922 [TCG_MO_LD_ST | TCG_MO_LD_LD] = OPC_SYNC_ACQUIRE,
1924 tcg_out32(s, sync[a0 & TCG_MO_ALL]);
1927 static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
1928 int width, TCGReg a0, TCGReg a1, TCGArg a2)
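/* CLZ/DCLZ already return the operand width for a zero input, so extra
   code is needed only when the requested zero-input result A2 differs. */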
1930 if (use_mips32r6_instructions) {
1932 tcg_out_opc_reg(s, opcv6, a0, a1, 0);
1934 tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
1935 tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
1939 tcg_out_opc_reg(s, opcv2, a0, a1, a1);
1940 } else if (a0 == a2) {
1941 tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
1942 tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
1943 } else if (a0 != a1) {
1944 tcg_out_opc_reg(s, opcv2, a0, a1, a1);
1945 tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
1947 tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
1948 tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
1949 tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0);
1954 static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1956 TCGReg b0 = TCG_REG_ZERO;
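/* Build the TB return value in V0: the high bits first (when needed),
   with the low 16 bits OR'd in from the delay slot of the jump to
   tb_ret_addr. */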
1959 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
1962 if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
1963 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)tb_ret_addr);
1964 tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
1966 tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
1969 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1970 const TCGArg args[TCG_MAX_OP_ARGS],
1971 const int const_args[TCG_MAX_OP_ARGS])
1978 * Note that many operands use the constraint set "rZ".
1979 * We make use of the fact that 0 is the ZERO register,
1980 * and hence such cases need not check for const_args.
1988 case INDEX_op_goto_tb:
1989 /* indirect jump method */
1990 qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
1991 tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
1992 (uintptr_t)(s->tb_jmp_target_addr + a0));
1993 tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
1995 set_jmp_reset_offset(s, a0);
1997 case INDEX_op_goto_ptr:
1998 /* jmp to the given host address (could be epilogue) */
1999 tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
2003 tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
2007 case INDEX_op_ld8u_i32:
2008 case INDEX_op_ld8u_i64:
2011 case INDEX_op_ld8s_i32:
2012 case INDEX_op_ld8s_i64:
2015 case INDEX_op_ld16u_i32:
2016 case INDEX_op_ld16u_i64:
2019 case INDEX_op_ld16s_i32:
2020 case INDEX_op_ld16s_i64:
2023 case INDEX_op_ld_i32:
2024 case INDEX_op_ld32s_i64:
2027 case INDEX_op_ld32u_i64:
2030 case INDEX_op_ld_i64:
2033 case INDEX_op_st8_i32:
2034 case INDEX_op_st8_i64:
2037 case INDEX_op_st16_i32:
2038 case INDEX_op_st16_i64:
2041 case INDEX_op_st_i32:
2042 case INDEX_op_st32_i64:
2045 case INDEX_op_st_i64:
2048 tcg_out_ldst(s, i1, a0, a1, a2);
2051 case INDEX_op_add_i32:
2052 i1 = OPC_ADDU, i2 = OPC_ADDIU;
2054 case INDEX_op_add_i64:
2055 i1 = OPC_DADDU, i2 = OPC_DADDIU;
2057 case INDEX_op_or_i32:
2058 case INDEX_op_or_i64:
2059 i1 = OPC_OR, i2 = OPC_ORI;
2061 case INDEX_op_xor_i32:
2062 case INDEX_op_xor_i64:
2063 i1 = OPC_XOR, i2 = OPC_XORI;
2066 tcg_out_opc_imm(s, i2, a0, a1, a2);
2070 tcg_out_opc_reg(s, i1, a0, a1, a2);
2073 case INDEX_op_sub_i32:
2074 i1 = OPC_SUBU, i2 = OPC_ADDIU;
2076 case INDEX_op_sub_i64:
2077 i1 = OPC_DSUBU, i2 = OPC_DADDIU;
2080 tcg_out_opc_imm(s, i2, a0, a1, -a2);
2084 case INDEX_op_and_i32:
2085 if (c2 && a2 != (uint16_t)a2) {
2086 int msb = ctz32(~a2) - 1;
2087 tcg_debug_assert(use_mips32r2_instructions);
2088 tcg_debug_assert(is_p2m1(a2));
2089 tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
2092 i1 = OPC_AND, i2 = OPC_ANDI;
2094 case INDEX_op_and_i64:
2095 if (c2 && a2 != (uint16_t)a2) {
2096 int msb = ctz64(~a2) - 1;
2097 tcg_debug_assert(use_mips32r2_instructions);
2098 tcg_debug_assert(is_p2m1(a2));
2099 tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
2102 i1 = OPC_AND, i2 = OPC_ANDI;
2104 case INDEX_op_nor_i32:
2105 case INDEX_op_nor_i64:
2109 case INDEX_op_mul_i32:
2110 if (use_mips32_instructions) {
2111 tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
2114 i1 = OPC_MULT, i2 = OPC_MFLO;
2116 case INDEX_op_mulsh_i32:
2117 if (use_mips32r6_instructions) {
2118 tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
2121 i1 = OPC_MULT, i2 = OPC_MFHI;
2123 case INDEX_op_muluh_i32:
2124 if (use_mips32r6_instructions) {
2125 tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2);
2128 i1 = OPC_MULTU, i2 = OPC_MFHI;
2130 case INDEX_op_div_i32:
2131 if (use_mips32r6_instructions) {
2132 tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
2135 i1 = OPC_DIV, i2 = OPC_MFLO;
2137 case INDEX_op_divu_i32:
2138 if (use_mips32r6_instructions) {
2139 tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
2142 i1 = OPC_DIVU, i2 = OPC_MFLO;
2144 case INDEX_op_rem_i32:
2145 if (use_mips32r6_instructions) {
2146 tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
2149 i1 = OPC_DIV, i2 = OPC_MFHI;
2151 case INDEX_op_remu_i32:
2152 if (use_mips32r6_instructions) {
2153 tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
2156 i1 = OPC_DIVU, i2 = OPC_MFHI;
2158 case INDEX_op_mul_i64:
2159 if (use_mips32r6_instructions) {
2160 tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
2163 i1 = OPC_DMULT, i2 = OPC_MFLO;
2165 case INDEX_op_mulsh_i64:
2166 if (use_mips32r6_instructions) {
2167 tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2);
2170 i1 = OPC_DMULT, i2 = OPC_MFHI;
2172 case INDEX_op_muluh_i64:
2173 if (use_mips32r6_instructions) {
2174 tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2);
2177 i1 = OPC_DMULTU, i2 = OPC_MFHI;
2179 case INDEX_op_div_i64:
2180 if (use_mips32r6_instructions) {
2181 tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
2184 i1 = OPC_DDIV, i2 = OPC_MFLO;
2186 case INDEX_op_divu_i64:
2187 if (use_mips32r6_instructions) {
2188 tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
2191 i1 = OPC_DDIVU, i2 = OPC_MFLO;
2193 case INDEX_op_rem_i64:
2194 if (use_mips32r6_instructions) {
2195 tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
2198 i1 = OPC_DDIV, i2 = OPC_MFHI;
2200 case INDEX_op_remu_i64:
2201 if (use_mips32r6_instructions) {
2202 tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
2205 i1 = OPC_DDIVU, i2 = OPC_MFHI;
2207 tcg_out_opc_reg(s, i1, 0, a1, a2);
2208 tcg_out_opc_reg(s, i2, a0, 0, 0);
2211 case INDEX_op_muls2_i32:
2214 case INDEX_op_mulu2_i32:
2217 case INDEX_op_muls2_i64:
2220 case INDEX_op_mulu2_i64:
2223 tcg_out_opc_reg(s, i1, 0, a2, args[3]);
2224 tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
2225 tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
2228 case INDEX_op_not_i32:
2229 case INDEX_op_not_i64:
2232 case INDEX_op_ext8s_i32:
2233 case INDEX_op_ext8s_i64:
2236 case INDEX_op_ext16s_i32:
2237 case INDEX_op_ext16s_i64:
2240 tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
2243 case INDEX_op_bswap16_i32:
2244 case INDEX_op_bswap16_i64:
2245 tcg_out_bswap16(s, a0, a1, a2);
2247 case INDEX_op_bswap32_i32:
2248 tcg_out_bswap32(s, a0, a1, 0);
2250 case INDEX_op_bswap32_i64:
2251 tcg_out_bswap32(s, a0, a1, a2);
2253 case INDEX_op_bswap64_i64:
2254 tcg_out_bswap64(s, a0, a1);
2256 case INDEX_op_extrh_i64_i32:
2257 tcg_out_dsra(s, a0, a1, 32);
2259 case INDEX_op_ext32s_i64:
2260 case INDEX_op_ext_i32_i64:
2261 case INDEX_op_extrl_i64_i32:
2262 tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0);
2264 case INDEX_op_ext32u_i64:
2265 case INDEX_op_extu_i32_i64:
2266 tcg_out_ext32u(s, a0, a1);
2269 case INDEX_op_sar_i32:
2270 i1 = OPC_SRAV, i2 = OPC_SRA;
2272 case INDEX_op_shl_i32:
2273 i1 = OPC_SLLV, i2 = OPC_SLL;
2275 case INDEX_op_shr_i32:
2276 i1 = OPC_SRLV, i2 = OPC_SRL;
2278 case INDEX_op_rotr_i32:
2279 i1 = OPC_ROTRV, i2 = OPC_ROTR;
2282 tcg_out_opc_sa(s, i2, a0, a1, a2);
2286 tcg_out_opc_reg(s, i1, a0, a2, a1);
2288 case INDEX_op_rotl_i32:
2290 tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
2292 tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
2293 tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
2296 case INDEX_op_sar_i64:
2298 tcg_out_dsra(s, a0, a1, a2);
2303 case INDEX_op_shl_i64:
2305 tcg_out_dsll(s, a0, a1, a2);
2310 case INDEX_op_shr_i64:
2312 tcg_out_dsrl(s, a0, a1, a2);
2317 case INDEX_op_rotr_i64:
2319 tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
2324 case INDEX_op_rotl_i64:
2326 tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2);
2328 tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2);
2329 tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1);
2333 case INDEX_op_clz_i32:
2334 tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2);
2336 case INDEX_op_clz_i64:
2337 tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2);
2340 case INDEX_op_deposit_i32:
2341 tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
2343 case INDEX_op_deposit_i64:
2344 tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
2345 args[3] + args[4] - 1, args[3]);
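/*
 * args[3] is the field position and args[4] its length; INS/DINS encode
 * the field as lsb = pos, msb = pos + len - 1, hence the computation
 * above.  DINSM/DINSU handle fields that end past or start at/above
 * bit 32.  The EXT/DEXT cases below use the analogous (len - 1, pos)
 * encoding.
 */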
2347 case INDEX_op_extract_i32:
2348 tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2);
2350 case INDEX_op_extract_i64:
2351 tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1,
args[3] - 1, a2);
2355 case INDEX_op_brcond_i32:
2356 case INDEX_op_brcond_i64:
2357 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
2359 case INDEX_op_brcond2_i32:
2360 tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
2363 case INDEX_op_movcond_i32:
2364 case INDEX_op_movcond_i64:
2365 tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
2368 case INDEX_op_setcond_i32:
2369 case INDEX_op_setcond_i64:
2370 tcg_out_setcond(s, args[3], a0, a1, a2);
2372 case INDEX_op_setcond2_i32:
2373 tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
2376 case INDEX_op_qemu_ld_i32:
2377 tcg_out_qemu_ld(s, args, false);
2379 case INDEX_op_qemu_ld_i64:
2380 tcg_out_qemu_ld(s, args, true);
2382 case INDEX_op_qemu_st_i32:
2383 tcg_out_qemu_st(s, args, false);
2385 case INDEX_op_qemu_st_i64:
2386 tcg_out_qemu_st(s, args, true);
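/*
 * The guest load/store expanders take the whole args[] array because the
 * operand count varies: data and address each need one or two registers
 * depending on TCG_TARGET_REG_BITS and TARGET_LONG_BITS (see the
 * constraint selection below).
 */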
2389 case INDEX_op_add2_i32:
2390 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2391 const_args[4], const_args[5], false);
2393 case INDEX_op_sub2_i32:
2394 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2395 const_args[4], const_args[5], true);
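/*
 * Double-word add/sub on a 32-bit host: tcg_out_addsub2 produces the low
 * and high halves of the 64-bit result and, since MIPS has no carry flag,
 * computes the carry/borrow explicitly with a compare.
 */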
2401 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2402 case INDEX_op_mov_i64:
2403 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2404 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
2410 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2413 case INDEX_op_goto_ptr:
2416 case INDEX_op_ld8u_i32:
2417 case INDEX_op_ld8s_i32:
2418 case INDEX_op_ld16u_i32:
2419 case INDEX_op_ld16s_i32:
2420 case INDEX_op_ld_i32:
2421 case INDEX_op_not_i32:
2422 case INDEX_op_bswap16_i32:
2423 case INDEX_op_bswap32_i32:
2424 case INDEX_op_ext8s_i32:
2425 case INDEX_op_ext16s_i32:
2426 case INDEX_op_extract_i32:
2427 case INDEX_op_ld8u_i64:
2428 case INDEX_op_ld8s_i64:
2429 case INDEX_op_ld16u_i64:
2430 case INDEX_op_ld16s_i64:
2431 case INDEX_op_ld32s_i64:
2432 case INDEX_op_ld32u_i64:
2433 case INDEX_op_ld_i64:
2434 case INDEX_op_not_i64:
2435 case INDEX_op_bswap16_i64:
2436 case INDEX_op_bswap32_i64:
2437 case INDEX_op_bswap64_i64:
2438 case INDEX_op_ext8s_i64:
2439 case INDEX_op_ext16s_i64:
2440 case INDEX_op_ext32s_i64:
2441 case INDEX_op_ext32u_i64:
2442 case INDEX_op_ext_i32_i64:
2443 case INDEX_op_extu_i32_i64:
2444 case INDEX_op_extrl_i64_i32:
2445 case INDEX_op_extrh_i64_i32:
2446 case INDEX_op_extract_i64:
2447 return C_O1_I1(r, r);
2449 case INDEX_op_st8_i32:
2450 case INDEX_op_st16_i32:
2451 case INDEX_op_st_i32:
2452 case INDEX_op_st8_i64:
2453 case INDEX_op_st16_i64:
2454 case INDEX_op_st32_i64:
2455 case INDEX_op_st_i64:
2456 return C_O0_I2(rZ, r);
2458 case INDEX_op_add_i32:
2459 case INDEX_op_add_i64:
2460 return C_O1_I2(r, r, rJ);
2461 case INDEX_op_sub_i32:
2462 case INDEX_op_sub_i64:
2463 return C_O1_I2(r, rZ, rN);
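/*
 * In these constraint sets 'r' is any general register and 'Z' also
 * accepts the constant 0, materialised as $zero.  The immediate letters
 * correspond to the TCG_CT_CONST_* flags defined earlier (per this
 * backend's constraint-string definitions): 'I' unsigned 16-bit, 'J'
 * signed 16-bit, 'N' the negatable 16-bit range so that sub with a
 * constant can become an add, 'K' a power of two minus one, and 'W' the
 * word size used by clz.
 */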
2464 case INDEX_op_mul_i32:
2465 case INDEX_op_mulsh_i32:
2466 case INDEX_op_muluh_i32:
2467 case INDEX_op_div_i32:
2468 case INDEX_op_divu_i32:
2469 case INDEX_op_rem_i32:
2470 case INDEX_op_remu_i32:
2471 case INDEX_op_nor_i32:
2472 case INDEX_op_setcond_i32:
2473 case INDEX_op_mul_i64:
2474 case INDEX_op_mulsh_i64:
2475 case INDEX_op_muluh_i64:
2476 case INDEX_op_div_i64:
2477 case INDEX_op_divu_i64:
2478 case INDEX_op_rem_i64:
2479 case INDEX_op_remu_i64:
2480 case INDEX_op_nor_i64:
2481 case INDEX_op_setcond_i64:
2482 return C_O1_I2(r, rZ, rZ);
2483 case INDEX_op_muls2_i32:
2484 case INDEX_op_mulu2_i32:
2485 case INDEX_op_muls2_i64:
2486 case INDEX_op_mulu2_i64:
2487 return C_O2_I2(r, r, r, r);
2488 case INDEX_op_and_i32:
2489 case INDEX_op_and_i64:
2490 return C_O1_I2(r, r, rIK);
2491 case INDEX_op_or_i32:
2492 case INDEX_op_xor_i32:
2493 case INDEX_op_or_i64:
2494 case INDEX_op_xor_i64:
2495 return C_O1_I2(r, r, rI);
2496 case INDEX_op_shl_i32:
2497 case INDEX_op_shr_i32:
2498 case INDEX_op_sar_i32:
2499 case INDEX_op_rotr_i32:
2500 case INDEX_op_rotl_i32:
2501 case INDEX_op_shl_i64:
2502 case INDEX_op_shr_i64:
2503 case INDEX_op_sar_i64:
2504 case INDEX_op_rotr_i64:
2505 case INDEX_op_rotl_i64:
2506 return C_O1_I2(r, r, ri);
2507 case INDEX_op_clz_i32:
2508 case INDEX_op_clz_i64:
2509 return C_O1_I2(r, r, rWZ);
2511 case INDEX_op_deposit_i32:
2512 case INDEX_op_deposit_i64:
2513 return C_O1_I2(r, 0, rZ);
2514 case INDEX_op_brcond_i32:
2515 case INDEX_op_brcond_i64:
2516 return C_O0_I2(rZ, rZ);
2517 case INDEX_op_movcond_i32:
2518 case INDEX_op_movcond_i64:
2519 return (use_mips32r6_instructions
2520 ? C_O1_I4(r, rZ, rZ, rZ, rZ)
2521 : C_O1_I4(r, rZ, rZ, rZ, 0));
2522 case INDEX_op_add2_i32:
2523 case INDEX_op_sub2_i32:
2524 return C_O2_I4(r, r, rZ, rZ, rN, rN);
2525 case INDEX_op_setcond2_i32:
2526 return C_O1_I4(r, rZ, rZ, rZ, rZ);
2527 case INDEX_op_brcond2_i32:
2528 return C_O0_I4(rZ, rZ, rZ, rZ);
2530 case INDEX_op_qemu_ld_i32:
2531 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
2532 ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
2533 case INDEX_op_qemu_st_i32:
2534 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
2535 ? C_O0_I2(SZ, S) : C_O0_I3(SZ, S, S));
2536 case INDEX_op_qemu_ld_i64:
2537 return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
2538 : TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, L)
2539 : C_O2_I2(r, r, L, L));
2540 case INDEX_op_qemu_st_i64:
2541 return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(SZ, S)
2542 : TARGET_LONG_BITS == 32 ? C_O0_I3(SZ, SZ, S)
2543 : C_O0_I4(SZ, SZ, S, S));
2546 g_assert_not_reached();
2550 static const int tcg_target_callee_save_regs[] = {
2551 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
2560 TCG_REG_RA, /* should be last for ABI compliance */
2563 /* The Linux kernel doesn't tell us which instruction-set revisions the
2564 CPU implements, so probe for them at run time using a SIGILL handler. */
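/*
 * Each probe executes one representative instruction of the feature under
 * test.  If the CPU lacks it, the resulting SIGILL is caught by
 * sigill_handler below, which records the fault and advances the PC past
 * the 4-byte instruction so that execution continues with the next probe.
 */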
2567 #ifndef use_movnz_instructions
2568 bool use_movnz_instructions = false;
2571 #ifndef use_mips32_instructions
2572 bool use_mips32_instructions = false;
2575 #ifndef use_mips32r2_instructions
2576 bool use_mips32r2_instructions = false;
2579 static volatile sig_atomic_t got_sigill;
2581 static void sigill_handler(int signo, siginfo_t *si, void *data)
2583 /* Skip the faulty instruction */
2584 ucontext_t *uc = (ucontext_t *)data;
2585 uc->uc_mcontext.pc += 4;
2590 static void tcg_target_detect_isa(void)
2592 struct sigaction sa_old, sa_new;
2594 memset(&sa_new, 0, sizeof(sa_new));
2595 sa_new.sa_flags = SA_SIGINFO;
2596 sa_new.sa_sigaction = sigill_handler;
2597 sigaction(SIGILL, &sa_new, &sa_old);
2599 /* Probe for movn/movz, necessary to implement movcond. */
2600 #ifndef use_movnz_instructions
2602 asm volatile(".set push\n"
2604 "movn $zero, $zero, $zero\n"
2605 "movz $zero, $zero, $zero\n"
2608 use_movnz_instructions = !got_sigill;
2611 /* Probe for MIPS32 instructions. As no subsetting is allowed
2612 by the specification, it is only necessary to probe for one
2613 of the instructions. */
2614 #ifndef use_mips32_instructions
2616 asm volatile(".set push\n"
2618 "mul $zero, $zero\n"
2621 use_mips32_instructions = !got_sigill;
2624 /* Probe for MIPS32r2 instructions if MIPS32 instructions are
2625 available. As no subsetting is allowed by the specification,
2626 it is only necessary to probe for one of the instructions. */
2627 #ifndef use_mips32r2_instructions
2628 if (use_mips32_instructions) {
2630 asm volatile(".set push\n"
2632 "seb $zero, $zero\n"
2635 use_mips32r2_instructions = !got_sigill;
2639 sigaction(SIGILL, &sa_old, NULL);
2642 static tcg_insn_unit *align_code_ptr(TCGContext *s)
{
2644 uintptr_t p = (uintptr_t)s->code_ptr;
/* Round the output pointer up to a 16-byte boundary. */
p = (p + 15) & -16;
2647 s->code_ptr = (void *)p;
return s->code_ptr;
}
2652 /* Stack frame parameters. */
2653 #define REG_SIZE (TCG_TARGET_REG_BITS / 8)
2654 #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2655 #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2657 #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2658 + TCG_TARGET_STACK_ALIGN - 1) \
2659 & -TCG_TARGET_STACK_ALIGN)
2660 #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
2662 /* FRAME_SIZE must fit in the signed 16-bit immediate used to allocate the frame (ALIAS_PADDI below). */
2663 QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7fff);
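/*
 * Resulting frame layout, growing up from $sp:
 *   [sp + 0, +TCG_STATIC_CALL_ARGS_SIZE)  outgoing call argument area
 *   [.. , +TEMP_SIZE)                     TCG temporary buffer
 *   [SAVE_OFS, +SAVE_SIZE)                callee-saved registers and $ra
 * with the total rounded up to TCG_TARGET_STACK_ALIGN.
 */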
2665 /* Generate global QEMU prologue and epilogue code */
2666 static void tcg_target_qemu_prologue(TCGContext *s)
2670 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2673 tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2674 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2675 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2676 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2679 #ifndef CONFIG_SOFTMMU
2681 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2682 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2686 /* Call generated code */
2687 tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
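/*
 * The move below executes in the branch delay slot of the JR above: it
 * loads env (TCG_AREG0) from the first C argument before control reaches
 * the translated code pointed to by the second argument.
 */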
2689 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2692 * Return path for goto_ptr: set the return value to 0, as exit_tb does,
2693 * and fall through to the rest of the epilogue.
2695 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2696 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO);
2699 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2700 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2701 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2702 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2705 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
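/*
 * Delay slot of the return: the stack pointer is restored after the JR
 * has been issued but before the jump takes effect.
 */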
2707 tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2709 if (use_mips32r2_instructions) {
2713 /* Bswap subroutines: Input in TCG_TMP0, output in TCG_TMP3;
2714 clobbers TCG_TMP1, TCG_TMP2. */
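/*
 * These helpers are only needed when the MIPS32R2 byte-swap instructions
 * are unavailable (cf. the use_mips32r2_instructions check above); they
 * are reached via a call from the generated code and each returns with
 * JR $ra, its final OR sitting in the branch delay slot.
 */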
2717 * bswap32 -- 32-bit swap, result sign-extended on mips64. Input t0 = abcd.
2719 bswap32_addr = tcg_splitwx_to_rx(align_code_ptr(s));
2720 /* t3 = (ssss)d000 */
2721 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP3, TCG_TMP0, 24);
2723 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 24);
2725 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
2727 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2729 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
2731 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
2733 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2735 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2736 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
2737 /* t3 = dcba -- delay slot */
2738 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2740 if (TCG_TARGET_REG_BITS == 32) {
2745 * bswap32u -- 32-bit swap, result zero-extended. Input t0 = ....abcd.
2747 bswap32u_addr = tcg_splitwx_to_rx(align_code_ptr(s));
2748 /* t1 = (0000)000d */
2749 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP0, 0xff);
2751 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, TCG_TMP0, 24);
2752 /* t1 = (0000)d000 */
2753 tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);
2755 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
2757 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2759 tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
2761 tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
2763 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2765 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2766 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
2767 /* t3 = dcba -- delay slot */
2768 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2771 * bswap64 -- 64-bit swap. Input t0 = abcdefgh
2773 bswap64_addr = tcg_splitwx_to_rx(align_code_ptr(s));
2775 tcg_out_dsll(s, TCG_TMP3, TCG_TMP0, 56);
2777 tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 56);
2780 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
2782 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2784 tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 40);
2786 tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
2788 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2791 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2793 tcg_out_dsrl(s, TCG_TMP2, TCG_TMP0, 32);
2795 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2798 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP2, 0xff00);
2800 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP2, 0x00ff);
2802 tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 8);
2804 tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 24);
2807 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2809 tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 16);
2811 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2814 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP1, 0x00ff);
2816 tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
2818 tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
2820 tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);
2823 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
2824 tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
2825 /* t3 = hgfedcba -- delay slot */
2826 tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
2829 static void tcg_target_init(TCGContext *s)
2831 tcg_target_detect_isa();
2832 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
2833 if (TCG_TARGET_REG_BITS == 64) {
2834 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
2837 tcg_target_call_clobber_regs = 0;
2838 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
2839 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
2840 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A0);
2841 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A1);
2842 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A2);
2843 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_A3);
2844 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T0);
2845 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T1);
2846 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T2);
2847 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T3);
2848 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T4);
2849 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T5);
2850 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T6);
2851 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T7);
2852 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T8);
2853 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_T9);
2855 s->reserved_regs = 0;
2856 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */
2857 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */
2858 tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */
2859 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */
2860 tcg_regset_set_reg(s->reserved_regs, TCG_TMP1); /* internal use */
2861 tcg_regset_set_reg(s->reserved_regs, TCG_TMP2); /* internal use */
2862 tcg_regset_set_reg(s->reserved_regs, TCG_TMP3); /* internal use */
2863 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */
2864 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
2865 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
2870 uint8_t fde_def_cfa[4];
2871 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2874 #define ELF_HOST_MACHINE EM_MIPS
2875 /* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
2876 which is good because they're really quite complicated for MIPS. */
2878 static const DebugFrame debug_frame = {
2879 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2882 .h.cie.code_align = 1,
2883 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2884 .h.cie.return_column = TCG_REG_RA,
2886 /* Total FDE size does not include the "len" member. */
2887 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2890 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
2891 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2895 0x80 + 16, 9, /* DW_CFA_offset, s0, -72 */
2896 0x80 + 17, 8, /* DW_CFA_offset, s2, -64 */
2897 0x80 + 18, 7, /* DW_CFA_offset, s3, -56 */
2898 0x80 + 19, 6, /* DW_CFA_offset, s4, -48 */
2899 0x80 + 20, 5, /* DW_CFA_offset, s5, -40 */
2900 0x80 + 21, 4, /* DW_CFA_offset, s6, -32 */
2901 0x80 + 22, 3, /* DW_CFA_offset, s7, -24 */
2902 0x80 + 30, 2, /* DW_CFA_offset, s8, -16 */
2903 0x80 + 31, 1, /* DW_CFA_offset, ra, -8 */
2907 void tcg_register_jit(const void *buf, size_t buf_size)
2909 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));