/*
 * Source: mirror of tcg/ia64/tcg-target.c (QEMU), at commit
 * "tcg: Merge memop and mmu_idx parameters to qemu_ld/st".
 */
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009-2010 Aurelien Jarno <aurelien@aurel32.net>
5 * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26 /*
27 * Register definitions
28 */
29
#ifndef NDEBUG
/* Human-readable names of the 64 ia64 general registers; used only by
   debug output, hence compiled out in NDEBUG builds. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
    "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
    "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
};
#endif
42
#ifdef CONFIG_USE_GUEST_BASE
/* Register reserved to hold the guest address-space base offset. */
#define TCG_GUEST_BASE_REG TCG_REG_R55
#else
/* No guest base: use r0, which reads as zero on ia64, so adding it as a
   base contributes no offset. */
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif
51
52 /* Branch registers */
/* Branch registers.  b0 is used as the call return-address register by
   tcg_out_call below. */
enum {
    TCG_REG_B0 = 0,
    TCG_REG_B1,
    TCG_REG_B2,
    TCG_REG_B3,
    TCG_REG_B4,
    TCG_REG_B5,
    TCG_REG_B6,
    TCG_REG_B7,
};
63
64 /* Floating point registers */
65 enum {
66 TCG_REG_F0 = 0,
67 TCG_REG_F1,
68 TCG_REG_F2,
69 TCG_REG_F3,
70 TCG_REG_F4,
71 TCG_REG_F5,
72 TCG_REG_F6,
73 TCG_REG_F7,
74 TCG_REG_F8,
75 TCG_REG_F9,
76 TCG_REG_F10,
77 TCG_REG_F11,
78 TCG_REG_F12,
79 TCG_REG_F13,
80 TCG_REG_F14,
81 TCG_REG_F15,
82 };
83
84 /* Predicate registers */
85 enum {
86 TCG_REG_P0 = 0,
87 TCG_REG_P1,
88 TCG_REG_P2,
89 TCG_REG_P3,
90 TCG_REG_P4,
91 TCG_REG_P5,
92 TCG_REG_P6,
93 TCG_REG_P7,
94 TCG_REG_P8,
95 TCG_REG_P9,
96 TCG_REG_P10,
97 TCG_REG_P11,
98 TCG_REG_P12,
99 TCG_REG_P13,
100 TCG_REG_P14,
101 TCG_REG_P15,
102 };
103
104 /* Application registers */
/* Application registers.  NOTE(review): AR 64 is ar.pfs (previous
   function state) on ia64, presumably saved/restored by the prologue
   code outside this chunk — confirm against tcg_target_qemu_prologue. */
enum {
    TCG_REG_PFS = 64,
};
108
/* Register allocation preference order.  r35-r55 come first, then the
   r14-r31 scratch block, then the call-argument block r56-r63 (see
   tcg_target_call_iarg_regs), and r8-r11 last — r8 is also the call
   return register (tcg_target_call_oarg_regs), so it is preferred
   least. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R35,
    TCG_REG_R36,
    TCG_REG_R37,
    TCG_REG_R38,
    TCG_REG_R39,
    TCG_REG_R40,
    TCG_REG_R41,
    TCG_REG_R42,
    TCG_REG_R43,
    TCG_REG_R44,
    TCG_REG_R45,
    TCG_REG_R46,
    TCG_REG_R47,
    TCG_REG_R48,
    TCG_REG_R49,
    TCG_REG_R50,
    TCG_REG_R51,
    TCG_REG_R52,
    TCG_REG_R53,
    TCG_REG_R54,
    TCG_REG_R55,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
    TCG_REG_R56,
    TCG_REG_R57,
    TCG_REG_R58,
    TCG_REG_R59,
    TCG_REG_R60,
    TCG_REG_R61,
    TCG_REG_R62,
    TCG_REG_R63,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11
};
162
/* Registers carrying the first eight integer call arguments
   (r56-r63; the softmmu 'S' constraint below excludes r56-r58 because
   they are clobbered when marshalling helper-call arguments). */
static const int tcg_target_call_iarg_regs[8] = {
    TCG_REG_R56,
    TCG_REG_R57,
    TCG_REG_R58,
    TCG_REG_R59,
    TCG_REG_R60,
    TCG_REG_R61,
    TCG_REG_R62,
    TCG_REG_R63,
};
173
/* Register carrying the integer call return value. */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R8
};
177
178 /*
179 * opcode formation
180 */
181
182 /* bundle templates: stops (double bar in the IA64 manual) are marked with
183 an uppercase letter. */
/* bundle templates: stops (double bar in the IA64 manual) are marked with
   an uppercase letter.  The enumerator value is the 5-bit template field
   of the bundle (see tcg_out_bundle); the letters name the unit type of
   each of the three slots (m/i/b/f/l/x). */
enum {
    mii = 0x00,
    miI = 0x01,
    mIi = 0x02,
    mII = 0x03,
    mlx = 0x04,
    mLX = 0x05,
    mmi = 0x08,
    mmI = 0x09,
    Mmi = 0x0a,
    MmI = 0x0b,
    mfi = 0x0c,
    mfI = 0x0d,
    mmf = 0x0e,
    mmF = 0x0f,
    mib = 0x10,
    miB = 0x11,
    mbb = 0x12,
    mbB = 0x13,
    bbb = 0x16,
    bbB = 0x17,
    mmb = 0x18,
    mmB = 0x19,
    mfb = 0x1c,
    mfB = 0x1d,
};
210
211 enum {
212 OPC_ADD_A1 = 0x10000000000ull,
213 OPC_AND_A1 = 0x10060000000ull,
214 OPC_AND_A3 = 0x10160000000ull,
215 OPC_ANDCM_A1 = 0x10068000000ull,
216 OPC_ANDCM_A3 = 0x10168000000ull,
217 OPC_ADDS_A4 = 0x10800000000ull,
218 OPC_ADDL_A5 = 0x12000000000ull,
219 OPC_ALLOC_M34 = 0x02c00000000ull,
220 OPC_BR_DPTK_FEW_B1 = 0x08400000000ull,
221 OPC_BR_SPTK_MANY_B1 = 0x08000001000ull,
222 OPC_BR_CALL_SPNT_FEW_B3 = 0x0a200000000ull,
223 OPC_BR_SPTK_MANY_B4 = 0x00100001000ull,
224 OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull,
225 OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull,
226 OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull,
227 OPC_BRL_CALL_SPNT_MANY_X4 = 0x1a200001000ull,
228 OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull,
229 OPC_CMP_LT_A6 = 0x18000000000ull,
230 OPC_CMP_LTU_A6 = 0x1a000000000ull,
231 OPC_CMP_EQ_A6 = 0x1c000000000ull,
232 OPC_CMP4_LT_A6 = 0x18400000000ull,
233 OPC_CMP4_LTU_A6 = 0x1a400000000ull,
234 OPC_CMP4_EQ_A6 = 0x1c400000000ull,
235 OPC_DEP_I14 = 0x0ae00000000ull,
236 OPC_DEP_I15 = 0x08000000000ull,
237 OPC_DEP_Z_I12 = 0x0a600000000ull,
238 OPC_EXTR_I11 = 0x0a400002000ull,
239 OPC_EXTR_U_I11 = 0x0a400000000ull,
240 OPC_FCVT_FX_TRUNC_S1_F10 = 0x004d0000000ull,
241 OPC_FCVT_FXU_TRUNC_S1_F10 = 0x004d8000000ull,
242 OPC_FCVT_XF_F11 = 0x000e0000000ull,
243 OPC_FMA_S1_F1 = 0x10400000000ull,
244 OPC_FNMA_S1_F1 = 0x18400000000ull,
245 OPC_FRCPA_S1_F6 = 0x00600000000ull,
246 OPC_GETF_SIG_M19 = 0x08708000000ull,
247 OPC_LD1_M1 = 0x08000000000ull,
248 OPC_LD1_M3 = 0x0a000000000ull,
249 OPC_LD2_M1 = 0x08040000000ull,
250 OPC_LD2_M3 = 0x0a040000000ull,
251 OPC_LD4_M1 = 0x08080000000ull,
252 OPC_LD4_M3 = 0x0a080000000ull,
253 OPC_LD8_M1 = 0x080c0000000ull,
254 OPC_LD8_M3 = 0x0a0c0000000ull,
255 OPC_MUX1_I3 = 0x0eca0000000ull,
256 OPC_NOP_B9 = 0x04008000000ull,
257 OPC_NOP_F16 = 0x00008000000ull,
258 OPC_NOP_I18 = 0x00008000000ull,
259 OPC_NOP_M48 = 0x00008000000ull,
260 OPC_MOV_I21 = 0x00e00100000ull,
261 OPC_MOV_RET_I21 = 0x00e00500000ull,
262 OPC_MOV_I22 = 0x00188000000ull,
263 OPC_MOV_I_I26 = 0x00150000000ull,
264 OPC_MOVL_X2 = 0x0c000000000ull,
265 OPC_OR_A1 = 0x10070000000ull,
266 OPC_OR_A3 = 0x10170000000ull,
267 OPC_SETF_EXP_M18 = 0x0c748000000ull,
268 OPC_SETF_SIG_M18 = 0x0c708000000ull,
269 OPC_SHL_I7 = 0x0f240000000ull,
270 OPC_SHR_I5 = 0x0f220000000ull,
271 OPC_SHR_U_I5 = 0x0f200000000ull,
272 OPC_SHRP_I10 = 0x0ac00000000ull,
273 OPC_SXT1_I29 = 0x000a0000000ull,
274 OPC_SXT2_I29 = 0x000a8000000ull,
275 OPC_SXT4_I29 = 0x000b0000000ull,
276 OPC_ST1_M4 = 0x08c00000000ull,
277 OPC_ST2_M4 = 0x08c40000000ull,
278 OPC_ST4_M4 = 0x08c80000000ull,
279 OPC_ST8_M4 = 0x08cc0000000ull,
280 OPC_SUB_A1 = 0x10028000000ull,
281 OPC_SUB_A3 = 0x10128000000ull,
282 OPC_UNPACK4_L_I2 = 0x0f860000000ull,
283 OPC_XMA_L_F2 = 0x1d000000000ull,
284 OPC_XOR_A1 = 0x10078000000ull,
285 OPC_XOR_A3 = 0x10178000000ull,
286 OPC_ZXT1_I29 = 0x00080000000ull,
287 OPC_ZXT2_I29 = 0x00088000000ull,
288 OPC_ZXT4_I29 = 0x00090000000ull,
289
290 INSN_NOP_M = OPC_NOP_M48, /* nop.m 0 */
291 INSN_NOP_I = OPC_NOP_I18, /* nop.i 0 */
292 };
293
static inline uint64_t tcg_opc_a1(int qp, uint64_t opc, int r1,
                                  int r2, int r3)
{
    /* A1 format: three-register ALU operation, predicated on qp. */
    uint64_t insn = opc;

    insn |= (uint64_t)(r3 & 0x7f) << 20;
    insn |= (uint64_t)(r2 & 0x7f) << 13;
    insn |= (uint64_t)(r1 & 0x7f) << 6;
    insn |= qp & 0x3f;
    return insn;
}
303
304 static inline uint64_t tcg_opc_a3(int qp, uint64_t opc, int r1,
305 uint64_t imm, int r3)
306 {
307 return opc
308 | ((imm & 0x80) << 29) /* s */
309 | ((imm & 0x7f) << 13) /* imm7b */
310 | ((r3 & 0x7f) << 20)
311 | ((r1 & 0x7f) << 6)
312 | (qp & 0x3f);
313 }
314
315 static inline uint64_t tcg_opc_a4(int qp, uint64_t opc, int r1,
316 uint64_t imm, int r3)
317 {
318 return opc
319 | ((imm & 0x2000) << 23) /* s */
320 | ((imm & 0x1f80) << 20) /* imm6d */
321 | ((imm & 0x007f) << 13) /* imm7b */
322 | ((r3 & 0x7f) << 20)
323 | ((r1 & 0x7f) << 6)
324 | (qp & 0x3f);
325 }
326
327 static inline uint64_t tcg_opc_a5(int qp, uint64_t opc, int r1,
328 uint64_t imm, int r3)
329 {
330 return opc
331 | ((imm & 0x200000) << 15) /* s */
332 | ((imm & 0x1f0000) << 6) /* imm5c */
333 | ((imm & 0x00ff80) << 20) /* imm9d */
334 | ((imm & 0x00007f) << 13) /* imm7b */
335 | ((r3 & 0x03) << 20)
336 | ((r1 & 0x7f) << 6)
337 | (qp & 0x3f);
338 }
339
/* A6 format: compare, writing predicate pair p1/p2.
 * The p2 field is shifted left by 27; cast to uint64_t first, since a
 * 6-bit value shifted by 27 can exceed the range of a 32-bit int
 * (signed overflow is undefined behavior). */
static inline uint64_t tcg_opc_a6(int qp, uint64_t opc, int p1,
                                  int p2, int r2, int r3)
{
    return opc
           | ((uint64_t)(p2 & 0x3f) << 27)
           | ((uint64_t)(r3 & 0x7f) << 20)
           | ((uint64_t)(r2 & 0x7f) << 13)
           | ((uint64_t)(p1 & 0x3f) << 6)
           | (qp & 0x3f);
}
350
static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm)
{
    /* B1 format: IP-relative branch with a 21-bit displacement
       (sign bit s + imm20b). */
    uint64_t s      = (imm & 0x100000) << 16; /* s */
    uint64_t imm20b = (imm & 0x0fffff) << 13; /* imm20b */

    return opc | s | imm20b | (qp & 0x3f);
}
358
359 static inline uint64_t tcg_opc_b3(int qp, uint64_t opc, int b1, uint64_t imm)
360 {
361 return opc
362 | ((imm & 0x100000) << 16) /* s */
363 | ((imm & 0x0fffff) << 13) /* imm20b */
364 | ((b1 & 0x7) << 6)
365 | (qp & 0x3f);
366 }
367
368 static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2)
369 {
370 return opc
371 | ((b2 & 0x7) << 13)
372 | (qp & 0x3f);
373 }
374
375 static inline uint64_t tcg_opc_b5(int qp, uint64_t opc, int b1, int b2)
376 {
377 return opc
378 | ((b2 & 0x7) << 13)
379 | ((b1 & 0x7) << 6)
380 | (qp & 0x3f);
381 }
382
383
384 static inline uint64_t tcg_opc_b9(int qp, uint64_t opc, uint64_t imm)
385 {
386 return opc
387 | ((imm & 0x100000) << 16) /* i */
388 | ((imm & 0x0fffff) << 6) /* imm20a */
389 | (qp & 0x3f);
390 }
391
/* F1 format: fused multiply-add, f1 = f3 * f4 + f2.
 * Cast the f4 field to uint64_t before the 27-bit shift: a 7-bit
 * register number shifted by 27 can overflow a 32-bit int (UB). */
static inline uint64_t tcg_opc_f1(int qp, uint64_t opc, int f1,
                                  int f3, int f4, int f2)
{
    return opc
           | ((uint64_t)(f4 & 0x7f) << 27)
           | ((uint64_t)(f3 & 0x7f) << 20)
           | ((uint64_t)(f2 & 0x7f) << 13)
           | ((uint64_t)(f1 & 0x7f) << 6)
           | (qp & 0x3f);
}
402
/* F2 format: fixed-point multiply-add (xma), same field layout as F1.
 * Cast before the 27-bit shift to avoid signed int overflow (UB). */
static inline uint64_t tcg_opc_f2(int qp, uint64_t opc, int f1,
                                  int f3, int f4, int f2)
{
    return opc
           | ((uint64_t)(f4 & 0x7f) << 27)
           | ((uint64_t)(f3 & 0x7f) << 20)
           | ((uint64_t)(f2 & 0x7f) << 13)
           | ((uint64_t)(f1 & 0x7f) << 6)
           | (qp & 0x3f);
}
413
/* F6 format: frcpa-style op writing f1 and predicate p2.
 * Cast the p2 field before the 27-bit shift to avoid signed int
 * overflow (UB). */
static inline uint64_t tcg_opc_f6(int qp, uint64_t opc, int f1,
                                  int p2, int f2, int f3)
{
    return opc
           | ((uint64_t)(p2 & 0x3f) << 27)
           | ((uint64_t)(f3 & 0x7f) << 20)
           | ((uint64_t)(f2 & 0x7f) << 13)
           | ((uint64_t)(f1 & 0x7f) << 6)
           | (qp & 0x3f);
}
424
425 static inline uint64_t tcg_opc_f10(int qp, uint64_t opc, int f1, int f2)
426 {
427 return opc
428 | ((f2 & 0x7f) << 13)
429 | ((f1 & 0x7f) << 6)
430 | (qp & 0x3f);
431 }
432
433 static inline uint64_t tcg_opc_f11(int qp, uint64_t opc, int f1, int f2)
434 {
435 return opc
436 | ((f2 & 0x7f) << 13)
437 | ((f1 & 0x7f) << 6)
438 | (qp & 0x3f);
439 }
440
441 static inline uint64_t tcg_opc_f16(int qp, uint64_t opc, uint64_t imm)
442 {
443 return opc
444 | ((imm & 0x100000) << 16) /* i */
445 | ((imm & 0x0fffff) << 6) /* imm20a */
446 | (qp & 0x3f);
447 }
448
449 static inline uint64_t tcg_opc_i2(int qp, uint64_t opc, int r1,
450 int r2, int r3)
451 {
452 return opc
453 | ((r3 & 0x7f) << 20)
454 | ((r2 & 0x7f) << 13)
455 | ((r1 & 0x7f) << 6)
456 | (qp & 0x3f);
457 }
458
459 static inline uint64_t tcg_opc_i3(int qp, uint64_t opc, int r1,
460 int r2, int mbtype)
461 {
462 return opc
463 | ((mbtype & 0x0f) << 20)
464 | ((r2 & 0x7f) << 13)
465 | ((r1 & 0x7f) << 6)
466 | (qp & 0x3f);
467 }
468
469 static inline uint64_t tcg_opc_i5(int qp, uint64_t opc, int r1,
470 int r3, int r2)
471 {
472 return opc
473 | ((r3 & 0x7f) << 20)
474 | ((r2 & 0x7f) << 13)
475 | ((r1 & 0x7f) << 6)
476 | (qp & 0x3f);
477 }
478
479 static inline uint64_t tcg_opc_i7(int qp, uint64_t opc, int r1,
480 int r2, int r3)
481 {
482 return opc
483 | ((r3 & 0x7f) << 20)
484 | ((r2 & 0x7f) << 13)
485 | ((r1 & 0x7f) << 6)
486 | (qp & 0x3f);
487 }
488
489 static inline uint64_t tcg_opc_i10(int qp, uint64_t opc, int r1,
490 int r2, int r3, uint64_t count)
491 {
492 return opc
493 | ((count & 0x3f) << 27)
494 | ((r3 & 0x7f) << 20)
495 | ((r2 & 0x7f) << 13)
496 | ((r1 & 0x7f) << 6)
497 | (qp & 0x3f);
498 }
499
500 static inline uint64_t tcg_opc_i11(int qp, uint64_t opc, int r1,
501 int r3, uint64_t pos, uint64_t len)
502 {
503 return opc
504 | ((len & 0x3f) << 27)
505 | ((r3 & 0x7f) << 20)
506 | ((pos & 0x3f) << 14)
507 | ((r1 & 0x7f) << 6)
508 | (qp & 0x3f);
509 }
510
511 static inline uint64_t tcg_opc_i12(int qp, uint64_t opc, int r1,
512 int r2, uint64_t pos, uint64_t len)
513 {
514 return opc
515 | ((len & 0x3f) << 27)
516 | ((pos & 0x3f) << 20)
517 | ((r2 & 0x7f) << 13)
518 | ((r1 & 0x7f) << 6)
519 | (qp & 0x3f);
520 }
521
522 static inline uint64_t tcg_opc_i14(int qp, uint64_t opc, int r1, uint64_t imm,
523 int r3, uint64_t pos, uint64_t len)
524 {
525 return opc
526 | ((imm & 0x01) << 36)
527 | ((len & 0x3f) << 27)
528 | ((r3 & 0x7f) << 20)
529 | ((pos & 0x3f) << 14)
530 | ((r1 & 0x7f) << 6)
531 | (qp & 0x3f);
532 }
533
534 static inline uint64_t tcg_opc_i15(int qp, uint64_t opc, int r1, int r2,
535 int r3, uint64_t pos, uint64_t len)
536 {
537 return opc
538 | ((pos & 0x3f) << 31)
539 | ((len & 0x0f) << 27)
540 | ((r3 & 0x7f) << 20)
541 | ((r2 & 0x7f) << 13)
542 | ((r1 & 0x7f) << 6)
543 | (qp & 0x3f);
544 }
545
546 static inline uint64_t tcg_opc_i18(int qp, uint64_t opc, uint64_t imm)
547 {
548 return opc
549 | ((imm & 0x100000) << 16) /* i */
550 | ((imm & 0x0fffff) << 6) /* imm20a */
551 | (qp & 0x3f);
552 }
553
554 static inline uint64_t tcg_opc_i21(int qp, uint64_t opc, int b1,
555 int r2, uint64_t imm)
556 {
557 return opc
558 | ((imm & 0x1ff) << 24)
559 | ((r2 & 0x7f) << 13)
560 | ((b1 & 0x7) << 6)
561 | (qp & 0x3f);
562 }
563
564 static inline uint64_t tcg_opc_i22(int qp, uint64_t opc, int r1, int b2)
565 {
566 return opc
567 | ((b2 & 0x7) << 13)
568 | ((r1 & 0x7f) << 6)
569 | (qp & 0x3f);
570 }
571
572 static inline uint64_t tcg_opc_i26(int qp, uint64_t opc, int ar3, int r2)
573 {
574 return opc
575 | ((ar3 & 0x7f) << 20)
576 | ((r2 & 0x7f) << 13)
577 | (qp & 0x3f);
578 }
579
580 static inline uint64_t tcg_opc_i29(int qp, uint64_t opc, int r1, int r3)
581 {
582 return opc
583 | ((r3 & 0x7f) << 20)
584 | ((r1 & 0x7f) << 6)
585 | (qp & 0x3f);
586 }
587
/* L-unit slot of a movl (X2): carries bits 62..22 of the immediate.
   Bit 63 and bits 21..0 are encoded in the X2 slot (tcg_opc_x2). */
static inline uint64_t tcg_opc_l2(uint64_t imm)
{
    return (imm & 0x7fffffffffc00000ull) >> 22;
}
592
/* L-unit slot of a brl (X3/X4): carries bits 58..20 of the displacement.
   Bit 59 (i) and bits 19..0 (imm20b) are encoded in the X slot
   (tcg_opc_x3/tcg_opc_x4). */
static inline uint64_t tcg_opc_l3(uint64_t imm)
{
    return (imm & 0x07fffffffff00000ull) >> 18;
}
597
598 #define tcg_opc_l4 tcg_opc_l3
599
600 static inline uint64_t tcg_opc_m1(int qp, uint64_t opc, int r1, int r3)
601 {
602 return opc
603 | ((r3 & 0x7f) << 20)
604 | ((r1 & 0x7f) << 6)
605 | (qp & 0x3f);
606 }
607
608 static inline uint64_t tcg_opc_m3(int qp, uint64_t opc, int r1,
609 int r3, uint64_t imm)
610 {
611 return opc
612 | ((imm & 0x100) << 28) /* s */
613 | ((imm & 0x080) << 20) /* i */
614 | ((imm & 0x07f) << 13) /* imm7b */
615 | ((r3 & 0x7f) << 20)
616 | ((r1 & 0x7f) << 6)
617 | (qp & 0x3f);
618 }
619
620 static inline uint64_t tcg_opc_m4(int qp, uint64_t opc, int r2, int r3)
621 {
622 return opc
623 | ((r3 & 0x7f) << 20)
624 | ((r2 & 0x7f) << 13)
625 | (qp & 0x3f);
626 }
627
628 static inline uint64_t tcg_opc_m18(int qp, uint64_t opc, int f1, int r2)
629 {
630 return opc
631 | ((r2 & 0x7f) << 13)
632 | ((f1 & 0x7f) << 6)
633 | (qp & 0x3f);
634 }
635
636 static inline uint64_t tcg_opc_m19(int qp, uint64_t opc, int r1, int f2)
637 {
638 return opc
639 | ((f2 & 0x7f) << 13)
640 | ((r1 & 0x7f) << 6)
641 | (qp & 0x3f);
642 }
643
644 static inline uint64_t tcg_opc_m34(int qp, uint64_t opc, int r1,
645 int sof, int sol, int sor)
646 {
647 return opc
648 | ((sor & 0x0f) << 27)
649 | ((sol & 0x7f) << 20)
650 | ((sof & 0x7f) << 13)
651 | ((r1 & 0x7f) << 6)
652 | (qp & 0x3f);
653 }
654
655 static inline uint64_t tcg_opc_m48(int qp, uint64_t opc, uint64_t imm)
656 {
657 return opc
658 | ((imm & 0x100000) << 16) /* i */
659 | ((imm & 0x0fffff) << 6) /* imm20a */
660 | (qp & 0x3f);
661 }
662
static inline uint64_t tcg_opc_x2(int qp, uint64_t opc,
                                  int r1, uint64_t imm)
{
    /* X2 format (movl): scatter the low/top bits of the 64-bit
       immediate into the X slot; the middle bits live in the L slot
       (tcg_opc_l2). */
    uint64_t i     = (imm & 0x8000000000000000ull) >> 27; /* i */
    uint64_t ic    =  imm & 0x0000000000200000ull;        /* ic */
    uint64_t imm5c = (imm & 0x00000000001f0000ull) << 6;  /* imm5c */
    uint64_t imm9d = (imm & 0x000000000000ff80ull) << 20; /* imm9d */
    uint64_t imm7b = (imm & 0x000000000000007full) << 13; /* imm7b */

    return opc | i | ic | imm5c | imm9d | imm7b
               | ((uint64_t)(r1 & 0x7f) << 6)
               | (qp & 0x3f);
}
675
676 static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm)
677 {
678 return opc
679 | ((imm & 0x0800000000000000ull) >> 23) /* i */
680 | ((imm & 0x00000000000fffffull) << 13) /* imm20b */
681 | (qp & 0x3f);
682 }
683
684 static inline uint64_t tcg_opc_x4(int qp, uint64_t opc, int b1, uint64_t imm)
685 {
686 return opc
687 | ((imm & 0x0800000000000000ull) >> 23) /* i */
688 | ((imm & 0x00000000000fffffull) << 13) /* imm20b */
689 | ((b1 & 0x7) << 6)
690 | (qp & 0x3f);
691 }
692
693
694 /*
695 * Relocations - Note that we never encode branches elsewhere than slot 2.
696 */
697
/* Patch the 21-bit PC-relative branch displacement of the instruction
   in slot 2 of the bundle at 'pc' so it targets 'target'.  The
   displacement is in bundles (tcg_insn_unit granularity).  Only the
   s and imm20b fields in the high half of the bundle are rewritten;
   all other bits are preserved. */
static void reloc_pcrel21b_slot2(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    uint64_t imm = target - pc;

    pc->hi = (pc->hi & 0xf700000fffffffffull)
             | ((imm & 0x100000) << 39) /* s */
             | ((imm & 0x0fffff) << 36); /* imm20b */
}
706
/* Read back the 21-bit displacement currently encoded in slot 2 of the
   bundle at 'pc' (inverse of reloc_pcrel21b_slot2).  Used by tcg_out_br
   to re-emit the same bits during retranslation and keep the code
   bytes stable. */
static uint64_t get_reloc_pcrel21b_slot2(tcg_insn_unit *pc)
{
    int64_t high = pc->hi;

    return ((high >> 39) & 0x100000) + /* s */
           ((high >> 36) & 0x0fffff);  /* imm20b */
}
714
/* Resolve a relocation recorded by tcg_out_reloc.  This backend only
   ever emits R_IA64_PCREL21B relocations with a zero addend (branches
   are always placed in slot 2 — see the comment above). */
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    assert(addend == 0);
    assert(type == R_IA64_PCREL21B);
    reloc_pcrel21b_slot2(code_ptr, (tcg_insn_unit *)value);
}
722
723 /*
724 * Constraints
725 */
726
727 /* parse target specific constraints */
728 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
729 {
730 const char *ct_str;
731
732 ct_str = *pct_str;
733 switch(ct_str[0]) {
734 case 'r':
735 ct->ct |= TCG_CT_REG;
736 tcg_regset_set(ct->u.regs, 0xffffffffffffffffull);
737 break;
738 case 'I':
739 ct->ct |= TCG_CT_CONST_S22;
740 break;
741 case 'S':
742 ct->ct |= TCG_CT_REG;
743 tcg_regset_set(ct->u.regs, 0xffffffffffffffffull);
744 #if defined(CONFIG_SOFTMMU)
745 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56);
746 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57);
747 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R58);
748 #endif
749 break;
750 case 'Z':
751 /* We are cheating a bit here, using the fact that the register
752 r0 is also the register number 0. Hence there is no need
753 to check for const_args in each instruction. */
754 ct->ct |= TCG_CT_CONST_ZERO;
755 break;
756 default:
757 return -1;
758 }
759 ct_str++;
760 *pct_str = ct_str;
761 return 0;
762 }
763
764 /* test if a constant matches the constraint */
765 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
766 const TCGArgConstraint *arg_ct)
767 {
768 int ct;
769 ct = arg_ct->ct;
770 if (ct & TCG_CT_CONST)
771 return 1;
772 else if ((ct & TCG_CT_CONST_ZERO) && val == 0)
773 return 1;
774 else if ((ct & TCG_CT_CONST_S22) && val == ((int32_t)val << 10) >> 10)
775 return 1;
776 else
777 return 0;
778 }
779
780 /*
781 * Code generation
782 */
783
784 static tcg_insn_unit *tb_ret_addr;
785
/* Emit one 128-bit instruction bundle: a 5-bit template selecting the
   unit type of each slot, followed by three 41-bit instruction slots.
   Slot 1 straddles the two 64-bit halves of the bundle (low 18 bits in
   the first word, high 23 bits in the second). */
static inline void tcg_out_bundle(TCGContext *s, int template,
                                  uint64_t slot0, uint64_t slot1,
                                  uint64_t slot2)
{
    template &= 0x1f;          /* 5 bits */
    slot0 &= 0x1ffffffffffull; /* 41 bits */
    slot1 &= 0x1ffffffffffull; /* 41 bits */
    slot2 &= 0x1ffffffffffull; /* 41 bits */

    *s->code_ptr++ = (tcg_insn_unit){
        (slot1 << 46) | (slot0 << 5) | template,
        (slot2 << 23) | (slot1 >> 18)
    };
}
800
801 static inline uint64_t tcg_opc_mov_a(int qp, TCGReg dst, TCGReg src)
802 {
803 return tcg_opc_a4(qp, OPC_ADDS_A4, dst, 0, src);
804 }
805
806 static inline void tcg_out_mov(TCGContext *s, TCGType type,
807 TCGReg ret, TCGReg arg)
808 {
809 tcg_out_bundle(s, mmI,
810 INSN_NOP_M,
811 INSN_NOP_M,
812 tcg_opc_mov_a(TCG_REG_P0, ret, arg));
813 }
814
815 static inline uint64_t tcg_opc_movi_a(int qp, TCGReg dst, int64_t src)
816 {
817 assert(src == sextract64(src, 0, 22));
818 return tcg_opc_a5(qp, OPC_ADDL_A5, dst, src, TCG_REG_R0);
819 }
820
/* Load an arbitrary 64-bit constant into 'reg' with a single movl
   bundle (L slot carries the middle immediate bits, X2 the rest).
   NOTE(review): 'type' is ignored — for TCG_TYPE_I32 the full 64-bit
   'arg' is emitted; presumably callers only rely on the low 32 bits
   in that case — confirm. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg reg, tcg_target_long arg)
{
    tcg_out_bundle(s, mLX,
                   INSN_NOP_M,
                   tcg_opc_l2 (arg),
                   tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, reg, arg));
}
829
/* Emit an unconditional branch to label 'l'.  If the label is not yet
   resolved, a relocation is recorded and the displacement field is
   re-emitted with its current contents. */
static void tcg_out_br(TCGContext *s, TCGLabel *l)
{
    uint64_t imm;

    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again. This ensure that caches and
       memory are kept coherent during retranslation. */
    if (l->has_value) {
        /* Bundle-granular displacement to the known target. */
        imm = l->u.value_ptr - s->code_ptr;
    } else {
        /* Forward reference: keep whatever bits are already encoded and
           let patch_reloc fill in the real displacement later. */
        imm = get_reloc_pcrel21b_slot2(s->code_ptr);
        tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, l, 0);
    }

    tcg_out_bundle(s, mmB,
                   INSN_NOP_M,
                   INSN_NOP_M,
                   tcg_opc_b1(TCG_REG_P0, OPC_BR_SPTK_MANY_B1, imm));
}
849
850 static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *desc)
851 {
852 uintptr_t func = desc->lo, gp = desc->hi, disp;
853
854 /* Look through the function descriptor. */
855 tcg_out_bundle(s, mlx,
856 INSN_NOP_M,
857 tcg_opc_l2 (gp),
858 tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R1, gp));
859 disp = (tcg_insn_unit *)func - s->code_ptr;
860 tcg_out_bundle(s, mLX,
861 INSN_NOP_M,
862 tcg_opc_l4 (disp),
863 tcg_opc_x4 (TCG_REG_P0, OPC_BRL_CALL_SPTK_MANY_X4,
864 TCG_REG_B0, disp));
865 }
866
867 static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
868 {
869 uint64_t imm, opc1;
870
871 /* At least arg == 0 is a common operation. */
872 if (arg == sextract64(arg, 0, 22)) {
873 opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R8, arg);
874 } else {
875 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
876 opc1 = INSN_NOP_M;
877 }
878
879 imm = tb_ret_addr - s->code_ptr;
880
881 tcg_out_bundle(s, mLX,
882 opc1,
883 tcg_opc_l3 (imm),
884 tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
885 }
886
887 static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
888 {
889 if (s->tb_jmp_offset) {
890 /* direct jump method */
891 tcg_abort();
892 } else {
893 /* indirect jump method */
894 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
895 (tcg_target_long)(s->tb_next + arg));
896 tcg_out_bundle(s, MmI,
897 tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
898 TCG_REG_R2, TCG_REG_R2),
899 INSN_NOP_M,
900 tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6,
901 TCG_REG_R2, 0));
902 tcg_out_bundle(s, mmB,
903 INSN_NOP_M,
904 INSN_NOP_M,
905 tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
906 TCG_REG_B6));
907 }
908 s->tb_next_offset[arg] = tcg_current_code_size(s);
909 }
910
911 static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
912 {
913 tcg_out_bundle(s, mmI,
914 INSN_NOP_M,
915 INSN_NOP_M,
916 tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, addr, 0));
917 tcg_out_bundle(s, mmB,
918 INSN_NOP_M,
919 INSN_NOP_M,
920 tcg_opc_b4(TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
921 }
922
/* Load into 'arg' from memory at arg1 + arg2, using the M1-format load
   opcode 'opc_m4' (ld1/ld2/ld4/ld8).  The address is materialized in
   scratch register r2 first.
   NOTE(review): the fast-path test accepts any multiple of 4 fitting in
   signed 16 bits, but adds (A4) only encodes a signed 14-bit immediate,
   and '((int16_t)arg2 >> 2) << 2' left-shifts a possibly negative value
   (UB) — verify offsets outside +-0x2000 cannot reach this path. */
static inline void tcg_out_ld_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
                                  TCGArg arg1, tcg_target_long arg2)
{
    if (arg2 == ((int16_t)arg2 >> 2) << 2) {
        /* Small offset: fold it into a single adds. */
        tcg_out_bundle(s, MmI,
                       tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
                                  TCG_REG_R2, arg2, arg1),
                       tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
                       INSN_NOP_I);
    } else {
        /* Large offset: build it with movl, then add the base. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
        tcg_out_bundle(s, MmI,
                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
                                   TCG_REG_R2, TCG_REG_R2, arg1),
                       tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
                       INSN_NOP_I);
    }
}
941
/* Store 'arg' to memory at arg1 + arg2, using the M4-format store
   opcode 'opc_m4' (st1/st2/st4/st8).  Mirrors tcg_out_ld_rel: the
   address is formed in scratch register r2.
   NOTE(review): same imm14-vs-int16 concern as tcg_out_ld_rel — the
   fast path admits offsets adds cannot encode; verify callers. */
static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
                                  TCGArg arg1, tcg_target_long arg2)
{
    if (arg2 == ((int16_t)arg2 >> 2) << 2) {
        /* Small offset: fold it into a single adds. */
        tcg_out_bundle(s, MmI,
                       tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
                                  TCG_REG_R2, arg2, arg1),
                       tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
                       INSN_NOP_I);
    } else {
        /* Large offset: build it with movl, then add the base. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
        tcg_out_bundle(s, MmI,
                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
                                   TCG_REG_R2, TCG_REG_R2, arg1),
                       tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
                       INSN_NOP_I);
    }
}
960
961 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
962 TCGReg arg1, intptr_t arg2)
963 {
964 if (type == TCG_TYPE_I32) {
965 tcg_out_ld_rel(s, OPC_LD4_M1, arg, arg1, arg2);
966 } else {
967 tcg_out_ld_rel(s, OPC_LD8_M1, arg, arg1, arg2);
968 }
969 }
970
971 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
972 TCGReg arg1, intptr_t arg2)
973 {
974 if (type == TCG_TYPE_I32) {
975 tcg_out_st_rel(s, OPC_ST4_M4, arg, arg1, arg2);
976 } else {
977 tcg_out_st_rel(s, OPC_ST8_M4, arg, arg1, arg2);
978 }
979 }
980
/* Emit a two-operand ALU op 'ret = arg1 <op> arg2' in a single bundle.
   opc_a1 is the register-register A1 form; opc_a3 (may be 0) is the
   immediate A3 form usable when arg1 is a small constant.  Constant
   operands that cannot be encoded directly are first materialized into
   scratch registers r2/r3 in the earlier slots of the same bundle. */
static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, uint64_t opc_a3,
                               TCGReg ret, TCGArg arg1, int const_arg1,
                               TCGArg arg2, int const_arg2)
{
    uint64_t opc1 = 0, opc2 = 0, opc3 = 0;

    if (const_arg2 && arg2 != 0) {
        /* Constant rhs: load it into r3 (a zero constant can use r0
           directly, hence the != 0 test). */
        opc2 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R3, arg2);
        arg2 = TCG_REG_R3;
    }
    if (const_arg1 && arg1 != 0) {
        if (opc_a3 && arg1 == (int8_t)arg1) {
            /* Small constant lhs: use the A3 immediate form directly. */
            opc3 = tcg_opc_a3(TCG_REG_P0, opc_a3, ret, arg1, arg2);
        } else {
            /* Otherwise materialize it into r2. */
            opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, arg1);
            arg1 = TCG_REG_R2;
        }
    }
    if (opc3 == 0) {
        opc3 = tcg_opc_a1(TCG_REG_P0, opc_a1, ret, arg1, arg2);
    }

    /* Template: mII when a movi occupies the first slot, miI otherwise. */
    tcg_out_bundle(s, (opc1 || opc2 ? mII : miI),
                   opc1 ? opc1 : INSN_NOP_M,
                   opc2 ? opc2 : INSN_NOP_I,
                   opc3);
}
1008
1009 static inline void tcg_out_add(TCGContext *s, TCGReg ret, TCGReg arg1,
1010 TCGArg arg2, int const_arg2)
1011 {
1012 if (const_arg2 && arg2 == sextract64(arg2, 0, 14)) {
1013 tcg_out_bundle(s, mmI,
1014 INSN_NOP_M,
1015 INSN_NOP_M,
1016 tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, arg2, arg1));
1017 } else {
1018 tcg_out_alu(s, OPC_ADD_A1, 0, ret, arg1, 0, arg2, const_arg2);
1019 }
1020 }
1021
1022 static inline void tcg_out_sub(TCGContext *s, TCGReg ret, TCGArg arg1,
1023 int const_arg1, TCGArg arg2, int const_arg2)
1024 {
1025 if (!const_arg1 && const_arg2 && -arg2 == sextract64(-arg2, 0, 14)) {
1026 tcg_out_bundle(s, mmI,
1027 INSN_NOP_M,
1028 INSN_NOP_M,
1029 tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, -arg2, arg1));
1030 } else {
1031 tcg_out_alu(s, OPC_SUB_A1, OPC_SUB_A3, ret,
1032 arg1, const_arg1, arg2, const_arg2);
1033 }
1034 }
1035
1036 static inline void tcg_out_eqv(TCGContext *s, TCGArg ret,
1037 TCGArg arg1, int const_arg1,
1038 TCGArg arg2, int const_arg2)
1039 {
1040 tcg_out_bundle(s, mII,
1041 INSN_NOP_M,
1042 tcg_opc_a1 (TCG_REG_P0, OPC_XOR_A1, ret, arg1, arg2),
1043 tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
1044 }
1045
1046 static inline void tcg_out_nand(TCGContext *s, TCGArg ret,
1047 TCGArg arg1, int const_arg1,
1048 TCGArg arg2, int const_arg2)
1049 {
1050 tcg_out_bundle(s, mII,
1051 INSN_NOP_M,
1052 tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, ret, arg1, arg2),
1053 tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
1054 }
1055
1056 static inline void tcg_out_nor(TCGContext *s, TCGArg ret,
1057 TCGArg arg1, int const_arg1,
1058 TCGArg arg2, int const_arg2)
1059 {
1060 tcg_out_bundle(s, mII,
1061 INSN_NOP_M,
1062 tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, arg2),
1063 tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
1064 }
1065
1066 static inline void tcg_out_orc(TCGContext *s, TCGArg ret,
1067 TCGArg arg1, int const_arg1,
1068 TCGArg arg2, int const_arg2)
1069 {
1070 tcg_out_bundle(s, mII,
1071 INSN_NOP_M,
1072 tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, TCG_REG_R2, -1, arg2),
1073 tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, TCG_REG_R2));
1074 }
1075
/* Integer multiply: ia64 has no GR-to-GR multiply, so route the
   operands through FP registers f6/f7 and use xma.l (fixed-point
   multiply-add with f0 as the zero addend), then move the low 64 bits
   of the product back to 'ret'. */
static inline void tcg_out_mul(TCGContext *s, TCGArg ret,
                               TCGArg arg1, TCGArg arg2)
{
    /* Transfer both operands into the FP significands. */
    tcg_out_bundle(s, mmI,
                   tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F6, arg1),
                   tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F7, arg2),
                   INSN_NOP_I);
    /* f6 = f6 * f7 + f0 (f0 is zero). */
    tcg_out_bundle(s, mmF,
                   INSN_NOP_M,
                   INSN_NOP_M,
                   tcg_opc_f2 (TCG_REG_P0, OPC_XMA_L_F2, TCG_REG_F6, TCG_REG_F6,
                               TCG_REG_F7, TCG_REG_F0));
    /* Move the significand (low 64 bits of the product) back. */
    tcg_out_bundle(s, miI,
                   tcg_opc_m19(TCG_REG_P0, OPC_GETF_SIG_M19, ret, TCG_REG_F6),
                   INSN_NOP_I,
                   INSN_NOP_I);
}
1093
1094 static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
1095 TCGArg arg2, int const_arg2)
1096 {
1097 if (const_arg2) {
1098 tcg_out_bundle(s, miI,
1099 INSN_NOP_M,
1100 INSN_NOP_I,
1101 tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
1102 ret, arg1, arg2, 31 - arg2));
1103 } else {
1104 tcg_out_bundle(s, mII,
1105 tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3,
1106 TCG_REG_R3, 0x1f, arg2),
1107 tcg_opc_i29(TCG_REG_P0, OPC_SXT4_I29, TCG_REG_R2, arg1),
1108 tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret,
1109 TCG_REG_R2, TCG_REG_R3));
1110 }
1111 }
1112
1113 static inline void tcg_out_sar_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
1114 TCGArg arg2, int const_arg2)
1115 {
1116 if (const_arg2) {
1117 tcg_out_bundle(s, miI,
1118 INSN_NOP_M,
1119 INSN_NOP_I,
1120 tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
1121 ret, arg1, arg2, 63 - arg2));
1122 } else {
1123 tcg_out_bundle(s, miI,
1124 INSN_NOP_M,
1125 INSN_NOP_I,
1126 tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, arg1, arg2));
1127 }
1128 }
1129
1130 static inline void tcg_out_shl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
1131 TCGArg arg2, int const_arg2)
1132 {
1133 if (const_arg2) {
1134 tcg_out_bundle(s, miI,
1135 INSN_NOP_M,
1136 INSN_NOP_I,
1137 tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
1138 arg1, 63 - arg2, 31 - arg2));
1139 } else {
1140 tcg_out_bundle(s, mII,
1141 INSN_NOP_M,
1142 tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R2,
1143 0x1f, arg2),
1144 tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
1145 arg1, TCG_REG_R2));
1146 }
1147 }
1148
1149 static inline void tcg_out_shl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
1150 TCGArg arg2, int const_arg2)
1151 {
1152 if (const_arg2) {
1153 tcg_out_bundle(s, miI,
1154 INSN_NOP_M,
1155 INSN_NOP_I,
1156 tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
1157 arg1, 63 - arg2, 63 - arg2));
1158 } else {
1159 tcg_out_bundle(s, miI,
1160 INSN_NOP_M,
1161 INSN_NOP_I,
1162 tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
1163 arg1, arg2));
1164 }
1165 }
1166
1167 static inline void tcg_out_shr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
1168 TCGArg arg2, int const_arg2)
1169 {
1170 if (const_arg2) {
1171 tcg_out_bundle(s, miI,
1172 INSN_NOP_M,
1173 INSN_NOP_I,
1174 tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
1175 arg1, arg2, 31 - arg2));
1176 } else {
1177 tcg_out_bundle(s, mII,
1178 tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
1179 0x1f, arg2),
1180 tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R2, arg1),
1181 tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
1182 TCG_REG_R2, TCG_REG_R3));
1183 }
1184 }
1185
1186 static inline void tcg_out_shr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
1187 TCGArg arg2, int const_arg2)
1188 {
1189 if (const_arg2) {
1190 tcg_out_bundle(s, miI,
1191 INSN_NOP_M,
1192 INSN_NOP_I,
1193 tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
1194 arg1, arg2, 63 - arg2));
1195 } else {
1196 tcg_out_bundle(s, miI,
1197 INSN_NOP_M,
1198 INSN_NOP_I,
1199 tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
1200 arg1, arg2));
1201 }
1202 }
1203
/* Emit a 32-bit rotate left.  unpack4.l with identical operands replicates
   the low 32 bits of arg1 into both halves of R2, so a right shift of the
   doubled value produces the rotation.  Constant counts extract 32 bits at
   offset (32 - arg2); register counts compute the shift as 32 - (arg2 & 31)
   and use shr.u.  */
static inline void tcg_out_rotl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
                                    TCGArg arg2, int const_arg2)
{
    if (const_arg2) {
        tcg_out_bundle(s, mII,
                       INSN_NOP_M,
                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
                                   TCG_REG_R2, arg1, arg1),
                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
                                   TCG_REG_R2, 32 - arg2, 31));
    } else {
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
                                   TCG_REG_R2, arg1, arg1),
                       tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
                                   0x1f, arg2));
        tcg_out_bundle(s, mII,
                       INSN_NOP_M,
                       tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R3,
                                   0x20, TCG_REG_R3),
                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
                                   TCG_REG_R2, TCG_REG_R3));
    }
}
1229
/* Emit a 64-bit rotate left.  A constant count uses the shrp pair-shift
   with arg1 in both halves, shifting right by 64 - arg2.  A register count
   synthesizes the rotate as (arg1 << arg2) | (arg1 >> (64 - arg2)) using
   R2/R3 as temporaries, OR-combined in a second bundle.  */
static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
                                    TCGArg arg2, int const_arg2)
{
    if (const_arg2) {
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       INSN_NOP_I,
                       tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
                                   arg1, 0x40 - arg2));
    } else {
        tcg_out_bundle(s, mII,
                       tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2,
                                   0x40, arg2),
                       tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R3,
                                   arg1, arg2),
                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R2,
                                   arg1, TCG_REG_R2));
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       INSN_NOP_I,
                       tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
                                   TCG_REG_R2, TCG_REG_R3));
    }
}
1254
/* Emit a 32-bit rotate right.  As in tcg_out_rotl_i32, unpack4.l doubles
   the 32-bit value into R2; a right shift by the (masked) count then yields
   the rotation directly — no 32 - count adjustment is needed.  */
static inline void tcg_out_rotr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
                                    TCGArg arg2, int const_arg2)
{
    if (const_arg2) {
        tcg_out_bundle(s, mII,
                       INSN_NOP_M,
                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
                                   TCG_REG_R2, arg1, arg1),
                       tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
                                   TCG_REG_R2, arg2, 31));
    } else {
        tcg_out_bundle(s, mII,
                       tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
                                   0x1f, arg2),
                       tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
                                   TCG_REG_R2, arg1, arg1),
                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
                                   TCG_REG_R2, TCG_REG_R3));
    }
}
1275
/* Emit a 64-bit rotate right.  A constant count uses shrp with arg1 in
   both halves shifted right by arg2.  A register count synthesizes
   (arg1 >> arg2) | (arg1 << (64 - arg2)) via R2/R3, OR-combined in a
   second bundle.  */
static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
                                    TCGArg arg2, int const_arg2)
{
    if (const_arg2) {
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       INSN_NOP_I,
                       tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
                                   arg1, arg2));
    } else {
        tcg_out_bundle(s, mII,
                       tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2,
                                   0x40, arg2),
                       tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R3,
                                   arg1, arg2),
                       tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R2,
                                   arg1, TCG_REG_R2));
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       INSN_NOP_I,
                       tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
                                   TCG_REG_R2, TCG_REG_R3));
    }
}
1300
/* I29-unit extension opcodes indexed by (memop & MO_SSIZE): entries 0-2
   are zero extensions for 8/16/32 bits, entries 4-6 the sign extensions
   (MO_SIGN selects the second half).  The 64-bit slots are unused (0);
   callers handle MO_64 separately.  */
static const uint64_t opc_ext_i29[8] = {
    OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0,
    OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0
};
1305
/* Return the instruction word extending S into D per the memop's size and
   signedness.  A 64-bit "extension" degenerates to a plain register move;
   otherwise the zxt/sxt opcode is looked up in opc_ext_i29.  */
static inline uint64_t tcg_opc_ext_i(int qp, TCGMemOp opc, TCGReg d, TCGReg s)
{
    if ((opc & MO_SIZE) == MO_64) {
        return tcg_opc_mov_a(qp, d, s);
    } else {
        return tcg_opc_i29(qp, opc_ext_i29[opc & MO_SSIZE], d, s);
    }
}
1314
/* Emit one bundle performing a single I29-unit extension (zxt/sxt):
   ret = ext(arg), with the specific opcode supplied by the caller.  */
static inline void tcg_out_ext(TCGContext *s, uint64_t opc_i29,
                               TCGArg ret, TCGArg arg)
{
    tcg_out_bundle(s, miI,
                   INSN_NOP_M,
                   INSN_NOP_I,
                   tcg_opc_i29(TCG_REG_P0, opc_i29, ret, arg));
}
1323
/* Return the instruction word for a full 64-bit byte swap of S into D:
   mux1 with the 0xb (@rev) permutation reverses all eight bytes.  */
static inline uint64_t tcg_opc_bswap64_i(int qp, TCGReg d, TCGReg s)
{
    return tcg_opc_i3(qp, OPC_MUX1_I3, d, s, 0xb);
}
1328
/* Emit a 16-bit byte swap: dep.z places the low 16 bits of arg at the top
   of ret, then the full 64-bit byte reversal brings the swapped halfword
   back down into the low 16 bits (upper bits end up zero).  */
static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg)
{
    tcg_out_bundle(s, mII,
                   INSN_NOP_M,
                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 15, 15),
                   tcg_opc_bswap64_i(TCG_REG_P0, ret, ret));
}
1336
/* Emit a 32-bit byte swap: dep.z places the low 32 bits of arg in the high
   half of ret, then the full 64-bit byte reversal leaves the swapped word
   in the low 32 bits (upper bits end up zero).  */
static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg)
{
    tcg_out_bundle(s, mII,
                   INSN_NOP_M,
                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 31, 31),
                   tcg_opc_bswap64_i(TCG_REG_P0, ret, ret));
}
1344
/* Emit a 64-bit byte swap via the single mux1 @rev instruction.  */
static inline void tcg_out_bswap64(TCGContext *s, TCGArg ret, TCGArg arg)
{
    tcg_out_bundle(s, miI,
                   INSN_NOP_M,
                   INSN_NOP_I,
                   tcg_opc_bswap64_i(TCG_REG_P0, ret, arg));
}
1352
1353 static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1,
1354 TCGArg a2, int const_a2, int pos, int len)
1355 {
1356 uint64_t i1 = 0, i2 = 0;
1357 int cpos = 63 - pos, lm1 = len - 1;
1358
1359 if (const_a2) {
1360 /* Truncate the value of a constant a2 to the width of the field. */
1361 int mask = (1u << len) - 1;
1362 a2 &= mask;
1363
1364 if (a2 == 0 || a2 == mask) {
1365 /* 1-bit signed constant inserted into register. */
1366 i2 = tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, ret, a2, a1, cpos, lm1);
1367 } else {
1368 /* Otherwise, load any constant into a temporary. Do this into
1369 the first I slot to help out with cross-unit delays. */
1370 i1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, a2);
1371 a2 = TCG_REG_R2;
1372 }
1373 }
1374 if (i2 == 0) {
1375 i2 = tcg_opc_i15(TCG_REG_P0, OPC_DEP_I15, ret, a2, a1, cpos, lm1);
1376 }
1377 tcg_out_bundle(s, (i1 ? mII : miI),
1378 INSN_NOP_M,
1379 i1 ? i1 : INSN_NOP_I,
1380 i2);
1381 }
1382
/* Return the compare instruction word for a TCG condition, setting P6 when
   the condition holds and P7 when it does not.  CMP4 selects the 32-bit
   (cmp4) rather than 64-bit (cmp) forms.  Only eq/lt/ltu exist in hardware;
   the remaining conditions are derived by swapping the target predicates
   (for negation) and/or the operands (for the gt/le family).  */
static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1,
                                     TCGArg arg2, int cmp4)
{
    uint64_t opc_eq_a6, opc_lt_a6, opc_ltu_a6;

    if (cmp4) {
        opc_eq_a6 = OPC_CMP4_EQ_A6;
        opc_lt_a6 = OPC_CMP4_LT_A6;
        opc_ltu_a6 = OPC_CMP4_LTU_A6;
    } else {
        opc_eq_a6 = OPC_CMP_EQ_A6;
        opc_lt_a6 = OPC_CMP_LT_A6;
        opc_ltu_a6 = OPC_CMP_LTU_A6;
    }

    switch (cond) {
    case TCG_COND_EQ:
        return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2);
    case TCG_COND_NE:
        return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2);
    case TCG_COND_LT:
        return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2);
    case TCG_COND_LTU:
        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2);
    case TCG_COND_GE:
        return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2);
    case TCG_COND_GEU:
        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2);
    case TCG_COND_LE:
        return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1);
    case TCG_COND_LEU:
        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1);
    case TCG_COND_GT:
        return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1);
    case TCG_COND_GTU:
        return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1);
    default:
        tcg_abort();
        break;
    }
}
1424
/* Emit a conditional branch: compare arg1/arg2 into (P6, P7), then branch
   to label L predicated on P6.  An unresolved label records a relocation
   and reuses the current displacement bits as the placeholder.  */
static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                                  TCGReg arg2, TCGLabel *l, int cmp4)
{
    uint64_t imm;

    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again.  This ensures that caches and
       memory are kept coherent during retranslation.  */
    if (l->has_value) {
        imm = l->u.value_ptr - s->code_ptr;
    } else {
        imm = get_reloc_pcrel21b_slot2(s->code_ptr);
        tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, l, 0);
    }

    tcg_out_bundle(s, miB,
                   INSN_NOP_M,
                   tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
                   tcg_opc_b1(TCG_REG_P6, OPC_BR_DPTK_FEW_B1, imm));
}
1445
/* Emit setcond: compare into (P6, P7), then write 1 to ret under P6 and
   0 under P7 — exactly one of the two predicated moves executes.  */
static inline void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg ret,
                                   TCGArg arg1, TCGArg arg2, int cmp4)
{
    tcg_out_bundle(s, MmI,
                   tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
                   tcg_opc_movi_a(TCG_REG_P6, ret, 1),
                   tcg_opc_movi_a(TCG_REG_P7, ret, 0));
}
1454
/* Emit movcond: compare c1/c2 into (P6, P7), then move v1 into ret under
   P6 and v2 under P7.  Constant values use movi; a value already living in
   ret needs no move at all and its slot degrades to a nop.  */
static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
                                   TCGArg c1, TCGArg c2,
                                   TCGArg v1, int const_v1,
                                   TCGArg v2, int const_v2, int cmp4)
{
    uint64_t opc1, opc2;

    if (const_v1) {
        opc1 = tcg_opc_movi_a(TCG_REG_P6, ret, v1);
    } else if (ret == v1) {
        opc1 = INSN_NOP_M;
    } else {
        opc1 = tcg_opc_mov_a(TCG_REG_P6, ret, v1);
    }
    if (const_v2) {
        opc2 = tcg_opc_movi_a(TCG_REG_P7, ret, v2);
    } else if (ret == v2) {
        opc2 = INSN_NOP_I;
    } else {
        opc2 = tcg_opc_mov_a(TCG_REG_P7, ret, v2);
    }

    tcg_out_bundle(s, MmI,
                   tcg_opc_cmp_a(TCG_REG_P0, cond, c1, c2, cmp4),
                   opc1,
                   opc2);
}
1482
1483 #if defined(CONFIG_SOFTMMU)
/* We're expecting to use a signed 22-bit immediate add. */
1485 QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
1486 > 0x1fffff)
1487
/* Load and compare a TLB entry, and return the result in (p6, p7).
   R2 is loaded with the addend TLB entry.
   R57 is loaded with the address, zero extended on 32-bit targets.
   R1, R3 are clobbered, leaving R56 free for...
   BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store.

   OFF_RW/OFF_ADD are the CPUArchState offsets of the comparator and addend
   fields of tlb_table[mem_index][0]; the caller-supplied BSWAP1/BSWAP2
   instruction words fill otherwise-idle slots.  */
static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
                                    TCGMemOp s_bits, int off_rw, int off_add,
                                    uint64_t bswap1, uint64_t bswap2)
{
    /*
       .mii
       mov r2 = off_rw
       extr.u r3 = addr_reg, ...          # extract tlb page
       zxt4 r57 = addr_reg                # or mov for 64-bit guest
       ;;
       .mii
       addl r2 = r2, areg0
       shl r3 = r3, cteb                  # via dep.z
       dep r1 = 0, r57, ...               # zero page ofs, keep align
       ;;
       .mmi
       add r2 = r2, r3
       ;;
       ld4 r3 = [r2], off_add-off_rw      # or ld8 for 64-bit guest
       nop
       ;;
       .mmi
       nop
       cmp.eq p6, p7 = r3, r58
       nop
       ;;
     */
    tcg_out_bundle(s, miI,
                   tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw),
                   tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3,
                               addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
                   tcg_opc_ext_i(TCG_REG_P0,
                                 TARGET_LONG_BITS == 32 ? MO_UL : MO_Q,
                                 TCG_REG_R57, addr_reg));
    tcg_out_bundle(s, miI,
                   tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                               TCG_REG_R2, TCG_AREG0),
                   tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R3,
                               TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS,
                               63 - CPU_TLB_ENTRY_BITS),
                   /* Clear the in-page offset bits but keep the low
                      alignment bits, so an unaligned access compares
                      not-equal with the page-aligned comparator.  */
                   tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0,
                               TCG_REG_R57, 63 - s_bits,
                               TARGET_PAGE_BITS - s_bits - 1));
    tcg_out_bundle(s, MmI,
                   tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
                               TCG_REG_R2, TCG_REG_R2, TCG_REG_R3),
                   tcg_opc_m3 (TCG_REG_P0,
                               (TARGET_LONG_BITS == 32
                                ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R3,
                               TCG_REG_R2, off_add - off_rw),
                   bswap1);
    tcg_out_bundle(s, mmI,
                   tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, TCG_REG_R2),
                   tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6,
                               TCG_REG_P7, TCG_REG_R1, TCG_REG_R3),
                   bswap2);
}
1550
/* One recorded softmmu slow-path call site, queued per-TB and patched to
   point at the shared helper thunk in tcg_out_tb_finalize().  */
typedef struct TCGLabelQemuLdst {
    bool is_ld;                    /* true for a load, false for a store */
    TCGMemOp size;                 /* access size (opc & MO_SIZE) */
    tcg_insn_unit *label_ptr;      /* label pointers to be updated */
    struct TCGLabelQemuLdst *next; /* singly-linked list, newest first */
} TCGLabelQemuLdst;
1557
/* Per-TB backend state: the list of slow-path sites awaiting fixup.  */
typedef struct TCGBackendData {
    TCGLabelQemuLdst *labels;
} TCGBackendData;
1561
/* Reset the slow-path label list at the start of each translation block.  */
static inline void tcg_out_tb_init(TCGContext *s)
{
    s->be->labels = NULL;
}
1566
/* Record a softmmu load/store slow-path call site for later fixup,
   pushing it onto the front of the per-TB label list.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                tcg_insn_unit *label_ptr)
{
    TCGBackendData *be = s->be;
    TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));

    l->is_ld = is_ld;
    l->size = opc & MO_SIZE;
    l->label_ptr = label_ptr;
    l->next = be->labels;
    be->labels = l;
}
1579
/* Emit (at most one per helper) the out-of-line slow-path thunks for this
   TB and patch every recorded call site to branch to its thunk.  The
   helpers[] table is indexed by is_ld * 4 + size: stores 0-3, loads 4-7.  */
static void tcg_out_tb_finalize(TCGContext *s)
{
    static const void * const helpers[8] = {
        helper_ret_stb_mmu,
        helper_le_stw_mmu,
        helper_le_stl_mmu,
        helper_le_stq_mmu,
        helper_ret_ldub_mmu,
        helper_le_lduw_mmu,
        helper_le_ldul_mmu,
        helper_le_ldq_mmu,
    };
    tcg_insn_unit *thunks[8] = { };
    TCGLabelQemuLdst *l;

    for (l = s->be->labels; l != NULL; l = l->next) {
        long x = l->is_ld * 4 + l->size;
        tcg_insn_unit *dest = thunks[x];

        /* The out-of-line thunks are all the same; load the return address
           from B0, load the GP, and branch to the code.  Note that we are
           always post-call, so the register window has rolled, so we're
           using incoming parameter register numbers, not outgoing.  */
        if (dest == NULL) {
            /* ia64 function pointers are descriptors: entry point at
               desc[0], the callee's GP at desc[1].  */
            uintptr_t *desc = (uintptr_t *)helpers[x];
            uintptr_t func = desc[0], gp = desc[1], disp;

            thunks[x] = dest = s->code_ptr;

            tcg_out_bundle(s, mlx,
                           INSN_NOP_M,
                           tcg_opc_l2 (gp),
                           tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
                                       TCG_REG_R1, gp));
            tcg_out_bundle(s, mii,
                           INSN_NOP_M,
                           INSN_NOP_I,
                           tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
                                       l->is_ld ? TCG_REG_R35 : TCG_REG_R36,
                                       TCG_REG_B0));
            disp = (tcg_insn_unit *)func - s->code_ptr;
            tcg_out_bundle(s, mLX,
                           INSN_NOP_M,
                           tcg_opc_l3 (disp),
                           tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp));
        }

        /* Point the recorded call-site branch at the (possibly shared)
           thunk.  */
        reloc_pcrel21b_slot2(l->label_ptr, dest);
    }
}
1630
/* Softmmu guest load: probe the TLB, then either load directly (TLB hit,
   P6) or call the slow-path helper thunk (miss, P7).  args are
   { data_reg, addr_reg, memop+mmu_idx }.  */
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
{
    static const uint64_t opc_ld_m1[4] = {
        OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
    };
    int addr_reg, data_reg, mem_index;
    TCGMemOpIdx oi;
    TCGMemOp opc, s_bits;
    uint64_t fin1, fin2;
    tcg_insn_unit *label_ptr;

    data_reg = args[0];
    addr_reg = args[1];
    oi = args[2];
    opc = get_memop(oi);
    mem_index = get_mmuidx(oi);
    s_bits = opc & MO_SIZE;

    /* Read the TLB entry */
    tcg_out_qemu_tlb(s, addr_reg, s_bits,
                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
                     offsetof(CPUArchState, tlb_table[mem_index][0].addend),
                     INSN_NOP_I, INSN_NOP_I);

    /* P6 is the fast path, and P7 the slow path */

    /* Build the finishing insns applied to the loaded value in R8:
       either byte-swap (plus a re-extend for sub-64-bit sizes) or a
       plain zero/sign extension.  */
    fin2 = 0;
    if (opc & MO_BSWAP) {
        fin1 = tcg_opc_bswap64_i(TCG_REG_P0, data_reg, TCG_REG_R8);
        if (s_bits < MO_64) {
            /* The swapped value sits in the high bits; shift it down,
               with extr (signed) or extr.u per the memop.  */
            int shift = 64 - (8 << s_bits);
            fin2 = (opc & MO_SIGN ? OPC_EXTR_I11 : OPC_EXTR_U_I11);
            fin2 = tcg_opc_i11(TCG_REG_P0, fin2,
                               data_reg, data_reg, shift, 63 - shift);
        }
    } else {
        fin1 = tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8);
    }

    /* Fast path: host address = addend (R2) + guest address (R57).
       Slow path: marshal env and mem_index for the helper call.  */
    tcg_out_bundle(s, mmI,
                   tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
                               TCG_REG_R2, TCG_REG_R57),
                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index));
    label_ptr = s->code_ptr;
    tcg_out_bundle(s, miB,
                   tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
                               TCG_REG_R8, TCG_REG_R2),
                   INSN_NOP_I,
                   tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
                               get_reloc_pcrel21b_slot2(label_ptr)));

    add_qemu_ldst_label(s, 1, opc, label_ptr);

    /* Note that we always use LE helper functions, so the bswap insns
       here for the fast path also apply to the slow path.  */
    tcg_out_bundle(s, (fin2 ? mII : miI),
                   INSN_NOP_M,
                   fin1,
                   fin2 ? fin2 : INSN_NOP_I);
}
1692
/* Softmmu guest store: byte-swap/extend the data into R58 (done inside the
   TLB probe's spare slots), then either store directly (TLB hit, P6) or
   call the slow-path helper thunk (miss, P7).  args are
   { data_reg, addr_reg, memop+mmu_idx }.  */
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
    static const uint64_t opc_st_m4[4] = {
        OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
    };
    TCGReg addr_reg, data_reg;
    int mem_index;
    uint64_t pre1, pre2;
    TCGMemOpIdx oi;
    TCGMemOp opc, s_bits;
    tcg_insn_unit *label_ptr;

    data_reg = args[0];
    addr_reg = args[1];
    oi = args[2];
    opc = get_memop(oi);
    mem_index = get_mmuidx(oi);
    s_bits = opc & MO_SIZE;

    /* Note that we always use LE helper functions, so the bswap insns
       that are here for the fast path also apply to the slow path,
       and move the data into the argument register.  */
    pre2 = INSN_NOP_I;
    if (opc & MO_BSWAP) {
        pre1 = tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R58, data_reg);
        if (s_bits < MO_64) {
            /* Shift the swapped value down from the high bits.  */
            int shift = 64 - (8 << s_bits);
            pre2 = tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11,
                               TCG_REG_R58, TCG_REG_R58, shift, 63 - shift);
        }
    } else {
        /* Just move the data into place for the slow path. */
        pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg);
    }

    tcg_out_qemu_tlb(s, addr_reg, s_bits,
                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
                     offsetof(CPUArchState, tlb_table[mem_index][0].addend),
                     pre1, pre2);

    /* P6 is the fast path, and P7 the slow path */
    tcg_out_bundle(s, mmI,
                   tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
                   tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
                               TCG_REG_R2, TCG_REG_R57),
                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index));
    label_ptr = s->code_ptr;
    tcg_out_bundle(s, miB,
                   tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],
                               TCG_REG_R58, TCG_REG_R2),
                   INSN_NOP_I,
                   tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
                               get_reloc_pcrel21b_slot2(label_ptr)));

    add_qemu_ldst_label(s, 0, opc, label_ptr);
}
1749
1750 #else /* !CONFIG_SOFTMMU */
1751 # include "tcg-be-null.h"
1752
/* User-mode guest load (no TLB): compute the host address (optionally
   offset by GUEST_BASE, zero-extending the guest address on 32-bit
   targets), load, then byte-swap and/or sign/zero-extend per the memop.
   args are { data_reg, addr_reg, memop }.  */
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
{
    static uint64_t const opc_ld_m1[4] = {
        OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
    };
    int addr_reg, data_reg;
    TCGMemOp opc, s_bits, bswap;

    data_reg = args[0];
    addr_reg = args[1];
    opc = args[2];
    s_bits = opc & MO_SIZE;
    bswap = opc & MO_BSWAP;

#if TARGET_LONG_BITS == 32
    /* Zero-extend the 32-bit guest address; add GUEST_BASE if any.
       The final host address lands in R2.  */
    if (GUEST_BASE != 0) {
        tcg_out_bundle(s, mII,
                       INSN_NOP_M,
                       tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                   TCG_REG_R3, addr_reg),
                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                   TCG_GUEST_BASE_REG, TCG_REG_R3));
    } else {
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                   TCG_REG_R2, addr_reg),
                       INSN_NOP_I);
    }

    if (!bswap) {
        if (!(opc & MO_SIGN)) {
            /* Unsigned, no swap: the load alone suffices.  */
            tcg_out_bundle(s, miI,
                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                       data_reg, TCG_REG_R2),
                           INSN_NOP_I,
                           INSN_NOP_I);
        } else {
            /* Signed: follow the load with a sign extension.  */
            tcg_out_bundle(s, mII,
                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                       data_reg, TCG_REG_R2),
                           INSN_NOP_I,
                           tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
        }
    } else if (s_bits == MO_64) {
        tcg_out_bundle(s, mII,
                       tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                   data_reg, TCG_REG_R2),
                       INSN_NOP_I,
                       tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
    } else {
        /* Sub-64-bit swap: position the value at the top of the register
           with dep.z, then do a full 64-bit byte reversal.  */
        if (s_bits == MO_16) {
            tcg_out_bundle(s, mII,
                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                       data_reg, TCG_REG_R2),
                           INSN_NOP_I,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                       data_reg, data_reg, 15, 15));
        } else {
            tcg_out_bundle(s, mII,
                           tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                       data_reg, TCG_REG_R2),
                           INSN_NOP_I,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                       data_reg, data_reg, 31, 31));
        }
        if (!(opc & MO_SIGN)) {
            tcg_out_bundle(s, miI,
                           INSN_NOP_M,
                           INSN_NOP_I,
                           tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
        } else {
            tcg_out_bundle(s, mII,
                           INSN_NOP_M,
                           tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg),
                           tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
        }
    }
#else
    /* 64-bit guest: the address can be used directly, or added to
       GUEST_BASE into R2.  */
    if (GUEST_BASE != 0) {
        tcg_out_bundle(s, MmI,
                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                   TCG_GUEST_BASE_REG, addr_reg),
                       tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                   data_reg, TCG_REG_R2),
                       INSN_NOP_I);
    } else {
        tcg_out_bundle(s, mmI,
                       INSN_NOP_M,
                       tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
                                   data_reg, addr_reg),
                       INSN_NOP_I);
    }

    if (bswap && s_bits == MO_16) {
        tcg_out_bundle(s, mII,
                       INSN_NOP_M,
                       tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                   data_reg, data_reg, 15, 15),
                       tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
    } else if (bswap && s_bits == MO_32) {
        tcg_out_bundle(s, mII,
                       INSN_NOP_M,
                       tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                   data_reg, data_reg, 31, 31),
                       tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
    } else if (bswap && s_bits == MO_64) {
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       INSN_NOP_I,
                       tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
    }
    if (opc & MO_SIGN) {
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       INSN_NOP_I,
                       tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
    }
#endif
}
1873
/* User-mode guest store (no TLB): compute the host address (optionally
   offset by GUEST_BASE), byte-swap the data into R3 if required, then
   store.  args are { data_reg, addr_reg, memop }.  */
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
    static uint64_t const opc_st_m4[4] = {
        OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
    };
    int addr_reg, data_reg;
#if TARGET_LONG_BITS == 64
    uint64_t add_guest_base;
#endif
    TCGMemOp opc, s_bits, bswap;

    data_reg = args[0];
    addr_reg = args[1];
    opc = args[2];
    s_bits = opc & MO_SIZE;
    bswap = opc & MO_BSWAP;

#if TARGET_LONG_BITS == 32
    /* Zero-extend the 32-bit guest address; add GUEST_BASE if any.
       The final host address lands in R2.  */
    if (GUEST_BASE != 0) {
        tcg_out_bundle(s, mII,
                       INSN_NOP_M,
                       tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                   TCG_REG_R3, addr_reg),
                       tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                   TCG_GUEST_BASE_REG, TCG_REG_R3));
    } else {
        tcg_out_bundle(s, miI,
                       INSN_NOP_M,
                       tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
                                   TCG_REG_R2, addr_reg),
                       INSN_NOP_I);
    }

    if (bswap) {
        /* Swap the data into R3 first (dep.z to position sub-64-bit
           values at the top, then the full byte reversal).  */
        if (s_bits == MO_16) {
            tcg_out_bundle(s, mII,
                           INSN_NOP_M,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                       TCG_REG_R3, data_reg, 15, 15),
                           tcg_opc_bswap64_i(TCG_REG_P0,
                                             TCG_REG_R3, TCG_REG_R3));
            data_reg = TCG_REG_R3;
        } else if (s_bits == MO_32) {
            tcg_out_bundle(s, mII,
                           INSN_NOP_M,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                       TCG_REG_R3, data_reg, 31, 31),
                           tcg_opc_bswap64_i(TCG_REG_P0,
                                             TCG_REG_R3, TCG_REG_R3));
            data_reg = TCG_REG_R3;
        } else if (s_bits == MO_64) {
            tcg_out_bundle(s, miI,
                           INSN_NOP_M,
                           INSN_NOP_I,
                           tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg));
            data_reg = TCG_REG_R3;
        }
    }
    tcg_out_bundle(s, mmI,
                   tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                               data_reg, TCG_REG_R2),
                   INSN_NOP_M,
                   INSN_NOP_I);
#else
    /* 64-bit guest: prepare the optional GUEST_BASE add as a single
       instruction word, scheduled into whichever bundle follows.  */
    if (GUEST_BASE != 0) {
        add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
                                     TCG_GUEST_BASE_REG, addr_reg);
        addr_reg = TCG_REG_R2;
    } else {
        add_guest_base = INSN_NOP_M;
    }

    if (!bswap) {
        tcg_out_bundle(s, (GUEST_BASE ? MmI : mmI),
                       add_guest_base,
                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                                   data_reg, addr_reg),
                       INSN_NOP_I);
    } else {
        /* Swap the data into R3 (sharing the bundle with the address
           add), then store from R3.  */
        if (s_bits == MO_16) {
            tcg_out_bundle(s, mII,
                           add_guest_base,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                       TCG_REG_R3, data_reg, 15, 15),
                           tcg_opc_bswap64_i(TCG_REG_P0,
                                             TCG_REG_R3, TCG_REG_R3));
            data_reg = TCG_REG_R3;
        } else if (s_bits == MO_32) {
            tcg_out_bundle(s, mII,
                           add_guest_base,
                           tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
                                       TCG_REG_R3, data_reg, 31, 31),
                           tcg_opc_bswap64_i(TCG_REG_P0,
                                             TCG_REG_R3, TCG_REG_R3));
            data_reg = TCG_REG_R3;
        } else if (s_bits == MO_64) {
            tcg_out_bundle(s, miI,
                           add_guest_base,
                           INSN_NOP_I,
                           tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg));
            data_reg = TCG_REG_R3;
        }
        tcg_out_bundle(s, miI,
                       tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
                                   data_reg, addr_reg),
                       INSN_NOP_I,
                       INSN_NOP_I);
    }
#endif
}
1984
1985 #endif
1986
/* Central opcode dispatcher: emit code for one TCG op.  args/const_args
   are laid out per-op as declared in ia64_op_defs.  Ops not listed here
   (mov/movi/call) are emitted through dedicated paths and must not reach
   this switch.  */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_br(s, arg_label(args[0]));
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    /* Host loads/stores with register + constant offset.  */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]);
        tcg_out_ext(s, OPC_SXT1_I29, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]);
        tcg_out_ext(s, OPC_SXT2_I29, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]);
        tcg_out_ext(s, OPC_SXT4_I29, args[0], args[0]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld_rel(s, OPC_LD8_M1, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_st_rel(s, OPC_ST1_M4, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_st_rel(s, OPC_ST2_M4, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_st_rel(s, OPC_ST4_M4, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st_rel(s, OPC_ST8_M4, args[0], args[1], args[2]);
        break;

    /* Arithmetic.  */
    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
        tcg_out_add(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        tcg_out_sub(s, args[0], args[1], const_args[1], args[2], const_args[2]);
        break;

    /* Logicals.  */
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        /* TCG expects arg2 constant; A3 expects arg1 constant.  Swap. */
        tcg_out_alu(s, OPC_AND_A1, OPC_AND_A3, args[0],
                    args[2], const_args[2], args[1], const_args[1]);
        break;
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        tcg_out_alu(s, OPC_ANDCM_A1, OPC_ANDCM_A3, args[0],
                    args[1], const_args[1], args[2], const_args[2]);
        break;
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        tcg_out_eqv(s, args[0], args[1], const_args[1],
                    args[2], const_args[2]);
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out_nand(s, args[0], args[1], const_args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out_nor(s, args[0], args[1], const_args[1],
                    args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        /* TCG expects arg2 constant; A3 expects arg1 constant.  Swap. */
        tcg_out_alu(s, OPC_OR_A1, OPC_OR_A3, args[0],
                    args[2], const_args[2], args[1], const_args[1]);
        break;
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        tcg_out_orc(s, args[0], args[1], const_args[1],
                    args[2], const_args[2]);
        break;
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* TCG expects arg2 constant; A3 expects arg1 constant.  Swap. */
        tcg_out_alu(s, OPC_XOR_A1, OPC_XOR_A3, args[0],
                    args[2], const_args[2], args[1], const_args[1]);
        break;

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        tcg_out_mul(s, args[0], args[1], args[2]);
        break;

    /* Shifts and rotates.  */
    case INDEX_op_sar_i32:
        tcg_out_sar_i32(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sar_i64:
        tcg_out_sar_i64(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_shl_i32:
        tcg_out_shl_i32(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_shl_i64:
        tcg_out_shl_i64(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_shr_i32:
        tcg_out_shr_i32(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_shr_i64:
        tcg_out_shr_i64(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_rotl_i32:
        tcg_out_rotl_i32(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_rotl_i64:
        tcg_out_rotl_i64(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_rotr_i32:
        tcg_out_rotr_i32(s, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_rotr_i64:
        tcg_out_rotr_i64(s, args[0], args[1], args[2], const_args[2]);
        break;

    /* Extensions.  */
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext(s, OPC_SXT1_I29, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext(s, OPC_ZXT1_I29, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext(s, OPC_SXT2_I29, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext(s, OPC_ZXT2_I29, args[0], args[1]);
        break;
    case INDEX_op_ext32s_i64:
        tcg_out_ext(s, OPC_SXT4_I29, args[0], args[1]);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_ext(s, OPC_ZXT4_I29, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_bswap16(s, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        tcg_out_bswap32(s, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tcg_out_deposit(s, args[0], args[1], args[2], const_args[2],
                        args[3], args[4]);
        break;

    /* Conditionals: the final flag selects cmp4 (32-bit compare).  */
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], arg_label(args[3]), 1);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], arg_label(args[3]), 0);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], 1);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], 0);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, args[5], args[0], args[1], args[2],
                        args[3], const_args[3], args[4], const_args[4], 1);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], args[0], args[1], args[2],
                        args[3], const_args[3], args[4], const_args[4], 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
2219
/* Operand-constraint table for the ia64 backend: one entry per supported
   TCG opcode, pairing it with per-operand constraint strings (outputs
   first, then inputs).  The table is terminated by the { -1 } sentinel
   and handed to the core via tcg_add_target_add_op_defs() below.
   NOTE(review): the constraint letters are decoded by the backend's
   constraint parser elsewhere in this file.  From usage here, "r" is any
   general register; "Z" appears to additionally admit the zero register
   for inputs (used for store data and ALU sources); "I" and "i" admit
   immediates of some encodable range; "S" is used only for qemu_st data
   operands.  Confirm exact semantics against target_parse_constraint.  */
static const TCGTargetOpDef ia64_op_defs[] = {
    { INDEX_op_br, { } },
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    /* 32-bit loads/stores: guest-register offsets are always in a reg.  */
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rI" } },
    { INDEX_op_sub_i32, { "r", "rI", "rI" } },

    { INDEX_op_and_i32, { "r", "rI", "rI" } },
    { INDEX_op_andc_i32, { "r", "rI", "rI" } },
    { INDEX_op_eqv_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_nand_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_nor_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_or_i32, { "r", "rI", "rI" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_xor_i32, { "r", "rI", "rI" } },

    { INDEX_op_mul_i32, { "r", "rZ", "rZ" } },

    /* Shifts/rotates accept an immediate count ("i") or a register.  */
    { INDEX_op_sar_i32, { "r", "rZ", "ri" } },
    { INDEX_op_shl_i32, { "r", "rZ", "ri" } },
    { INDEX_op_shr_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
    { INDEX_op_rotr_i32, { "r", "rZ", "ri" } },

    { INDEX_op_ext8s_i32, { "r", "rZ"} },
    { INDEX_op_ext8u_i32, { "r", "rZ"} },
    { INDEX_op_ext16s_i32, { "r", "rZ"} },
    { INDEX_op_ext16u_i32, { "r", "rZ"} },

    { INDEX_op_bswap16_i32, { "r", "rZ" } },
    { INDEX_op_bswap32_i32, { "r", "rZ" } },

    { INDEX_op_brcond_i32, { "rZ", "rZ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    /* 64-bit variants mirror the 32-bit constraints exactly.  */
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "rZ", "r" } },
    { INDEX_op_st16_i64, { "rZ", "r" } },
    { INDEX_op_st32_i64, { "rZ", "r" } },
    { INDEX_op_st_i64, { "rZ", "r" } },

    { INDEX_op_add_i64, { "r", "rZ", "rI" } },
    { INDEX_op_sub_i64, { "r", "rI", "rI" } },

    { INDEX_op_and_i64, { "r", "rI", "rI" } },
    { INDEX_op_andc_i64, { "r", "rI", "rI" } },
    { INDEX_op_eqv_i64, { "r", "rZ", "rZ" } },
    { INDEX_op_nand_i64, { "r", "rZ", "rZ" } },
    { INDEX_op_nor_i64, { "r", "rZ", "rZ" } },
    { INDEX_op_or_i64, { "r", "rI", "rI" } },
    { INDEX_op_orc_i64, { "r", "rZ", "rZ" } },
    { INDEX_op_xor_i64, { "r", "rI", "rI" } },

    { INDEX_op_mul_i64, { "r", "rZ", "rZ" } },

    { INDEX_op_sar_i64, { "r", "rZ", "ri" } },
    { INDEX_op_shl_i64, { "r", "rZ", "ri" } },
    { INDEX_op_shr_i64, { "r", "rZ", "ri" } },
    { INDEX_op_rotl_i64, { "r", "rZ", "ri" } },
    { INDEX_op_rotr_i64, { "r", "rZ", "ri" } },

    { INDEX_op_ext8s_i64, { "r", "rZ"} },
    { INDEX_op_ext8u_i64, { "r", "rZ"} },
    { INDEX_op_ext16s_i64, { "r", "rZ"} },
    { INDEX_op_ext16u_i64, { "r", "rZ"} },
    { INDEX_op_ext32s_i64, { "r", "rZ"} },
    { INDEX_op_ext32u_i64, { "r", "rZ"} },

    { INDEX_op_bswap16_i64, { "r", "rZ" } },
    { INDEX_op_bswap32_i64, { "r", "rZ" } },
    { INDEX_op_bswap64_i64, { "r", "rZ" } },

    { INDEX_op_brcond_i64, { "rZ", "rZ" } },
    { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
    { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_deposit_i32, { "r", "rZ", "ri" } },
    { INDEX_op_deposit_i64, { "r", "rZ", "ri" } },

    /* Guest memory accesses: "S" constrains the store-data operand.  */
    { INDEX_op_qemu_ld_i32, { "r", "r" } },
    { INDEX_op_qemu_ld_i64, { "r", "r" } },
    { INDEX_op_qemu_st_i32, { "SZ", "r" } },
    { INDEX_op_qemu_st_i64, { "SZ", "r" } },

    /* End-of-table sentinel.  */
    { -1 },
};
2323
2324 /* Generate global QEMU prologue and epilogue code */
/* Generate global QEMU prologue and epilogue code.
 *
 * The prologue is the host-side entry trampoline into translated code:
 * it allocates a register-stack frame, saves the caller's state
 * (return branch register b0 into r33, ar.pfs into r34 via alloc),
 * carves out a C stack frame, and branches to the TB whose address
 * arrived in the second incoming argument register.  The epilogue
 * (reached via tb_ret_addr) undoes all of that and returns to the
 * caller of the prologue.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size;

    /* Reserve some stack space: outgoing call arguments plus the TCG
       temporary buffer, rounded up to the target stack alignment.  */
    frame_size = TCG_STATIC_CALL_ARGS_SIZE +
                 CPU_TEMP_BUF_NLONGS * sizeof(long);
    frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
                 ~(TCG_TARGET_STACK_ALIGN - 1);
    /* Tell the core where the temp buffer lives, just above the
       outgoing-argument area.  */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* First emit adhoc function descriptor: on ia64 a function pointer
       refers to a two-word descriptor (entry address, gp).  Real code
       starts in the bundle following the descriptor.  */
    *s->code_ptr = (tcg_insn_unit){
        (uint64_t)(s->code_ptr + 1), /* entry point */
        0                            /* skip gp */
    };
    s->code_ptr++;

    /* prologue: alloc a new frame, saving ar.pfs into r34
       (NOTE(review): 32/24 presumably select input/local vs. output
       sizes -- confirm against the M34 encoding), and copy the TB
       address from the incoming argument register r33 into b6 so we
       can branch to it.  */
    tcg_out_bundle(s, miI,
                   tcg_opc_m34(TCG_REG_P0, OPC_ALLOC_M34,
                               TCG_REG_R34, 32, 24, 0),
                   INSN_NOP_I,
                   tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
                               TCG_REG_B6, TCG_REG_R33, 0));

    /* ??? If GUEST_BASE < 0x200000, we could load the register via
       an ADDL in the M slot of the next bundle. */
    if (GUEST_BASE != 0) {
        /* movl the 64-bit guest base into its dedicated register and
           keep the allocator away from it.  */
        tcg_out_bundle(s, mlx,
                       INSN_NOP_M,
                       tcg_opc_l2 (GUEST_BASE),
                       tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
                                   TCG_GUEST_BASE_REG, GUEST_BASE));
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Push the C stack frame (r12 is sp), stash the return address
       b0 in r33 (reserved for this purpose in tcg_target_init), and
       jump into the translated block via b6.  */
    tcg_out_bundle(s, miB,
                   tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
                               TCG_REG_R12, -frame_size, TCG_REG_R12),
                   tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
                               TCG_REG_R33, TCG_REG_B0),
                   tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));

    /* epilogue: translated code branches here to return.  Restore b0
       from r33 and pop the C stack frame...  */
    tb_ret_addr = s->code_ptr;
    tcg_out_bundle(s, miI,
                   INSN_NOP_M,
                   tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
                               TCG_REG_B0, TCG_REG_R33, 0),
                   tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
                               TCG_REG_R12, frame_size, TCG_REG_R12));
    /* ...then restore ar.pfs from r34 (deallocating our register
       frame on the br.ret) and return to the prologue's caller.  */
    tcg_out_bundle(s, miB,
                   INSN_NOP_M,
                   tcg_opc_i26(TCG_REG_P0, OPC_MOV_I_I26,
                               TCG_REG_PFS, TCG_REG_R34),
                   tcg_opc_b4 (TCG_REG_P0, OPC_BR_RET_SPTK_MANY_B4,
                               TCG_REG_B0));
}
2385
2386 static void tcg_target_init(TCGContext *s)
2387 {
2388 tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32],
2389 0xffffffffffffffffull);
2390 tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64],
2391 0xffffffffffffffffull);
2392
2393 tcg_regset_clear(tcg_target_call_clobber_regs);
2394 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
2395 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
2396 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
2397 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
2398 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2399 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R15);
2400 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R16);
2401 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R17);
2402 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R18);
2403 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R19);
2404 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
2405 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
2406 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
2407 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
2408 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
2409 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
2410 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
2411 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R27);
2412 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R28);
2413 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R29);
2414 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R30);
2415 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R31);
2416 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R56);
2417 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R57);
2418 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R58);
2419 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R59);
2420 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R60);
2421 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R61);
2422 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R62);
2423 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R63);
2424
2425 tcg_regset_clear(s->reserved_regs);
2426 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* zero register */
2427 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* global pointer */
2428 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* internal use */
2429 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* internal use */
2430 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12); /* stack pointer */
2431 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
2432 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R33); /* return address */
2433 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R34); /* PFS */
2434
2435 /* The following 4 are not in use, are call-saved, but *not* saved
2436 by the prologue. Therefore we cannot use them without modifying
2437 the prologue. There doesn't seem to be any good reason to use
2438 these as opposed to the windowed registers. */
2439 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R4);
2440 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R5);
2441 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6);
2442 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R7);
2443
2444 tcg_add_target_add_op_defs(ia64_op_defs);
2445 }