1 /*
2 * QEMU AVR CPU
3 *
4 * Copyright (c) 2019-2020 Michael Rolnik
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see
18 * <http://www.gnu.org/licenses/lgpl-2.1.html>
19 */
20
21 #include "qemu/osdep.h"
22 #include "qemu/qemu-print.h"
23 #include "tcg/tcg.h"
24 #include "cpu.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/log.h"
31 #include "exec/translator.h"
32 #include "exec/gen-icount.h"
33
34 /*
35 * Define this if you want the BREAK instruction translated to a breakpoint.
36 * An active debugging connection is assumed.
37 * This is used by the
38 * https://github.com/seharris/qemu-avr-tests/tree/master/instruction-tests
39 * test suite.
40 */
41 #undef BREAKPOINT_ON_BREAK
42
43 static TCGv cpu_pc;
44
45 static TCGv cpu_Cf;
46 static TCGv cpu_Zf;
47 static TCGv cpu_Nf;
48 static TCGv cpu_Vf;
49 static TCGv cpu_Sf;
50 static TCGv cpu_Hf;
51 static TCGv cpu_Tf;
52 static TCGv cpu_If;
53
54 static TCGv cpu_rampD;
55 static TCGv cpu_rampX;
56 static TCGv cpu_rampY;
57 static TCGv cpu_rampZ;
58
59 static TCGv cpu_r[NUMBER_OF_CPU_REGISTERS];
60 static TCGv cpu_eind;
61 static TCGv cpu_sp;
62
63 static TCGv cpu_skip;
64
65 static const char reg_names[NUMBER_OF_CPU_REGISTERS][8] = {
66 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
67 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
68 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
69 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
70 };
71 #define REG(x) (cpu_r[x])
72
73 #define DISAS_EXIT DISAS_TARGET_0 /* We want to return to the cpu main loop. */
74 #define DISAS_LOOKUP DISAS_TARGET_1 /* We have a variable condition exit. */
75 #define DISAS_CHAIN DISAS_TARGET_2 /* We have a single condition exit. */
76
77 typedef struct DisasContext DisasContext;
78
79 /* This is the state at translation time. */
80 struct DisasContext {
81 DisasContextBase base;
82
83 CPUAVRState *env;
84 CPUState *cs;
85
86 target_long npc;
87 uint32_t opcode;
88
89 /* MMU index used to access memory */
90 int memidx;
91
92 /*
93 * Some AVR instructions can cause the following instruction to be skipped.
94 * Let's name those instructions:
95 * A - instruction that can skip the next one
96 * B - instruction that can be skipped; whether it is depends on the execution of A
97 * There are two scenarios:
98 * 1. A and B belong to the same translation block
99 * 2. A is the last instruction in the translation block and B is the first one of the next
100 *
101 * The following variables are used to simplify the skipping logic; they are
102 * used in the following manner (sketch):
103 *
104 * TCGLabel *skip_label = NULL;
105 * if (ctx->skip_cond != TCG_COND_NEVER) {
106 * skip_label = gen_new_label();
107 * tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label);
108 * }
109 *
110 * translate(ctx);
111 *
112 * if (skip_label) {
113 * gen_set_label(skip_label);
114 * }
115 */
116 TCGv skip_var0;
117 TCGv skip_var1;
118 TCGCond skip_cond;
119 };
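/*
 * Example of how these fields are consumed (a sketch; see trans_SBRC below):
 * SBRC leaves skip_cond = TCG_COND_EQ, skip_var0 = (Rr & (1 << bit)) and
 * skip_var1 = NULL, and the translation loop then brackets the following
 * instruction as in the sketch above -- presumably comparing skip_var0
 * against zero when skip_var1 is NULL -- so that instruction is branched
 * over whenever the tested bit is clear.
 */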
120
121 void avr_cpu_tcg_init(void)
122 {
123 int i;
124
125 #define AVR_REG_OFFS(x) offsetof(CPUAVRState, x)
126 cpu_pc = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(pc_w), "pc");
127 cpu_Cf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregC), "Cf");
128 cpu_Zf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregZ), "Zf");
129 cpu_Nf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregN), "Nf");
130 cpu_Vf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregV), "Vf");
131 cpu_Sf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregS), "Sf");
132 cpu_Hf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregH), "Hf");
133 cpu_Tf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregT), "Tf");
134 cpu_If = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregI), "If");
135 cpu_rampD = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampD), "rampD");
136 cpu_rampX = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampX), "rampX");
137 cpu_rampY = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampY), "rampY");
138 cpu_rampZ = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampZ), "rampZ");
139 cpu_eind = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(eind), "eind");
140 cpu_sp = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sp), "sp");
141 cpu_skip = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(skip), "skip");
142
143 for (i = 0; i < NUMBER_OF_CPU_REGISTERS; i++) {
144 cpu_r[i] = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(r[i]),
145 reg_names[i]);
146 }
147 #undef AVR_REG_OFFS
148 }
149
150 static int to_regs_16_31_by_one(DisasContext *ctx, int indx)
151 {
152 return 16 + (indx % 16);
153 }
154
155 static int to_regs_16_23_by_one(DisasContext *ctx, int indx)
156 {
157 return 16 + (indx % 8);
158 }
159
160 static int to_regs_24_30_by_two(DisasContext *ctx, int indx)
161 {
162 return 24 + (indx % 4) * 2;
163 }
164
165 static int to_regs_00_30_by_two(DisasContext *ctx, int indx)
166 {
167 return (indx % 16) * 2;
168 }
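/*
 * These helpers are decode-time index converters (sketch of the resulting
 * mapping, assuming they back the register fields of decode-insn.c.inc):
 *     to_regs_16_31_by_one(ctx, 0)  -> 16 (r16, e.g. for LDI/ANDI/ORI)
 *     to_regs_16_23_by_one(ctx, 7)  -> 23 (r23)
 *     to_regs_24_30_by_two(ctx, 1)  -> 26 (r26, XL; ADIW/SBIW pairs)
 *     to_regs_00_30_by_two(ctx, 15) -> 30 (r30, ZL; MOVW pairs)
 */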
169
170 static uint16_t next_word(DisasContext *ctx)
171 {
172 return cpu_lduw_code(ctx->env, ctx->npc++ * 2);
173 }
174
175 static int append_16(DisasContext *ctx, int x)
176 {
177 return x << 16 | next_word(ctx);
178 }
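/*
 * Worked example (sketch): 32-bit opcodes such as CALL, JMP, LDS and STS keep
 * their second word right after the first one in program memory.  If the
 * already-decoded high part is 0x0001 and the word at ctx->npc is 0x2345,
 * then append_16(ctx, 0x0001) returns 0x00012345 and ctx->npc has moved past
 * the extra word.
 */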
179
180 static bool avr_have_feature(DisasContext *ctx, int feature)
181 {
182 if (!avr_feature(ctx->env, feature)) {
183 gen_helper_unsupported(cpu_env);
184 ctx->base.is_jmp = DISAS_NORETURN;
185 return false;
186 }
187 return true;
188 }
189
190 static bool decode_insn(DisasContext *ctx, uint16_t insn);
191 #include "decode-insn.c.inc"
192
193 /*
194 * Arithmetic Instructions
195 */
196
197 /*
198 * Utility functions for updating status registers:
199 *
200 * - gen_add_CHf()
201 * - gen_add_Vf()
202 * - gen_sub_CHf()
203 * - gen_sub_Vf()
204 * - gen_NSf()
205 * - gen_ZNSf()
206 *
207 */
208
209 static void gen_add_CHf(TCGv R, TCGv Rd, TCGv Rr)
210 {
211 TCGv t1 = tcg_temp_new_i32();
212 TCGv t2 = tcg_temp_new_i32();
213 TCGv t3 = tcg_temp_new_i32();
214
215 tcg_gen_and_tl(t1, Rd, Rr); /* t1 = Rd & Rr */
216 tcg_gen_andc_tl(t2, Rd, R); /* t2 = Rd & ~R */
217 tcg_gen_andc_tl(t3, Rr, R); /* t3 = Rr & ~R */
218 tcg_gen_or_tl(t1, t1, t2); /* t1 = t1 | t2 | t3 */
219 tcg_gen_or_tl(t1, t1, t3);
220
221 tcg_gen_shri_tl(cpu_Cf, t1, 7); /* Cf = t1(7) */
222 tcg_gen_shri_tl(cpu_Hf, t1, 3); /* Hf = t1(3) */
223 tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
224 }
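/*
 * Worked example (sketch): Rd = 0x3c, Rr = 0x46, R = 0x82.
 *     Rd & Rr = 0x04, Rd & ~R = 0x3c, Rr & ~R = 0x44  =>  t1 = 0x7c
 * Bit 7 of t1 is 0, so Cf = 0 (0x3c + 0x46 fits in 8 bits), while bit 3 is 1,
 * so Hf = 1: the low nibbles 0xc + 0x6 carried into bit 4.
 */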
225
226 static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr)
227 {
228 TCGv t1 = tcg_temp_new_i32();
229 TCGv t2 = tcg_temp_new_i32();
230
231 /* t1 = Rd & Rr & ~R | ~Rd & ~Rr & R */
232 /* = (Rd ^ R) & ~(Rd ^ Rr) */
233 tcg_gen_xor_tl(t1, Rd, R);
234 tcg_gen_xor_tl(t2, Rd, Rr);
235 tcg_gen_andc_tl(t1, t1, t2);
236
237 tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
238 }
239
240 static void gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr)
241 {
242 TCGv t1 = tcg_temp_new_i32();
243 TCGv t2 = tcg_temp_new_i32();
244 TCGv t3 = tcg_temp_new_i32();
245
246 tcg_gen_not_tl(t1, Rd); /* t1 = ~Rd */
247 tcg_gen_and_tl(t2, t1, Rr); /* t2 = ~Rd & Rr */
248 tcg_gen_or_tl(t3, t1, Rr); /* t3 = (~Rd | Rr) & R */
249 tcg_gen_and_tl(t3, t3, R);
250 tcg_gen_or_tl(t2, t2, t3); /* t2 = ~Rd & Rr | ~Rd & R | R & Rr */
251
252 tcg_gen_shri_tl(cpu_Cf, t2, 7); /* Cf = t2(7) */
253 tcg_gen_shri_tl(cpu_Hf, t2, 3); /* Hf = t2(3) */
254 tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
255 }
256
257 static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr)
258 {
259 TCGv t1 = tcg_temp_new_i32();
260 TCGv t2 = tcg_temp_new_i32();
261
262 /* t1 = Rd & ~Rr & ~R | ~Rd & Rr & R */
263 /* = (Rd ^ R) & (Rd ^ Rr) */
264 tcg_gen_xor_tl(t1, Rd, R);
265 tcg_gen_xor_tl(t2, Rd, Rr);
266 tcg_gen_and_tl(t1, t1, t2);
267
268 tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
269 }
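/*
 * Worked example (sketch): Rd = 0x80 (-128), Rr = 0x01, R = 0x7f.
 *     Rd ^ R = 0xff, Rd ^ Rr = 0x81  =>  t1 = 0x81
 * Bit 7 of t1 is set, so Vf = 1: -128 - 1 does not fit in a signed byte,
 * which is exactly the case the formula above is meant to catch.
 */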
270
271 static void gen_NSf(TCGv R)
272 {
273 tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
274 tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
275 }
276
277 static void gen_ZNSf(TCGv R)
278 {
279 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
280
281 /* update status register */
282 tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
283 tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
284 }
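/*
 * Worked example (sketch) of why Sf = Nf ^ Vf is the signed-comparison flag:
 * after CP with Rd = 0x7f (127) and Rr = 0xff (-1), R = 0x80, giving Nf = 1
 * and Vf = 1 (see gen_sub_Vf above), hence Sf = 0 -- correctly reporting that
 * 127 is not less than -1 even though the sign bit of the raw result is set.
 */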
285
286 /*
287 * Adds two registers without the C Flag and places the result in the
288 * destination register Rd.
289 */
290 static bool trans_ADD(DisasContext *ctx, arg_ADD *a)
291 {
292 TCGv Rd = cpu_r[a->rd];
293 TCGv Rr = cpu_r[a->rr];
294 TCGv R = tcg_temp_new_i32();
295
296 tcg_gen_add_tl(R, Rd, Rr); /* R = Rd + Rr */
297 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
298
299 /* update status register */
300 gen_add_CHf(R, Rd, Rr);
301 gen_add_Vf(R, Rd, Rr);
302 gen_ZNSf(R);
303
304 /* update output registers */
305 tcg_gen_mov_tl(Rd, R);
306 return true;
307 }
308
309 /*
310 * Adds two registers and the contents of the C Flag and places the result in
311 * the destination register Rd.
312 */
313 static bool trans_ADC(DisasContext *ctx, arg_ADC *a)
314 {
315 TCGv Rd = cpu_r[a->rd];
316 TCGv Rr = cpu_r[a->rr];
317 TCGv R = tcg_temp_new_i32();
318
319 tcg_gen_add_tl(R, Rd, Rr); /* R = Rd + Rr + Cf */
320 tcg_gen_add_tl(R, R, cpu_Cf);
321 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
322
323 /* update status register */
324 gen_add_CHf(R, Rd, Rr);
325 gen_add_Vf(R, Rd, Rr);
326 gen_ZNSf(R);
327
328 /* update output registers */
329 tcg_gen_mov_tl(Rd, R);
330 return true;
331 }
332
333 /*
334 * Adds an immediate value (0 - 63) to a register pair and places the result
335 * in the register pair. This instruction operates on the upper four register
336 * pairs, and is well suited for operations on the pointer registers. This
337 * instruction is not available in all devices. Refer to the device specific
338 * instruction set summary.
339 */
340 static bool trans_ADIW(DisasContext *ctx, arg_ADIW *a)
341 {
342 if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) {
343 return true;
344 }
345
346 TCGv RdL = cpu_r[a->rd];
347 TCGv RdH = cpu_r[a->rd + 1];
348 int Imm = (a->imm);
349 TCGv R = tcg_temp_new_i32();
350 TCGv Rd = tcg_temp_new_i32();
351
352 tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */
353 tcg_gen_addi_tl(R, Rd, Imm); /* R = Rd + Imm */
354 tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
355
356 /* update status register */
357 tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */
358 tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15);
359 tcg_gen_andc_tl(cpu_Vf, R, Rd); /* Vf = R & ~Rd */
360 tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15);
361 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
362 tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */
363 tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf);/* Sf = Nf ^ Vf */
364
365 /* update output registers */
366 tcg_gen_andi_tl(RdL, R, 0xff);
367 tcg_gen_shri_tl(RdH, R, 8);
368 return true;
369 }
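/*
 * Worked example (sketch): ADIW r25:r24, 1 with r25:r24 = 0xffff gives
 * R = 0x0000 after the 16-bit mask, so Cf = bit 15 of (Rd & ~R) = 1,
 * Vf = bit 15 of (R & ~Rd) = 0, Zf = 1, Nf = 0 and Sf = 0.
 */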
370
371 /*
372 * Subtracts two registers and places the result in the destination
373 * register Rd.
374 */
375 static bool trans_SUB(DisasContext *ctx, arg_SUB *a)
376 {
377 TCGv Rd = cpu_r[a->rd];
378 TCGv Rr = cpu_r[a->rr];
379 TCGv R = tcg_temp_new_i32();
380
381 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
382 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
383
384 /* update status register */
385 tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */
386 gen_sub_CHf(R, Rd, Rr);
387 gen_sub_Vf(R, Rd, Rr);
388 gen_ZNSf(R);
389
390 /* update output registers */
391 tcg_gen_mov_tl(Rd, R);
392 return true;
393 }
394
395 /*
396 * Subtracts a register and a constant and places the result in the
397 * destination register Rd. This instruction works on registers R16 to R31
398 * and is very well suited for operations on the X, Y, and Z-pointers.
399 */
400 static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a)
401 {
402 TCGv Rd = cpu_r[a->rd];
403 TCGv Rr = tcg_const_i32(a->imm);
404 TCGv R = tcg_temp_new_i32();
405
406 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */
407 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
408
409 /* update status register */
410 gen_sub_CHf(R, Rd, Rr);
411 gen_sub_Vf(R, Rd, Rr);
412 gen_ZNSf(R);
413
414 /* update output registers */
415 tcg_gen_mov_tl(Rd, R);
416 return true;
417 }
418
419 /*
420 * Subtracts two registers and subtracts with the C Flag and places the
421 * result in the destination register Rd.
422 */
423 static bool trans_SBC(DisasContext *ctx, arg_SBC *a)
424 {
425 TCGv Rd = cpu_r[a->rd];
426 TCGv Rr = cpu_r[a->rr];
427 TCGv R = tcg_temp_new_i32();
428 TCGv zero = tcg_const_i32(0);
429
430 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
431 tcg_gen_sub_tl(R, R, cpu_Cf);
432 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
433
434 /* update status register */
435 gen_sub_CHf(R, Rd, Rr);
436 gen_sub_Vf(R, Rd, Rr);
437 gen_NSf(R);
438
439 /*
440 * Previous value remains unchanged when the result is zero;
441 * cleared otherwise.
442 */
443 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
444
445 /* update output registers */
446 tcg_gen_mov_tl(Rd, R);
447 return true;
448 }
449
450 /*
451 * SBCI -- Subtract Immediate with Carry
452 */
453 static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a)
454 {
455 TCGv Rd = cpu_r[a->rd];
456 TCGv Rr = tcg_const_i32(a->imm);
457 TCGv R = tcg_temp_new_i32();
458 TCGv zero = tcg_const_i32(0);
459
460 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
461 tcg_gen_sub_tl(R, R, cpu_Cf);
462 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
463
464 /* update status register */
465 gen_sub_CHf(R, Rd, Rr);
466 gen_sub_Vf(R, Rd, Rr);
467 gen_NSf(R);
468
469 /*
470 * Previous value remains unchanged when the result is zero;
471 * cleared otherwise.
472 */
473 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
474
475 /* update output registers */
476 tcg_gen_mov_tl(Rd, R);
477 return true;
478 }
479
480 /*
481 * Subtracts an immediate value (0-63) from a register pair and places the
482 * result in the register pair. This instruction operates on the upper four
483 * register pairs, and is well suited for operations on the Pointer Registers.
484 * This instruction is not available in all devices. Refer to the device
485 * specific instruction set summary.
486 */
487 static bool trans_SBIW(DisasContext *ctx, arg_SBIW *a)
488 {
489 if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) {
490 return true;
491 }
492
493 TCGv RdL = cpu_r[a->rd];
494 TCGv RdH = cpu_r[a->rd + 1];
495 int Imm = (a->imm);
496 TCGv R = tcg_temp_new_i32();
497 TCGv Rd = tcg_temp_new_i32();
498
499 tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */
500 tcg_gen_subi_tl(R, Rd, Imm); /* R = Rd - Imm */
501 tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
502
503 /* update status register */
504 tcg_gen_andc_tl(cpu_Cf, R, Rd);
505 tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15); /* Cf = R & ~Rd */
506 tcg_gen_andc_tl(cpu_Vf, Rd, R);
507 tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15); /* Vf = Rd & ~R */
508 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
509 tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */
510 tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
511
512 /* update output registers */
513 tcg_gen_andi_tl(RdL, R, 0xff);
514 tcg_gen_shri_tl(RdH, R, 8);
515 return true;
516 }
517
518 /*
519 * Performs the logical AND between the contents of register Rd and register
520 * Rr and places the result in the destination register Rd.
521 */
522 static bool trans_AND(DisasContext *ctx, arg_AND *a)
523 {
524 TCGv Rd = cpu_r[a->rd];
525 TCGv Rr = cpu_r[a->rr];
526 TCGv R = tcg_temp_new_i32();
527
528 tcg_gen_and_tl(R, Rd, Rr); /* R = Rd & Rr */
529
530 /* update status register */
531 tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */
532 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
533 gen_ZNSf(R);
534
535 /* update output registers */
536 tcg_gen_mov_tl(Rd, R);
537 return true;
538 }
539
540 /*
541 * Performs the logical AND between the contents of register Rd and a constant
542 * and places the result in the destination register Rd.
543 */
544 static bool trans_ANDI(DisasContext *ctx, arg_ANDI *a)
545 {
546 TCGv Rd = cpu_r[a->rd];
547 int Imm = (a->imm);
548
549 tcg_gen_andi_tl(Rd, Rd, Imm); /* Rd = Rd & Imm */
550
551 /* update status register */
552 tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */
553 gen_ZNSf(Rd);
554
555 return true;
556 }
557
558 /*
559 * Performs the logical OR between the contents of register Rd and register
560 * Rr and places the result in the destination register Rd.
561 */
562 static bool trans_OR(DisasContext *ctx, arg_OR *a)
563 {
564 TCGv Rd = cpu_r[a->rd];
565 TCGv Rr = cpu_r[a->rr];
566 TCGv R = tcg_temp_new_i32();
567
568 tcg_gen_or_tl(R, Rd, Rr);
569
570 /* update status register */
571 tcg_gen_movi_tl(cpu_Vf, 0);
572 gen_ZNSf(R);
573
574 /* update output registers */
575 tcg_gen_mov_tl(Rd, R);
576 return true;
577 }
578
579 /*
580 * Performs the logical OR between the contents of register Rd and a
581 * constant and places the result in the destination register Rd.
582 */
583 static bool trans_ORI(DisasContext *ctx, arg_ORI *a)
584 {
585 TCGv Rd = cpu_r[a->rd];
586 int Imm = (a->imm);
587
588 tcg_gen_ori_tl(Rd, Rd, Imm); /* Rd = Rd | Imm */
589
590 /* update status register */
591 tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */
592 gen_ZNSf(Rd);
593
594 return true;
595 }
596
597 /*
598 * Performs the logical EOR between the contents of register Rd and
599 * register Rr and places the result in the destination register Rd.
600 */
601 static bool trans_EOR(DisasContext *ctx, arg_EOR *a)
602 {
603 TCGv Rd = cpu_r[a->rd];
604 TCGv Rr = cpu_r[a->rr];
605
606 tcg_gen_xor_tl(Rd, Rd, Rr);
607
608 /* update status register */
609 tcg_gen_movi_tl(cpu_Vf, 0);
610 gen_ZNSf(Rd);
611
612 return true;
613 }
614
615 /*
616 * Performs a one's complement of register Rd: the contents of Rd are
617 * replaced with $FF minus their value (Rd = $FF - Rd). The result is
618 * placed back in register Rd.
619 */
620 static bool trans_COM(DisasContext *ctx, arg_COM *a)
621 {
622 TCGv Rd = cpu_r[a->rd];
623
624 tcg_gen_xori_tl(Rd, Rd, 0xff);
625
626 /* update status register */
627 tcg_gen_movi_tl(cpu_Cf, 1); /* Cf = 1 */
628 tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */
629 gen_ZNSf(Rd);
630 return true;
631 }
632
633 /*
634 * Replaces the contents of register Rd with its two's complement; the
635 * value $80 is left unchanged.
636 */
637 static bool trans_NEG(DisasContext *ctx, arg_NEG *a)
638 {
639 TCGv Rd = cpu_r[a->rd];
640 TCGv t0 = tcg_const_i32(0);
641 TCGv R = tcg_temp_new_i32();
642
643 tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */
644 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
645
646 /* update status register */
647 gen_sub_CHf(R, t0, Rd);
648 gen_sub_Vf(R, t0, Rd);
649 gen_ZNSf(R);
650
651 /* update output registers */
652 tcg_gen_mov_tl(Rd, R);
653 return true;
654 }
655
656 /*
657 * Adds one (1) to the contents of register Rd and places the result in the
658 * destination register Rd. The C Flag in SREG is not affected by the
659 * operation, thus allowing the INC instruction to be used on a loop counter in
660 * multiple-precision computations. When operating on unsigned numbers, only
661 * BREQ and BRNE branches can be expected to perform consistently. When
662 * operating on two's complement values, all signed branches are available.
663 */
664 static bool trans_INC(DisasContext *ctx, arg_INC *a)
665 {
666 TCGv Rd = cpu_r[a->rd];
667
668 tcg_gen_addi_tl(Rd, Rd, 1);
669 tcg_gen_andi_tl(Rd, Rd, 0xff);
670
671 /* update status register */
672 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x80); /* Vf = Rd == 0x80 */
673 gen_ZNSf(Rd);
674
675 return true;
676 }
677
678 /*
679 * Subtracts one (1) from the contents of register Rd and places the result
680 * in the destination register Rd. The C Flag in SREG is not affected by the
681 * operation, thus allowing the DEC instruction to be used on a loop counter in
682 * multiple-precision computations. When operating on unsigned values, only
683 * BREQ and BRNE branches can be expected to perform consistently. When
684 * operating on two's complement values, all signed branches are available.
685 */
686 static bool trans_DEC(DisasContext *ctx, arg_DEC *a)
687 {
688 TCGv Rd = cpu_r[a->rd];
689
690 tcg_gen_subi_tl(Rd, Rd, 1); /* Rd = Rd - 1 */
691 tcg_gen_andi_tl(Rd, Rd, 0xff); /* make it 8 bits */
692
693 /* update status register */
694 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x7f); /* Vf = Rd == 0x7f */
695 gen_ZNSf(Rd);
696
697 return true;
698 }
699
700 /*
701 * This instruction performs 8-bit x 8-bit -> 16-bit unsigned multiplication.
702 */
703 static bool trans_MUL(DisasContext *ctx, arg_MUL *a)
704 {
705 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
706 return true;
707 }
708
709 TCGv R0 = cpu_r[0];
710 TCGv R1 = cpu_r[1];
711 TCGv Rd = cpu_r[a->rd];
712 TCGv Rr = cpu_r[a->rr];
713 TCGv R = tcg_temp_new_i32();
714
715 tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */
716 tcg_gen_andi_tl(R0, R, 0xff);
717 tcg_gen_shri_tl(R1, R, 8);
718
719 /* update status register */
720 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
721 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
722 return true;
723 }
724
725 /*
726 * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication.
727 */
728 static bool trans_MULS(DisasContext *ctx, arg_MULS *a)
729 {
730 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
731 return true;
732 }
733
734 TCGv R0 = cpu_r[0];
735 TCGv R1 = cpu_r[1];
736 TCGv Rd = cpu_r[a->rd];
737 TCGv Rr = cpu_r[a->rr];
738 TCGv R = tcg_temp_new_i32();
739 TCGv t0 = tcg_temp_new_i32();
740 TCGv t1 = tcg_temp_new_i32();
741
742 tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
743 tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
744 tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
745 tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
746 tcg_gen_andi_tl(R0, R, 0xff);
747 tcg_gen_shri_tl(R1, R, 8);
748
749 /* update status register */
750 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
751 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
752 return true;
753 }
754
755 /*
756 * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
757 * signed and an unsigned number.
758 */
759 static bool trans_MULSU(DisasContext *ctx, arg_MULSU *a)
760 {
761 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
762 return true;
763 }
764
765 TCGv R0 = cpu_r[0];
766 TCGv R1 = cpu_r[1];
767 TCGv Rd = cpu_r[a->rd];
768 TCGv Rr = cpu_r[a->rr];
769 TCGv R = tcg_temp_new_i32();
770 TCGv t0 = tcg_temp_new_i32();
771
772 tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
773 tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */
774 tcg_gen_andi_tl(R, R, 0xffff); /* make R 16 bits */
775 tcg_gen_andi_tl(R0, R, 0xff);
776 tcg_gen_shri_tl(R1, R, 8);
777
778 /* update status register */
779 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
780 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
781 return true;
782 }
783
784 /*
785 * This instruction performs 8-bit x 8-bit -> 16-bit unsigned
786 * multiplication and shifts the result one bit left.
787 */
788 static bool trans_FMUL(DisasContext *ctx, arg_FMUL *a)
789 {
790 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
791 return true;
792 }
793
794 TCGv R0 = cpu_r[0];
795 TCGv R1 = cpu_r[1];
796 TCGv Rd = cpu_r[a->rd];
797 TCGv Rr = cpu_r[a->rr];
798 TCGv R = tcg_temp_new_i32();
799
800 tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */
801
802 /* update status register */
803 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
804 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
805
806 /* update output registers */
807 tcg_gen_shli_tl(R, R, 1);
808 tcg_gen_andi_tl(R0, R, 0xff);
809 tcg_gen_shri_tl(R1, R, 8);
810 tcg_gen_andi_tl(R1, R1, 0xff);
811 return true;
812 }
813
814 /*
815 * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication
816 * and shifts the result one bit left.
817 */
818 static bool trans_FMULS(DisasContext *ctx, arg_FMULS *a)
819 {
820 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
821 return true;
822 }
823
824 TCGv R0 = cpu_r[0];
825 TCGv R1 = cpu_r[1];
826 TCGv Rd = cpu_r[a->rd];
827 TCGv Rr = cpu_r[a->rr];
828 TCGv R = tcg_temp_new_i32();
829 TCGv t0 = tcg_temp_new_i32();
830 TCGv t1 = tcg_temp_new_i32();
831
832 tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
833 tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
834 tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
835 tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
836
837 /* update status register */
838 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
839 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
840
841 /* update output registers */
842 tcg_gen_shli_tl(R, R, 1);
843 tcg_gen_andi_tl(R0, R, 0xff);
844 tcg_gen_shri_tl(R1, R, 8);
845 tcg_gen_andi_tl(R1, R1, 0xff);
846 return true;
847 }
848
849 /*
850 * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
851 * signed and an unsigned number and shifts the result one bit left.
852 */
853 static bool trans_FMULSU(DisasContext *ctx, arg_FMULSU *a)
854 {
855 if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
856 return true;
857 }
858
859 TCGv R0 = cpu_r[0];
860 TCGv R1 = cpu_r[1];
861 TCGv Rd = cpu_r[a->rd];
862 TCGv Rr = cpu_r[a->rr];
863 TCGv R = tcg_temp_new_i32();
864 TCGv t0 = tcg_temp_new_i32();
865
866 tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
867 tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */
868 tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
869
870 /* update status register */
871 tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
872 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
873
874 /* update output registers */
875 tcg_gen_shli_tl(R, R, 1);
876 tcg_gen_andi_tl(R0, R, 0xff);
877 tcg_gen_shri_tl(R1, R, 8);
878 tcg_gen_andi_tl(R1, R1, 0xff);
879 return true;
880 }
881
882 /*
883 * The module is an instruction set extension to the AVR CPU, performing
884 * DES iterations. The 64-bit data block (plaintext or ciphertext) is placed in
885 * the CPU register file, registers R0-R7, where LSB of data is placed in LSB
886 * of R0 and MSB of data is placed in MSB of R7. The full 64-bit key (including
887 * parity bits) is placed in registers R8-R15, organized in the register file
888 * with LSB of key in LSB of R8 and MSB of key in MSB of R15. Executing one DES
889 * instruction performs one round in the DES algorithm. Sixteen rounds must be
890 * executed in increasing order to form the correct DES ciphertext or
891 * plaintext. Intermediate results are stored in the register file (R0-R15)
892 * after each DES instruction. The instruction's operand (K) determines which
893 * round is executed, and the half carry flag (H) determines whether encryption
894 * or decryption is performed. The DES algorithm is described in
895 * "Specifications for the Data Encryption Standard" (Federal Information
896 * Processing Standards Publication 46). Intermediate results in this
897 * implementation differ from the standard because the initial permutation and
898 * the inverse initial permutation are performed each iteration. This does not
899 * affect the result in the final ciphertext or plaintext, but reduces
900 * execution time.
901 */
902 static bool trans_DES(DisasContext *ctx, arg_DES *a)
903 {
904 /* TODO */
905 if (!avr_have_feature(ctx, AVR_FEATURE_DES)) {
906 return true;
907 }
908
909 qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
910
911 return true;
912 }
913
914 /*
915 * Branch Instructions
916 */
917 static void gen_jmp_ez(DisasContext *ctx)
918 {
919 tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
920 tcg_gen_or_tl(cpu_pc, cpu_pc, cpu_eind);
921 ctx->base.is_jmp = DISAS_LOOKUP;
922 }
923
924 static void gen_jmp_z(DisasContext *ctx)
925 {
926 tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
927 ctx->base.is_jmp = DISAS_LOOKUP;
928 }
929
930 static void gen_push_ret(DisasContext *ctx, int ret)
931 {
932 if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
933 TCGv t0 = tcg_const_i32((ret & 0x0000ff));
934
935 tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB);
936 tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
937 } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
938 TCGv t0 = tcg_const_i32((ret & 0x00ffff));
939
940 tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
941 tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW);
942 tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
943 } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
944 TCGv lo = tcg_const_i32((ret & 0x0000ff));
945 TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8);
946
947 tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
948 tcg_gen_subi_tl(cpu_sp, cpu_sp, 2);
949 tcg_gen_qemu_st_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);
950 tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
951 }
952 }
953
954 static void gen_pop_ret(DisasContext *ctx, TCGv ret)
955 {
956 if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
957 tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
958 tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_UB);
959 } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
960 tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
961 tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_BEUW);
962 tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
963 } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
964 TCGv lo = tcg_temp_new_i32();
965 TCGv hi = tcg_temp_new_i32();
966
967 tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
968 tcg_gen_qemu_ld_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);
969
970 tcg_gen_addi_tl(cpu_sp, cpu_sp, 2);
971 tcg_gen_qemu_ld_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
972
973 tcg_gen_deposit_tl(ret, lo, hi, 8, 16);
974 }
975 }
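/*
 * Illustrative sketch of the 3-byte-PC case above: pushing the return address
 * 0xAABBCC with SP initially equal to S stores mem[S] = 0xCC, then the
 * big-endian 16-bit store puts 0xAA at S-2 and 0xBB at S-1, leaving SP = S-3.
 * gen_pop_ret() walks the same layout in reverse and reassembles the value
 * with the final deposit: ret = 0xCC | (0xAABB << 8) = 0xAABBCC.
 */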
976
977 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
978 {
979 const TranslationBlock *tb = ctx->base.tb;
980
981 if (translator_use_goto_tb(&ctx->base, dest)) {
982 tcg_gen_goto_tb(n);
983 tcg_gen_movi_i32(cpu_pc, dest);
984 tcg_gen_exit_tb(tb, n);
985 } else {
986 tcg_gen_movi_i32(cpu_pc, dest);
987 tcg_gen_lookup_and_goto_ptr();
988 }
989 ctx->base.is_jmp = DISAS_NORETURN;
990 }
991
992 /*
993 * Relative jump to an address within PC - 2K + 1 and PC + 2K (words). For
994 * AVR microcontrollers with Program memory not exceeding 4K words (8KB) this
995 * instruction can address the entire memory from every address location. See
996 * also JMP.
997 */
998 static bool trans_RJMP(DisasContext *ctx, arg_RJMP *a)
999 {
1000 int dst = ctx->npc + a->imm;
1001
1002 gen_goto_tb(ctx, 0, dst);
1003
1004 return true;
1005 }
1006
1007 /*
1008 * Indirect jump to the address pointed to by the Z (16 bits) Pointer
1009 * Register in the Register File. The Z-pointer Register is 16 bits wide and
1010 * allows jump within the lowest 64K words (128KB) section of Program memory.
1011 * This instruction is not available in all devices. Refer to the device
1012 * specific instruction set summary.
1013 */
1014 static bool trans_IJMP(DisasContext *ctx, arg_IJMP *a)
1015 {
1016 if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
1017 return true;
1018 }
1019
1020 gen_jmp_z(ctx);
1021
1022 return true;
1023 }
1024
1025 /*
1026 * Indirect jump to the address pointed to by the Z (16 bits) Pointer
1027 * Register in the Register File and the EIND Register in the I/O space. This
1028 * instruction allows for indirect jumps to the entire 4M (words) Program
1029 * memory space. See also IJMP. This instruction is not available in all
1030 * devices. Refer to the device specific instruction set summary.
1031 */
1032 static bool trans_EIJMP(DisasContext *ctx, arg_EIJMP *a)
1033 {
1034 if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
1035 return true;
1036 }
1037
1038 gen_jmp_ez(ctx);
1039 return true;
1040 }
1041
1042 /*
1043 * Jump to an address within the entire 4M (words) Program memory. See also
1044 * RJMP. This instruction is not available in all devices. Refer to the device
1045 * specific instruction set summary.
1046 */
1047 static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
1048 {
1049 if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
1050 return true;
1051 }
1052
1053 gen_goto_tb(ctx, 0, a->imm);
1054
1055 return true;
1056 }
1057
1058 /*
1059 * Relative call to an address within PC - 2K + 1 and PC + 2K (words). The
1060 * return address (the instruction after the RCALL) is stored onto the Stack.
1061 * See also CALL. For AVR microcontrollers with Program memory not exceeding 4K
1062 * words (8KB) this instruction can address the entire memory from every
1063 * address location. The Stack Pointer uses a post-decrement scheme during
1064 * RCALL.
1065 */
1066 static bool trans_RCALL(DisasContext *ctx, arg_RCALL *a)
1067 {
1068 int ret = ctx->npc;
1069 int dst = ctx->npc + a->imm;
1070
1071 gen_push_ret(ctx, ret);
1072 gen_goto_tb(ctx, 0, dst);
1073
1074 return true;
1075 }
1076
1077 /*
1078 * Calls to a subroutine within the entire 4M (words) Program memory. The
1079 * return address (to the instruction after the CALL) will be stored onto the
1080 * Stack. See also RCALL. The Stack Pointer uses a post-decrement scheme during
1081 * CALL. This instruction is not available in all devices. Refer to the device
1082 * specific instruction set summary.
1083 */
1084 static bool trans_ICALL(DisasContext *ctx, arg_ICALL *a)
1085 {
1086 if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
1087 return true;
1088 }
1089
1090 int ret = ctx->npc;
1091
1092 gen_push_ret(ctx, ret);
1093 gen_jmp_z(ctx);
1094
1095 return true;
1096 }
1097
1098 /*
1099 * Indirect call of a subroutine pointed to by the Z (16 bits) Pointer
1100 * Register in the Register File and the EIND Register in the I/O space. This
1101 * instruction allows for indirect calls to the entire 4M (words) Program
1102 * memory space. See also ICALL. The Stack Pointer uses a post-decrement scheme
1103 * during EICALL. This instruction is not available in all devices. Refer to
1104 * the device specific instruction set summary.
1105 */
1106 static bool trans_EICALL(DisasContext *ctx, arg_EICALL *a)
1107 {
1108 if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
1109 return true;
1110 }
1111
1112 int ret = ctx->npc;
1113
1114 gen_push_ret(ctx, ret);
1115 gen_jmp_ez(ctx);
1116 return true;
1117 }
1118
1119 /*
1120 * Calls to a subroutine within the entire Program memory. The return
1121 * address (to the instruction after the CALL) will be stored onto the Stack.
1122 * (See also RCALL). The Stack Pointer uses a post-decrement scheme during
1123 * CALL. This instruction is not available in all devices. Refer to the device
1124 * specific instruction set summary.
1125 */
1126 static bool trans_CALL(DisasContext *ctx, arg_CALL *a)
1127 {
1128 if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
1129 return true;
1130 }
1131
1132 int Imm = a->imm;
1133 int ret = ctx->npc;
1134
1135 gen_push_ret(ctx, ret);
1136 gen_goto_tb(ctx, 0, Imm);
1137
1138 return true;
1139 }
1140
1141 /*
1142 * Returns from subroutine. The return address is loaded from the STACK.
1143 * The Stack Pointer uses a preincrement scheme during RET.
1144 */
1145 static bool trans_RET(DisasContext *ctx, arg_RET *a)
1146 {
1147 gen_pop_ret(ctx, cpu_pc);
1148
1149 ctx->base.is_jmp = DISAS_LOOKUP;
1150 return true;
1151 }
1152
1153 /*
1154 * Returns from interrupt. The return address is loaded from the STACK and
1155 * the Global Interrupt Flag is set. Note that the Status Register is not
1156 * automatically stored when entering an interrupt routine, and it is not
1157 * restored when returning from an interrupt routine. This must be handled by
1158 * the application program. The Stack Pointer uses a pre-increment scheme
1159 * during RETI.
1160 */
1161 static bool trans_RETI(DisasContext *ctx, arg_RETI *a)
1162 {
1163 gen_pop_ret(ctx, cpu_pc);
1164 tcg_gen_movi_tl(cpu_If, 1);
1165
1166 /* Need to return to main loop to re-evaluate interrupts. */
1167 ctx->base.is_jmp = DISAS_EXIT;
1168 return true;
1169 }
1170
1171 /*
1172 * This instruction performs a compare between two registers Rd and Rr, and
1173 * skips the next instruction if Rd = Rr.
1174 */
1175 static bool trans_CPSE(DisasContext *ctx, arg_CPSE *a)
1176 {
1177 ctx->skip_cond = TCG_COND_EQ;
1178 ctx->skip_var0 = cpu_r[a->rd];
1179 ctx->skip_var1 = cpu_r[a->rr];
1180 return true;
1181 }
1182
1183 /*
1184 * This instruction performs a compare between two registers Rd and Rr.
1185 * None of the registers are changed. All conditional branches can be used
1186 * after this instruction.
1187 */
1188 static bool trans_CP(DisasContext *ctx, arg_CP *a)
1189 {
1190 TCGv Rd = cpu_r[a->rd];
1191 TCGv Rr = cpu_r[a->rr];
1192 TCGv R = tcg_temp_new_i32();
1193
1194 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
1195 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
1196
1197 /* update status register */
1198 gen_sub_CHf(R, Rd, Rr);
1199 gen_sub_Vf(R, Rd, Rr);
1200 gen_ZNSf(R);
1201 return true;
1202 }
1203
1204 /*
1205 * This instruction performs a compare between two registers Rd and Rr and
1206 * also takes into account the previous carry. None of the registers are
1207 * changed. All conditional branches can be used after this instruction.
1208 */
1209 static bool trans_CPC(DisasContext *ctx, arg_CPC *a)
1210 {
1211 TCGv Rd = cpu_r[a->rd];
1212 TCGv Rr = cpu_r[a->rr];
1213 TCGv R = tcg_temp_new_i32();
1214 TCGv zero = tcg_const_i32(0);
1215
1216 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
1217 tcg_gen_sub_tl(R, R, cpu_Cf);
1218 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
1219 /* update status register */
1220 gen_sub_CHf(R, Rd, Rr);
1221 gen_sub_Vf(R, Rd, Rr);
1222 gen_NSf(R);
1223
1224 /*
1225 * Previous value remains unchanged when the result is zero;
1226 * cleared otherwise.
1227 */
1228 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
1229 return true;
1230 }
1231
1232 /*
1233 * This instruction performs a compare between register Rd and a constant.
1234 * The register is not changed. All conditional branches can be used after this
1235 * instruction.
1236 */
1237 static bool trans_CPI(DisasContext *ctx, arg_CPI *a)
1238 {
1239 TCGv Rd = cpu_r[a->rd];
1240 int Imm = a->imm;
1241 TCGv Rr = tcg_const_i32(Imm);
1242 TCGv R = tcg_temp_new_i32();
1243
1244 tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
1245 tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
1246
1247 /* update status register */
1248 gen_sub_CHf(R, Rd, Rr);
1249 gen_sub_Vf(R, Rd, Rr);
1250 gen_ZNSf(R);
1251 return true;
1252 }
1253
1254 /*
1255 * This instruction tests a single bit in a register and skips the next
1256 * instruction if the bit is cleared.
1257 */
1258 static bool trans_SBRC(DisasContext *ctx, arg_SBRC *a)
1259 {
1260 TCGv Rr = cpu_r[a->rr];
1261
1262 ctx->skip_cond = TCG_COND_EQ;
1263 ctx->skip_var0 = tcg_temp_new();
1264
1265 tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
1266 return true;
1267 }
1268
1269 /*
1270 * This instruction tests a single bit in a register and skips the next
1271 * instruction if the bit is set.
1272 */
1273 static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a)
1274 {
1275 TCGv Rr = cpu_r[a->rr];
1276
1277 ctx->skip_cond = TCG_COND_NE;
1278 ctx->skip_var0 = tcg_temp_new();
1279
1280 tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
1281 return true;
1282 }
1283
1284 /*
1285 * This instruction tests a single bit in an I/O Register and skips the
1286 * next instruction if the bit is cleared. This instruction operates on the
1287 * lower 32 I/O Registers -- addresses 0-31.
1288 */
1289 static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
1290 {
1291 TCGv temp = tcg_const_i32(a->reg);
1292
1293 gen_helper_inb(temp, cpu_env, temp);
1294 tcg_gen_andi_tl(temp, temp, 1 << a->bit);
1295 ctx->skip_cond = TCG_COND_EQ;
1296 ctx->skip_var0 = temp;
1297
1298 return true;
1299 }
1300
1301 /*
1302 * This instruction tests a single bit in an I/O Register and skips the
1303 * next instruction if the bit is set. This instruction operates on the lower
1304 * 32 I/O Registers -- addresses 0-31.
1305 */
1306 static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a)
1307 {
1308 TCGv temp = tcg_const_i32(a->reg);
1309
1310 gen_helper_inb(temp, cpu_env, temp);
1311 tcg_gen_andi_tl(temp, temp, 1 << a->bit);
1312 ctx->skip_cond = TCG_COND_NE;
1313 ctx->skip_var0 = temp;
1314
1315 return true;
1316 }
1317
1318 /*
1319 * Conditional relative branch. Tests a single bit in SREG and branches
1320 * relatively to PC if the bit is cleared. This instruction branches relatively
1321 * to PC in either direction (PC - 63 <= destination <= PC + 64). The
1322 * parameter k is the offset from PC and is represented in two's complement
1323 * form.
1324 */
1325 static bool trans_BRBC(DisasContext *ctx, arg_BRBC *a)
1326 {
1327 TCGLabel *not_taken = gen_new_label();
1328
1329 TCGv var;
1330
1331 switch (a->bit) {
1332 case 0x00:
1333 var = cpu_Cf;
1334 break;
1335 case 0x01:
1336 var = cpu_Zf;
1337 break;
1338 case 0x02:
1339 var = cpu_Nf;
1340 break;
1341 case 0x03:
1342 var = cpu_Vf;
1343 break;
1344 case 0x04:
1345 var = cpu_Sf;
1346 break;
1347 case 0x05:
1348 var = cpu_Hf;
1349 break;
1350 case 0x06:
1351 var = cpu_Tf;
1352 break;
1353 case 0x07:
1354 var = cpu_If;
1355 break;
1356 default:
1357 g_assert_not_reached();
1358 }
1359
1360 tcg_gen_brcondi_i32(TCG_COND_NE, var, 0, not_taken);
1361 gen_goto_tb(ctx, 0, ctx->npc + a->imm);
1362 gen_set_label(not_taken);
1363
1364 ctx->base.is_jmp = DISAS_CHAIN;
1365 return true;
1366 }
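/*
 * Note (a sketch of the encoding, per the AVR instruction set): the common
 * branch mnemonics are BRBS/BRBC on a fixed SREG bit, e.g. BRNE k is
 * BRBC 1,k, so a->bit selects cpu_Zf above and the relative jump is taken
 * only while Zf == 0; BRCC k likewise maps to BRBC 0,k on cpu_Cf.
 */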
1367
1368 /*
1369 * Conditional relative branch. Tests a single bit in SREG and branches
1370 * relatively to PC if the bit is set. This instruction branches relatively to
1371 * PC in either direction (PC - 63 <= destination <= PC + 64). The parameter k
1372 * is the offset from PC and is represented in two's complement form.
1373 */
1374 static bool trans_BRBS(DisasContext *ctx, arg_BRBS *a)
1375 {
1376 TCGLabel *not_taken = gen_new_label();
1377
1378 TCGv var;
1379
1380 switch (a->bit) {
1381 case 0x00:
1382 var = cpu_Cf;
1383 break;
1384 case 0x01:
1385 var = cpu_Zf;
1386 break;
1387 case 0x02:
1388 var = cpu_Nf;
1389 break;
1390 case 0x03:
1391 var = cpu_Vf;
1392 break;
1393 case 0x04:
1394 var = cpu_Sf;
1395 break;
1396 case 0x05:
1397 var = cpu_Hf;
1398 break;
1399 case 0x06:
1400 var = cpu_Tf;
1401 break;
1402 case 0x07:
1403 var = cpu_If;
1404 break;
1405 default:
1406 g_assert_not_reached();
1407 }
1408
1409 tcg_gen_brcondi_i32(TCG_COND_EQ, var, 0, not_taken);
1410 gen_goto_tb(ctx, 0, ctx->npc + a->imm);
1411 gen_set_label(not_taken);
1412
1413 ctx->base.is_jmp = DISAS_CHAIN;
1414 return true;
1415 }
1416
1417 /*
1418 * Data Transfer Instructions
1419 */
1420
1421 /*
1422 * in the gen_set_addr & gen_get_addr functions
1423 * H assumed to be in 0x00ff0000 format
1424 * M assumed to be in 0x000000ff format
1425 * L assumed to be in 0x000000ff format
1426 */
1427 static void gen_set_addr(TCGv addr, TCGv H, TCGv M, TCGv L)
1428 {
1429
1430 tcg_gen_andi_tl(L, addr, 0x000000ff);
1431
1432 tcg_gen_andi_tl(M, addr, 0x0000ff00);
1433 tcg_gen_shri_tl(M, M, 8);
1434
1435 tcg_gen_andi_tl(H, addr, 0x00ff0000);
1436 }
1437
1438 static void gen_set_xaddr(TCGv addr)
1439 {
1440 gen_set_addr(addr, cpu_rampX, cpu_r[27], cpu_r[26]);
1441 }
1442
1443 static void gen_set_yaddr(TCGv addr)
1444 {
1445 gen_set_addr(addr, cpu_rampY, cpu_r[29], cpu_r[28]);
1446 }
1447
1448 static void gen_set_zaddr(TCGv addr)
1449 {
1450 gen_set_addr(addr, cpu_rampZ, cpu_r[31], cpu_r[30]);
1451 }
1452
1453 static TCGv gen_get_addr(TCGv H, TCGv M, TCGv L)
1454 {
1455 TCGv addr = tcg_temp_new_i32();
1456
1457 tcg_gen_deposit_tl(addr, M, H, 8, 8);
1458 tcg_gen_deposit_tl(addr, L, addr, 8, 16);
1459
1460 return addr;
1461 }
1462
1463 static TCGv gen_get_xaddr(void)
1464 {
1465 return gen_get_addr(cpu_rampX, cpu_r[27], cpu_r[26]);
1466 }
1467
1468 static TCGv gen_get_yaddr(void)
1469 {
1470 return gen_get_addr(cpu_rampY, cpu_r[29], cpu_r[28]);
1471 }
1472
1473 static TCGv gen_get_zaddr(void)
1474 {
1475 return gen_get_addr(cpu_rampZ, cpu_r[31], cpu_r[30]);
1476 }
1477
1478 /*
1479 * Loads one byte indirect from data space to a register and clears the
1480 * bits in data space specified by the register. The instruction can only
1481 * be used towards internal SRAM. The data location is pointed to by the Z (16
1482 * bits) Pointer Register in the Register File. Memory access is limited to the
1483 * current data segment of 64KB. To access another data segment in devices with
1484 * more than 64KB data space, the RAMPZ register in the I/O area has to be
1485 * changed. The Z-pointer Register is left unchanged by the operation. This
1486 * instruction is especially suited for clearing status bits stored in SRAM.
1487 */
1488 static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
1489 {
1490 if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
1491 gen_helper_fullwr(cpu_env, data, addr);
1492 } else {
1493 tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
1494 }
1495 }
1496
1497 static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
1498 {
1499 if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
1500 gen_helper_fullrd(data, cpu_env, addr);
1501 } else {
1502 tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
1503 }
1504 }
1505
1506 /*
1507 * This instruction makes a copy of one register into another. The source
1508 * register Rr is left unchanged, while the destination register Rd is loaded
1509 * with a copy of Rr.
1510 */
1511 static bool trans_MOV(DisasContext *ctx, arg_MOV *a)
1512 {
1513 TCGv Rd = cpu_r[a->rd];
1514 TCGv Rr = cpu_r[a->rr];
1515
1516 tcg_gen_mov_tl(Rd, Rr);
1517
1518 return true;
1519 }
1520
1521 /*
1522 * This instruction makes a copy of one register pair into another register
1523 * pair. The source register pair Rr+1:Rr is left unchanged, while the
1524 * destination register pair Rd+1:Rd is loaded with a copy of Rr + 1:Rr. This
1525 * instruction is not available in all devices. Refer to the device specific
1526 * instruction set summary.
1527 */
1528 static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a)
1529 {
1530 if (!avr_have_feature(ctx, AVR_FEATURE_MOVW)) {
1531 return true;
1532 }
1533
1534 TCGv RdL = cpu_r[a->rd];
1535 TCGv RdH = cpu_r[a->rd + 1];
1536 TCGv RrL = cpu_r[a->rr];
1537 TCGv RrH = cpu_r[a->rr + 1];
1538
1539 tcg_gen_mov_tl(RdH, RrH);
1540 tcg_gen_mov_tl(RdL, RrL);
1541
1542 return true;
1543 }
1544
1545 /*
1546 * Loads an 8-bit constant directly into registers 16 to 31.
1547 */
1548 static bool trans_LDI(DisasContext *ctx, arg_LDI *a)
1549 {
1550 TCGv Rd = cpu_r[a->rd];
1551 int imm = a->imm;
1552
1553 tcg_gen_movi_tl(Rd, imm);
1554
1555 return true;
1556 }
1557
1558 /*
1559 * Loads one byte from the data space to a register. For parts with SRAM,
1560 * the data space consists of the Register File, I/O memory and internal SRAM
1561 * (and external SRAM if applicable). For parts without SRAM, the data space
1562 * consists of the register file only. The EEPROM has a separate address space.
1563 * A 16-bit address must be supplied. Memory access is limited to the current
1564 * data segment of 64KB. The LDS instruction uses the RAMPD Register to access
1565 * memory above 64KB. To access another data segment in devices with more than
1566 * 64KB data space, the RAMPD register in the I/O area has to be changed.
1567 * This instruction is not available in all devices. Refer to the device
1568 * specific instruction set summary.
1569 */
1570 static bool trans_LDS(DisasContext *ctx, arg_LDS *a)
1571 {
1572 TCGv Rd = cpu_r[a->rd];
1573 TCGv addr = tcg_temp_new_i32();
1574 TCGv H = cpu_rampD;
1575 a->imm = next_word(ctx);
1576
1577 tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
1578 tcg_gen_shli_tl(addr, addr, 16);
1579 tcg_gen_ori_tl(addr, addr, a->imm);
1580
1581 gen_data_load(ctx, Rd, addr);
1582 return true;
1583 }
1584
1585 /*
1586 * Loads one byte indirect from the data space to a register. For parts
1587 * with SRAM, the data space consists of the Register File, I/O memory and
1588 * internal SRAM (and external SRAM if applicable). For parts without SRAM, the
1589 * data space consists of the Register File only. In some parts the Flash
1590 * Memory has been mapped to the data space and can be read using this command.
1591 * The EEPROM has a separate address space. The data location is pointed to by
1592 * the X (16 bits) Pointer Register in the Register File. Memory access is
1593 * limited to the current data segment of 64KB. To access another data segment
1594 * in devices with more than 64KB data space, the RAMPX register in the I/O
1595 * area has to be changed. The X-pointer Register can either be left unchanged
1596 * by the operation, or it can be post-incremented or predecremented. These
1597 * features are especially suited for accessing arrays, tables, and Stack
1598 * Pointer usage of the X-pointer Register. Note that only the low byte of the
1599 * X-pointer is updated in devices with no more than 256 bytes data space. For
1600 * such devices, the high byte of the pointer is not used by this instruction
1601 * and can be used for other purposes. The RAMPX Register in the I/O area is
1602 * updated in parts with more than 64KB data space or more than 64KB Program
1603 * memory, and the increment/decrement is added to the entire 24-bit address on
1604 * such devices. Not all variants of this instruction are available in all
1605 * devices. Refer to the device specific instruction set summary. In the
1606 * Reduced Core tinyAVR the LD instruction can be used to achieve the same
1607 * operation as LPM since the program memory is mapped to the data memory
1608 * space.
1609 */
1610 static bool trans_LDX1(DisasContext *ctx, arg_LDX1 *a)
1611 {
1612 TCGv Rd = cpu_r[a->rd];
1613 TCGv addr = gen_get_xaddr();
1614
1615 gen_data_load(ctx, Rd, addr);
1616 return true;
1617 }
1618
1619 static bool trans_LDX2(DisasContext *ctx, arg_LDX2 *a)
1620 {
1621 TCGv Rd = cpu_r[a->rd];
1622 TCGv addr = gen_get_xaddr();
1623
1624 gen_data_load(ctx, Rd, addr);
1625 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
1626
1627 gen_set_xaddr(addr);
1628 return true;
1629 }
1630
1631 static bool trans_LDX3(DisasContext *ctx, arg_LDX3 *a)
1632 {
1633 TCGv Rd = cpu_r[a->rd];
1634 TCGv addr = gen_get_xaddr();
1635
1636 tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
1637 gen_data_load(ctx, Rd, addr);
1638 gen_set_xaddr(addr);
1639 return true;
1640 }
1641
1642 /*
1643 * Loads one byte indirect with or without displacement from the data space
1644 * to a register. For parts with SRAM, the data space consists of the Register
1645 * File, I/O memory and internal SRAM (and external SRAM if applicable). For
1646 * parts without SRAM, the data space consists of the Register File only. In
1647 * some parts the Flash Memory has been mapped to the data space and can be
1648 * read using this command. The EEPROM has a separate address space. The data
1649 * location is pointed to by the Y (16 bits) Pointer Register in the Register
1650 * File. Memory access is limited to the current data segment of 64KB. To
1651 * access another data segment in devices with more than 64KB data space, the
1652 * RAMPY register in the I/O area has to be changed. The Y-pointer Register
1653 * can either be left unchanged by the operation, or it can be post-incremented
1654 * or predecremented. These features are especially suited for accessing
1655 * arrays, tables, and Stack Pointer usage of the Y-pointer Register. Note that
1656 * only the low byte of the Y-pointer is updated in devices with no more than
1657 * 256 bytes data space. For such devices, the high byte of the pointer is not
1658 * used by this instruction and can be used for other purposes. The RAMPY
1659 * Register in the I/O area is updated in parts with more than 64KB data space
1660 * or more than 64KB Program memory, and the increment/decrement/displacement
1661 * is added to the entire 24-bit address on such devices. Not all variants of
1662 * this instruction are available in all devices. Refer to the device specific
1663 * instruction set summary. In the Reduced Core tinyAVR the LD instruction can
1664 * be used to achieve the same operation as LPM since the program memory is
1665 * mapped to the data memory space.
1666 */
1667 static bool trans_LDY2(DisasContext *ctx, arg_LDY2 *a)
1668 {
1669 TCGv Rd = cpu_r[a->rd];
1670 TCGv addr = gen_get_yaddr();
1671
1672 gen_data_load(ctx, Rd, addr);
1673 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
1674
1675 gen_set_yaddr(addr);
1676 return true;
1677 }
1678
1679 static bool trans_LDY3(DisasContext *ctx, arg_LDY3 *a)
1680 {
1681 TCGv Rd = cpu_r[a->rd];
1682 TCGv addr = gen_get_yaddr();
1683
1684 tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
1685 gen_data_load(ctx, Rd, addr);
1686 gen_set_yaddr(addr);
1687 return true;
1688 }
1689
1690 static bool trans_LDDY(DisasContext *ctx, arg_LDDY *a)
1691 {
1692 TCGv Rd = cpu_r[a->rd];
1693 TCGv addr = gen_get_yaddr();
1694
1695 tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
1696 gen_data_load(ctx, Rd, addr);
1697 return true;
1698 }
1699
1700 /*
1701 * Loads one byte indirect with or without displacement from the data space
1702 * to a register. For parts with SRAM, the data space consists of the Register
1703 * File, I/O memory and internal SRAM (and external SRAM if applicable). For
1704 * parts without SRAM, the data space consists of the Register File only. In
1705 * some parts the Flash Memory has been mapped to the data space and can be
1706 * read using this command. The EEPROM has a separate address space. The data
1707 * location is pointed to by the Z (16 bits) Pointer Register in the Register
1708 * File. Memory access is limited to the current data segment of 64KB. To
1709 * access another data segment in devices with more than 64KB data space, the
1710 * RAMPZ register in the I/O area has to be changed. The Z-pointer Register
1711 * can either be left unchanged by the operation, or it can be post-incremented
1712 * or predecremented. These features are especially suited for Stack Pointer
1713 * usage of the Z-pointer Register, however because the Z-pointer Register can
1714 * be used for indirect subroutine calls, indirect jumps and table lookup, it
1715 * is often more convenient to use the X or Y-pointer as a dedicated Stack
1716 * Pointer. Note that only the low byte of the Z-pointer is updated in devices
1717 * with no more than 256 bytes data space. For such devices, the high byte of
1718 * the pointer is not used by this instruction and can be used for other
1719 * purposes. The RAMPZ Register in the I/O area is updated in parts with more
1720 * than 64KB data space or more than 64KB Program memory, and the
1721 * increment/decrement/displacement is added to the entire 24-bit address on
1722 * such devices. Not all variants of this instruction are available in all
1723 * devices. Refer to the device specific instruction set summary. In the
1724 * Reduced Core tinyAVR the LD instruction can be used to achieve the same
1725 * operation as LPM since the program memory is mapped to the data memory
1726 * space. For using the Z-pointer for table lookup in Program memory see the
1727 * LPM and ELPM instructions.
1728 */
1729 static bool trans_LDZ2(DisasContext *ctx, arg_LDZ2 *a)
1730 {
1731 TCGv Rd = cpu_r[a->rd];
1732 TCGv addr = gen_get_zaddr();
1733
1734 gen_data_load(ctx, Rd, addr);
1735 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
1736
1737 gen_set_zaddr(addr);
1738 return true;
1739 }
1740
1741 static bool trans_LDZ3(DisasContext *ctx, arg_LDZ3 *a)
1742 {
1743 TCGv Rd = cpu_r[a->rd];
1744 TCGv addr = gen_get_zaddr();
1745
1746 tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
1747 gen_data_load(ctx, Rd, addr);
1748
1749 gen_set_zaddr(addr);
1750 return true;
1751 }
1752
1753 static bool trans_LDDZ(DisasContext *ctx, arg_LDDZ *a)
1754 {
1755 TCGv Rd = cpu_r[a->rd];
1756 TCGv addr = gen_get_zaddr();
1757
1758 tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
1759 gen_data_load(ctx, Rd, addr);
1760 return true;
1761 }
1762
1763 /*
1764 * Stores one byte from a Register to the data space. For parts with SRAM,
1765 * the data space consists of the Register File, I/O memory and internal SRAM
1766 * (and external SRAM if applicable). For parts without SRAM, the data space
1767 * consists of the Register File only. The EEPROM has a separate address space.
1768 * A 16-bit address must be supplied. Memory access is limited to the current
1769 * data segment of 64KB. The STS instruction uses the RAMPD Register to access
1770 * memory above 64KB. To access another data segment in devices with more than
1771 * 64KB data space, the RAMPD Register in the I/O area has to be changed.
1772 * This instruction is not available in all devices. Refer to the device
1773 * specific instruction set summary.
1774 */
1775 static bool trans_STS(DisasContext *ctx, arg_STS *a)
1776 {
1777 TCGv Rd = cpu_r[a->rd];
1778 TCGv addr = tcg_temp_new_i32();
1779 TCGv H = cpu_rampD;
1780 a->imm = next_word(ctx);
1781
1782 tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
1783 tcg_gen_shli_tl(addr, addr, 16);
1784 tcg_gen_ori_tl(addr, addr, a->imm);
1785 gen_data_store(ctx, Rd, addr);
1786 return true;
1787 }
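
/*
 * A simplified view of what trans_STS above generates: the 16-bit
 * address k comes from the second opcode word and RAMPD supplies the
 * upper bits on large parts:
 *
 *     k          = next_word(ctx);    // second word of the instruction
 *     addr       = (RAMPD << 16) | k; // H:M:L, as built above
 *     data[addr] = Rd;                // STS k, Rd
 */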
1788
1789 /*
1790 * Stores one byte indirect from a register to data space. For parts with SRAM,
1791 * the data space consists of the Register File, I/O memory, and internal SRAM
1792 * (and external SRAM if applicable). For parts without SRAM, the data space
1793 * consists of the Register File only. The EEPROM has a separate address space.
1794 *
1795 * The data location is pointed to by the X (16 bits) Pointer Register in the
1796 * Register File. Memory access is limited to the current data segment of 64KB.
1797 * To access another data segment in devices with more than 64KB data space, the
1798 * RAMPX Register in the I/O area has to be changed.
1799 *
1800 * The X-pointer Register can either be left unchanged by the operation, or it
1801 * can be post-incremented or pre-decremented. These features are especially
1802 * suited for accessing arrays, tables, and Stack Pointer usage of the
1803 * X-pointer Register. Note that only the low byte of the X-pointer is updated
1804 * in devices with no more than 256 bytes data space. For such devices, the high
1805 * byte of the pointer is not used by this instruction and can be used for other
1806 * purposes. The RAMPX Register in the I/O area is updated in parts with more
1807 * than 64KB data space or more than 64KB Program memory, and the increment /
1808 * decrement is added to the entire 24-bit address on such devices.
1809 */
1810 static bool trans_STX1(DisasContext *ctx, arg_STX1 *a)
1811 {
1812 TCGv Rd = cpu_r[a->rr];
1813 TCGv addr = gen_get_xaddr();
1814
1815 gen_data_store(ctx, Rd, addr);
1816 return true;
1817 }
1818
1819 static bool trans_STX2(DisasContext *ctx, arg_STX2 *a)
1820 {
1821 TCGv Rd = cpu_r[a->rr];
1822 TCGv addr = gen_get_xaddr();
1823
1824 gen_data_store(ctx, Rd, addr);
1825 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
1826 gen_set_xaddr(addr);
1827 return true;
1828 }
1829
1830 static bool trans_STX3(DisasContext *ctx, arg_STX3 *a)
1831 {
1832 TCGv Rd = cpu_r[a->rr];
1833 TCGv addr = gen_get_xaddr();
1834
1835 tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
1836 gen_data_store(ctx, Rd, addr);
1837 gen_set_xaddr(addr);
1838 return true;
1839 }
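
/*
 * Roughly, the three ST-via-X forms above behave as:
 *
 *     ST X, Rr  : data[X] = Rr;              // X unchanged
 *     ST X+, Rr : data[X] = Rr; X = X + 1;   // post-increment
 *     ST -X, Rr : X = X - 1; data[X] = Rr;   // pre-decrement
 *
 * where X is RAMPX:r27:r26 on parts with more than 64KB of data space.
 */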
1840
1841 /*
1842 * Stores one byte indirect with or without displacement from a register to data
1843 * space. For parts with SRAM, the data space consists of the Register File, I/O
1844 * memory, and internal SRAM (and external SRAM if applicable). For parts
1845 * without SRAM, the data space consists of the Register File only. The EEPROM
1846 * has a separate address space.
1847 *
1848 * The data location is pointed to by the Y (16 bits) Pointer Register in the
1849 * Register File. Memory access is limited to the current data segment of 64KB.
1850 * To access another data segment in devices with more than 64KB data space, the
1851 * RAMPY Register in the I/O area has to be changed.
1852 *
1853 * The Y-pointer Register can either be left unchanged by the operation, or it
1854 * can be post-incremented or pre-decremented. These features are especially
1855 * suited for accessing arrays, tables, and Stack Pointer usage of the Y-pointer
1856 * Register. Note that only the low byte of the Y-pointer is updated in devices
1857 * with no more than 256 bytes data space. For such devices, the high byte of
1858 * the pointer is not used by this instruction and can be used for other
1859 * purposes. The RAMPY Register in the I/O area is updated in parts with more
1860 * than 64KB data space or more than 64KB Program memory, and the increment /
1861 * decrement / displacement is added to the entire 24-bit address on such
1862 * devices.
1863 */
1864 static bool trans_STY2(DisasContext *ctx, arg_STY2 *a)
1865 {
1866 TCGv Rd = cpu_r[a->rd];
1867 TCGv addr = gen_get_yaddr();
1868
1869 gen_data_store(ctx, Rd, addr);
1870 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
1871 gen_set_yaddr(addr);
1872 return true;
1873 }
1874
1875 static bool trans_STY3(DisasContext *ctx, arg_STY3 *a)
1876 {
1877 TCGv Rd = cpu_r[a->rd];
1878 TCGv addr = gen_get_yaddr();
1879
1880 tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
1881 gen_data_store(ctx, Rd, addr);
1882 gen_set_yaddr(addr);
1883 return true;
1884 }
1885
1886 static bool trans_STDY(DisasContext *ctx, arg_STDY *a)
1887 {
1888 TCGv Rd = cpu_r[a->rd];
1889 TCGv addr = gen_get_yaddr();
1890
1891 tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
1892 gen_data_store(ctx, Rd, addr);
1893 return true;
1894 }
1895
1896 /*
1897 * Stores one byte indirect with or without displacement from a register to data
1898 * space. For parts with SRAM, the data space consists of the Register File, I/O
1899 * memory, and internal SRAM (and external SRAM if applicable). For parts
1900 * without SRAM, the data space consists of the Register File only. The EEPROM
1901 * has a separate address space.
1902 *
1903 * The data location is pointed to by the Z (16 bits) Pointer Register in the
1904 * Register File. Memory access is limited to the current data segment of 64KB.
1905 * To access another data segment in devices with more than 64KB data space, the
1906 * RAMPZ Register in the I/O area has to be changed.
1907 *
1908 * The Z-pointer Register can either be left unchanged by the operation, or it
1909 * can be post-incremented or pre-decremented. These features are especially
1910 * suited for accessing arrays, tables, and Stack Pointer usage of the Z-pointer
1911 * Register. Note that only the low byte of the Z-pointer is updated in devices
1912 * with no more than 256 bytes data space. For such devices, the high byte of
1913 * the pointer is not used by this instruction and can be used for other
1914 * purposes. The RAMPZ Register in the I/O area is updated in parts with more
1915 * than 64KB data space or more than 64KB Program memory, and the increment /
1916 * decrement / displacement is added to the entire 24-bit address on such
1917 * devices.
1918 */
1919 static bool trans_STZ2(DisasContext *ctx, arg_STZ2 *a)
1920 {
1921 TCGv Rd = cpu_r[a->rd];
1922 TCGv addr = gen_get_zaddr();
1923
1924 gen_data_store(ctx, Rd, addr);
1925 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
1926
1927 gen_set_zaddr(addr);
1928 return true;
1929 }
1930
1931 static bool trans_STZ3(DisasContext *ctx, arg_STZ3 *a)
1932 {
1933 TCGv Rd = cpu_r[a->rd];
1934 TCGv addr = gen_get_zaddr();
1935
1936 tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
1937 gen_data_store(ctx, Rd, addr);
1938
1939 gen_set_zaddr(addr);
1940 return true;
1941 }
1942
1943 static bool trans_STDZ(DisasContext *ctx, arg_STDZ *a)
1944 {
1945 TCGv Rd = cpu_r[a->rd];
1946 TCGv addr = gen_get_zaddr();
1947
1948 tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
1949 gen_data_store(ctx, Rd, addr);
1950 return true;
1951 }
1952
1953 /*
1954 * Loads one byte pointed to by the Z-register into the destination
1955 * register Rd. This instruction features a 100% space effective constant
1956 * initialization or constant data fetch. The Program memory is organized in
1957 * 16-bit words while the Z-pointer is a byte address. Thus, the least
1958 * significant bit of the Z-pointer selects either low byte (ZLSB = 0) or high
1959 * byte (ZLSB = 1). This instruction can address the first 64KB (32K words) of
1960 * Program memory. The Z-pointer Register can either be left unchanged by the
1961 * operation, or it can be incremented. The incrementation does not apply to
1962 * the RAMPZ Register.
1963 *
1964 * Devices with Self-Programming capability can use the LPM instruction to read
1965 * the Fuse and Lock bit values.
1966 */
1967 static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)
1968 {
1969 if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
1970 return true;
1971 }
1972
1973 TCGv Rd = cpu_r[0];
1974 TCGv addr = tcg_temp_new_i32();
1975 TCGv H = cpu_r[31];
1976 TCGv L = cpu_r[30];
1977
1978 tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
1979 tcg_gen_or_tl(addr, addr, L);
1980 tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
1981 return true;
1982 }
1983
1984 static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)
1985 {
1986 if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
1987 return true;
1988 }
1989
1990 TCGv Rd = cpu_r[a->rd];
1991 TCGv addr = tcg_temp_new_i32();
1992 TCGv H = cpu_r[31];
1993 TCGv L = cpu_r[30];
1994
1995 tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
1996 tcg_gen_or_tl(addr, addr, L);
1997 tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
1998 return true;
1999 }
2000
2001 static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)
2002 {
2003 if (!avr_have_feature(ctx, AVR_FEATURE_LPMX)) {
2004 return true;
2005 }
2006
2007 TCGv Rd = cpu_r[a->rd];
2008 TCGv addr = tcg_temp_new_i32();
2009 TCGv H = cpu_r[31];
2010 TCGv L = cpu_r[30];
2011
2012 tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
2013 tcg_gen_or_tl(addr, addr, L);
2014 tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
2015 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
2016 tcg_gen_andi_tl(L, addr, 0xff);
2017 tcg_gen_shri_tl(addr, addr, 8);
2018 tcg_gen_andi_tl(H, addr, 0xff);
2019 return true;
2020 }
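
/*
 * A rough sketch of the LPM forms above: Z is a byte address into
 * program memory, and only LPM Rd, Z+ writes Z back, split into its
 * r31:r30 halves:
 *
 *     addr = (r31 << 8) | r30;        // 16-bit Z, RAMPZ not used
 *     LPM        : r0 = prog[addr];
 *     LPM Rd, Z  : Rd = prog[addr];
 *     LPM Rd, Z+ : Rd = prog[addr]; addr = addr + 1;
 *                  r30 = addr & 0xff; r31 = (addr >> 8) & 0xff;
 */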
2021
2022 /*
2023 * Loads one byte pointed to by the Z-register and the RAMPZ Register in
2024 * the I/O space, and places this byte in the destination register Rd. This
2025 * instruction features a 100% space effective constant initialization or
2026 * constant data fetch. The Program memory is organized in 16-bit words while
2027 * the Z-pointer is a byte address. Thus, the least significant bit of the
2028 * Z-pointer selects either low byte (ZLSB = 0) or high byte (ZLSB = 1). This
2029 * instruction can address the entire Program memory space. The Z-pointer
2030 * Register can either be left unchanged by the operation, or it can be
2031 * incremented. The incrementation applies to the entire 24-bit concatenation
2032 * of the RAMPZ and Z-pointer Registers.
2033 *
2034 * Devices with Self-Programming capability can use the ELPM instruction to
2035 * read the Fuse and Lock bit value.
2036 */
2037 static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
2038 {
2039 if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
2040 return true;
2041 }
2042
2043 TCGv Rd = cpu_r[0];
2044 TCGv addr = gen_get_zaddr();
2045
2046 tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
2047 return true;
2048 }
2049
2050 static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
2051 {
2052 if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
2053 return true;
2054 }
2055
2056 TCGv Rd = cpu_r[a->rd];
2057 TCGv addr = gen_get_zaddr();
2058
2059 tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
2060 return true;
2061 }
2062
2063 static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
2064 {
2065 if (!avr_have_feature(ctx, AVR_FEATURE_ELPMX)) {
2066 return true;
2067 }
2068
2069 TCGv Rd = cpu_r[a->rd];
2070 TCGv addr = gen_get_zaddr();
2071
2072 tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
2073 tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
2074 gen_set_zaddr(addr);
2075 return true;
2076 }
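
/*
 * By contrast with LPM, the ELPM forms above use the full 24-bit
 * pointer, roughly:
 *
 *     addr = (RAMPZ << 16) | (r31 << 8) | r30;   // gen_get_zaddr()
 *     Rd   = prog[addr];                         // r0 for ELPM1
 *     ELPM Rd, Z+ also does: addr = addr + 1, written back to RAMPZ:Z
 */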
2077
2078 /*
2079 * SPM can be used to erase a page in the Program memory, to write a page
2080 * in the Program memory (that is already erased), and to set Boot Loader Lock
2081 * bits. In some devices, the Program memory can be written one word at a time,
2082 * in other devices an entire page can be programmed simultaneously after first
2083 * filling a temporary page buffer. In all cases, the Program memory must be
2084 * erased one page at a time. When erasing the Program memory, the RAMPZ and
2085 * Z-register are used as page address. When writing the Program memory, the
2086 * RAMPZ and Z-register are used as page or word address, and the R1:R0
2087 * register pair is used as data(1). When setting the Boot Loader Lock bits,
2088 * the R1:R0 register pair is used as data. Refer to the device documentation
2089 * for detailed description of SPM usage. This instruction can address the
2090 * entire Program memory.
2091 *
2092 * The SPM instruction is not available in all devices. Refer to the device
2093 * specific instruction set summary.
2094 *
2095 * Note: 1. R1 determines the instruction high byte, and R0 determines the
2096 * instruction low byte.
2097 */
2098 static bool trans_SPM(DisasContext *ctx, arg_SPM *a)
2099 {
2100 /* TODO */
2101 if (!avr_have_feature(ctx, AVR_FEATURE_SPM)) {
2102 return true;
2103 }
2104
2105 return true;
2106 }
2107
2108 static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a)
2109 {
2110 /* TODO */
2111 if (!avr_have_feature(ctx, AVR_FEATURE_SPMX)) {
2112 return true;
2113 }
2114
2115 return true;
2116 }
2117
2118 /*
2119 * Loads data from the I/O Space (Ports, Timers, Configuration Registers,
2120 * etc.) into register Rd in the Register File.
2121 */
2122 static bool trans_IN(DisasContext *ctx, arg_IN *a)
2123 {
2124 TCGv Rd = cpu_r[a->rd];
2125 TCGv port = tcg_const_i32(a->imm);
2126
2127 gen_helper_inb(Rd, cpu_env, port);
2128 return true;
2129 }
2130
2131 /*
2132 * Stores data from register Rr in the Register File to I/O Space (Ports,
2133 * Timers, Configuration Registers, etc.).
2134 */
2135 static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
2136 {
2137 TCGv Rd = cpu_r[a->rd];
2138 TCGv port = tcg_const_i32(a->imm);
2139
2140 gen_helper_outb(cpu_env, port, Rd);
2141 return true;
2142 }
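
/*
 * Both translations above defer to the I/O helpers; the effective
 * operation on the I/O address A (0..63) is simply:
 *
 *     IN  Rd, A : Rd = io[A];      // gen_helper_inb
 *     OUT A, Rr : io[A] = Rr;      // gen_helper_outb
 */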
2143
2144 /*
2145 * This instruction stores the contents of register Rr on the STACK. The
2146 * Stack Pointer is post-decremented by 1 after the PUSH. This instruction is
2147 * not available in all devices. Refer to the device specific instruction set
2148 * summary.
2149 */
2150 static bool trans_PUSH(DisasContext *ctx, arg_PUSH *a)
2151 {
2152 TCGv Rd = cpu_r[a->rd];
2153
2154 gen_data_store(ctx, Rd, cpu_sp);
2155 tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
2156
2157 return true;
2158 }
2159
2160 /*
2161 * This instruction loads register Rd with a byte from the STACK. The Stack
2162 * Pointer is pre-incremented by 1 before the POP. This instruction is not
2163 * available in all devices. Refer to the device specific instruction set
2164 * summary.
2165 */
2166 static bool trans_POP(DisasContext *ctx, arg_POP *a)
2167 {
2168 /*
2169 * Using a temp to work around some strange behaviour:
2170 * tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
2171 * gen_data_load(ctx, Rd, cpu_sp);
2172 * seems to cause the add to happen twice.
2173 * This doesn't happen if either the add or the load is removed.
2174 */
2175 TCGv t1 = tcg_temp_new_i32();
2176 TCGv Rd = cpu_r[a->rd];
2177
2178 tcg_gen_addi_tl(t1, cpu_sp, 1);
2179 gen_data_load(ctx, Rd, t1);
2180 tcg_gen_mov_tl(cpu_sp, t1);
2181
2182 return true;
2183 }
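
/*
 * In pseudo-C, the stack operations above are:
 *
 *     PUSH Rr : data[SP] = Rr; SP = SP - 1;   // post-decrement
 *     POP Rd  : SP = SP + 1; Rd = data[SP];   // pre-increment
 */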
2184
2185 /*
2186 * Exchanges one byte indirect between register and data space. The data
2187 * location is pointed to by the Z (16 bits) Pointer Register in the Register
2188 * File. Memory access is limited to the current data segment of 64KB. To
2189 * access another data segment in devices with more than 64KB data space, the
2190 * RAMPZ Register in the I/O area has to be changed.
2191 *
2192 * The Z-pointer Register is left unchanged by the operation. This instruction
2193 * is especially suited for writing/reading status bits stored in SRAM.
2194 */
2195 static bool trans_XCH(DisasContext *ctx, arg_XCH *a)
2196 {
2197 if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
2198 return true;
2199 }
2200
2201 TCGv Rd = cpu_r[a->rd];
2202 TCGv t0 = tcg_temp_new_i32();
2203 TCGv addr = gen_get_zaddr();
2204
2205 gen_data_load(ctx, t0, addr);
2206 gen_data_store(ctx, Rd, addr);
2207 tcg_gen_mov_tl(Rd, t0);
2208 return true;
2209 }
2210
2211 /*
2212 * Loads one byte indirect from data space to a register and sets bits in data
2213 * space specified by the register. The instruction can only be used towards
2214 * internal SRAM. The data location is pointed to by the Z (16 bits) Pointer
2215 * Register in the Register File. Memory access is limited to the current data
2216 * segment of 64KB. To access another data segment in devices with more than
2217 * 64KB data space, the RAMPZ Register in the I/O area has to be changed.
2218 *
2219 * The Z-pointer Register is left unchanged by the operation. This instruction
2220 * is especially suited for setting status bits stored in SRAM.
2221 */
2222 static bool trans_LAS(DisasContext *ctx, arg_LAS *a)
2223 {
2224 if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
2225 return true;
2226 }
2227
2228 TCGv Rr = cpu_r[a->rd];
2229 TCGv addr = gen_get_zaddr();
2230 TCGv t0 = tcg_temp_new_i32();
2231 TCGv t1 = tcg_temp_new_i32();
2232
2233 gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
2234 tcg_gen_or_tl(t1, t0, Rr);
2235 tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */
2236 gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
2237 return true;
2238 }
2239
2240 /*
2241 * Loads one byte indirect from data space to a register and clears the
2242 * bits in data space specified by the register. The instruction can
2243 * only be used towards internal SRAM. The data location is pointed to by
2244 * the Z (16 bits) Pointer Register in the Register File. Memory access is
2245 * limited to the current data segment of 64KB. To access another data
2246 * segment in devices with more than 64KB data space, the RAMPZ in register
2247 * in the I/O area has to be changed.
2248 *
2249 * The Z-pointer Register is left unchanged by the operation. This instruction
2250 * is especially suited for clearing status bits stored in SRAM.
2251 */
2252 static bool trans_LAC(DisasContext *ctx, arg_LAC *a)
2253 {
2254 if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
2255 return true;
2256 }
2257
2258 TCGv Rr = cpu_r[a->rd];
2259 TCGv addr = gen_get_zaddr();
2260 TCGv t0 = tcg_temp_new_i32();
2261 TCGv t1 = tcg_temp_new_i32();
2262
2263 gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
2264 tcg_gen_andc_tl(t1, t0, Rr); /* t1 = t0 & (0xff - Rr) = t0 & ~Rr */
2265 tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */
2266 gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
2267 return true;
2268 }
2269
2270
2271 /*
2272 * Loads one byte indirect from data space to a register and toggles bits in
2273 * the data space specified by the register. The instruction can only be used
2274 * towards SRAM. The data location is pointed to by the Z (16 bits) Pointer
2275 * Register in the Register File. Memory access is limited to the current data
2276 * segment of 64KB. To access another data segment in devices with more than
2277 * 64KB data space, the RAMPZ Register in the I/O area has to be changed.
2278 *
2279 * The Z-pointer Register is left unchanged by the operation. This instruction
2280 * is especially suited for changing status bits stored in SRAM.
2281 */
2282 static bool trans_LAT(DisasContext *ctx, arg_LAT *a)
2283 {
2284 if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
2285 return true;
2286 }
2287
2288 TCGv Rd = cpu_r[a->rd];
2289 TCGv addr = gen_get_zaddr();
2290 TCGv t0 = tcg_temp_new_i32();
2291 TCGv t1 = tcg_temp_new_i32();
2292
2293 gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
2294 tcg_gen_xor_tl(t1, t0, Rd);
2295 tcg_gen_mov_tl(Rd, t0); /* Rd = t0 */
2296 gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
2297 return true;
2298 }
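
/*
 * The four read-modify-write instructions above (XCH, LAS, LAC, LAT)
 * share one pattern, roughly:
 *
 *     old     = data[Z];
 *     data[Z] = op(old, Rd);   // XCH: Rd, LAS: old | Rd,
 *                              // LAC: old & ~Rd, LAT: old ^ Rd
 *     Rd      = old;           // previous memory value ends up in Rd
 */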
2299
2300 /*
2301 * Bit and Bit-test Instructions
2302 */
2303 static void gen_rshift_ZNVSf(TCGv R)
2304 {
2305 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
2306 tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
2307 tcg_gen_xor_tl(cpu_Vf, cpu_Nf, cpu_Cf);
2308 tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
2309 }
2310
2311 /*
2312 * Shifts all bits in Rd one place to the right. Bit 7 is cleared. Bit 0 is
2313 * loaded into the C Flag of the SREG. This operation effectively divides an
2314 * unsigned value by two. The C Flag can be used to round the result.
2315 */
2316 static bool trans_LSR(DisasContext *ctx, arg_LSR *a)
2317 {
2318 TCGv Rd = cpu_r[a->rd];
2319
2320 tcg_gen_andi_tl(cpu_Cf, Rd, 1);
2321 tcg_gen_shri_tl(Rd, Rd, 1);
2322
2323 /* update status register */
2324 tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, Rd, 0); /* Zf = Rd == 0 */
2325 tcg_gen_movi_tl(cpu_Nf, 0);
2326 tcg_gen_mov_tl(cpu_Vf, cpu_Cf);
2327 tcg_gen_mov_tl(cpu_Sf, cpu_Vf);
2328
2329 return true;
2330 }
2331
2332 /*
2333 * Shifts all bits in Rd one place to the right. The C Flag is shifted into
2334 * bit 7 of Rd. Bit 0 is shifted into the C Flag. This operation, combined
2335 * with ASR, effectively divides multi-byte signed values by two. Combined with
2336 * LSR it effectively divides multi-byte unsigned values by two. The Carry Flag
2337 * can be used to round the result.
2338 */
2339 static bool trans_ROR(DisasContext *ctx, arg_ROR *a)
2340 {
2341 TCGv Rd = cpu_r[a->rd];
2342 TCGv t0 = tcg_temp_new_i32();
2343
2344 tcg_gen_shli_tl(t0, cpu_Cf, 7);
2345
2346 /* update status register */
2347 tcg_gen_andi_tl(cpu_Cf, Rd, 1);
2348
2349 /* update output register */
2350 tcg_gen_shri_tl(Rd, Rd, 1);
2351 tcg_gen_or_tl(Rd, Rd, t0);
2352
2353 /* update status register */
2354 gen_rshift_ZNVSf(Rd);
2355 return true;
2356 }
2357
2358 /*
2359 * Shifts all bits in Rd one place to the right. Bit 7 is held constant. Bit 0
2360 * is loaded into the C Flag of the SREG. This operation effectively divides a
2361 * signed value by two without changing its sign. The Carry Flag can be used to
2362 * round the result.
2363 */
2364 static bool trans_ASR(DisasContext *ctx, arg_ASR *a)
2365 {
2366 TCGv Rd = cpu_r[a->rd];
2367 TCGv t0 = tcg_temp_new_i32();
2368
2369 /* update status register */
2370 tcg_gen_andi_tl(cpu_Cf, Rd, 1); /* Cf = Rd(0) */
2371
2372 /* update output register */
2373 tcg_gen_andi_tl(t0, Rd, 0x80); /* Rd = (Rd & 0x80) | (Rd >> 1) */
2374 tcg_gen_shri_tl(Rd, Rd, 1);
2375 tcg_gen_or_tl(Rd, Rd, t0);
2376
2377 /* update status register */
2378 gen_rshift_ZNVSf(Rd);
2379 return true;
2380 }
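
/*
 * All three right shifts above (LSR, ROR, ASR) derive the flags the
 * same way, roughly:
 *
 *     Cf = old_Rd & 1;         // bit shifted out
 *     Zf = (new_Rd == 0);
 *     Nf = new_Rd >> 7;        // always 0 for LSR
 *     Vf = Nf ^ Cf;
 *     Sf = Nf ^ Vf;
 */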
2381
2382 /*
2383 * Swaps high and low nibbles in a register.
2384 */
2385 static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a)
2386 {
2387 TCGv Rd = cpu_r[a->rd];
2388 TCGv t0 = tcg_temp_new_i32();
2389 TCGv t1 = tcg_temp_new_i32();
2390
2391 tcg_gen_andi_tl(t0, Rd, 0x0f);
2392 tcg_gen_shli_tl(t0, t0, 4);
2393 tcg_gen_andi_tl(t1, Rd, 0xf0);
2394 tcg_gen_shri_tl(t1, t1, 4);
2395 tcg_gen_or_tl(Rd, t0, t1);
2396 return true;
2397 }
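
/*
 * For an 8-bit Rd the operation above is equivalent to:
 *
 *     Rd = ((Rd << 4) | (Rd >> 4)) & 0xff;
 */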
2398
2399 /*
2400 * Sets a specified bit in an I/O Register. This instruction operates on
2401 * the lower 32 I/O Registers -- addresses 0-31.
2402 */
2403 static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
2404 {
2405 TCGv data = tcg_temp_new_i32();
2406 TCGv port = tcg_const_i32(a->reg);
2407
2408 gen_helper_inb(data, cpu_env, port);
2409 tcg_gen_ori_tl(data, data, 1 << a->bit);
2410 gen_helper_outb(cpu_env, port, data);
2411 return true;
2412 }
2413
2414 /*
2415 * Clears a specified bit in an I/O Register. This instruction operates on
2416 * the lower 32 I/O Registers -- addresses 0-31.
2417 */
2418 static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
2419 {
2420 TCGv data = tcg_temp_new_i32();
2421 TCGv port = tcg_const_i32(a->reg);
2422
2423 gen_helper_inb(data, cpu_env, port);
2424 tcg_gen_andi_tl(data, data, ~(1 << a->bit));
2425 gen_helper_outb(cpu_env, port, data);
2426 return true;
2427 }
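
/*
 * SBI and CBI above are both translated as a read-modify-write of the
 * I/O location, roughly:
 *
 *     SBI A, b : io[A] = io[A] |  (1 << b);
 *     CBI A, b : io[A] = io[A] & ~(1 << b);
 */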
2428
2429 /*
2430 * Stores bit b from Rd to the T Flag in SREG (Status Register).
2431 */
2432 static bool trans_BST(DisasContext *ctx, arg_BST *a)
2433 {
2434 TCGv Rd = cpu_r[a->rd];
2435
2436 tcg_gen_andi_tl(cpu_Tf, Rd, 1 << a->bit);
2437 tcg_gen_shri_tl(cpu_Tf, cpu_Tf, a->bit);
2438
2439 return true;
2440 }
2441
2442 /*
2443 * Copies the T Flag in the SREG (Status Register) to bit b in register Rd.
2444 */
2445 static bool trans_BLD(DisasContext *ctx, arg_BLD *a)
2446 {
2447 TCGv Rd = cpu_r[a->rd];
2448 TCGv t1 = tcg_temp_new_i32();
2449
2450 tcg_gen_andi_tl(Rd, Rd, ~(1u << a->bit)); /* clear bit */
2451 tcg_gen_shli_tl(t1, cpu_Tf, a->bit); /* create mask */
2452 tcg_gen_or_tl(Rd, Rd, t1);
2453 return true;
2454 }
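
/*
 * In pseudo-C, the T-flag transfers above are:
 *
 *     BST Rd, b : Tf = (Rd >> b) & 1;
 *     BLD Rd, b : Rd = (Rd & ~(1 << b)) | (Tf << b);
 */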
2455
2456 /*
2457 * Sets a single Flag or bit in SREG.
2458 */
2459 static bool trans_BSET(DisasContext *ctx, arg_BSET *a)
2460 {
2461 switch (a->bit) {
2462 case 0x00:
2463 tcg_gen_movi_tl(cpu_Cf, 0x01);
2464 break;
2465 case 0x01:
2466 tcg_gen_movi_tl(cpu_Zf, 0x01);
2467 break;
2468 case 0x02:
2469 tcg_gen_movi_tl(cpu_Nf, 0x01);
2470 break;
2471 case 0x03:
2472 tcg_gen_movi_tl(cpu_Vf, 0x01);
2473 break;
2474 case 0x04:
2475 tcg_gen_movi_tl(cpu_Sf, 0x01);
2476 break;
2477 case 0x05:
2478 tcg_gen_movi_tl(cpu_Hf, 0x01);
2479 break;
2480 case 0x06:
2481 tcg_gen_movi_tl(cpu_Tf, 0x01);
2482 break;
2483 case 0x07:
2484 tcg_gen_movi_tl(cpu_If, 0x01);
2485 break;
2486 }
2487
2488 return true;
2489 }
2490
2491 /*
2492 * Clears a single Flag in SREG.
2493 */
2494 static bool trans_BCLR(DisasContext *ctx, arg_BCLR *a)
2495 {
2496 switch (a->bit) {
2497 case 0x00:
2498 tcg_gen_movi_tl(cpu_Cf, 0x00);
2499 break;
2500 case 0x01:
2501 tcg_gen_movi_tl(cpu_Zf, 0x00);
2502 break;
2503 case 0x02:
2504 tcg_gen_movi_tl(cpu_Nf, 0x00);
2505 break;
2506 case 0x03:
2507 tcg_gen_movi_tl(cpu_Vf, 0x00);
2508 break;
2509 case 0x04:
2510 tcg_gen_movi_tl(cpu_Sf, 0x00);
2511 break;
2512 case 0x05:
2513 tcg_gen_movi_tl(cpu_Hf, 0x00);
2514 break;
2515 case 0x06:
2516 tcg_gen_movi_tl(cpu_Tf, 0x00);
2517 break;
2518 case 0x07:
2519 tcg_gen_movi_tl(cpu_If, 0x00);
2520 break;
2521 }
2522
2523 return true;
2524 }
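
/*
 * The bit index decoded for BSET/BCLR follows the SREG layout, which is
 * why case 0 maps to cpu_Cf and case 7 to cpu_If:
 *
 *     bit:  7  6  5  4  3  2  1  0
 *     flag: I  T  H  S  V  N  Z  C
 */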
2525
2526 /*
2527 * MCU Control Instructions
2528 */
2529
2530 /*
2531 * The BREAK instruction is used by the On-chip Debug system, and is
2532 * normally not used in the application software. When the BREAK instruction is
2533 * executed, the AVR CPU is set in the Stopped Mode. This gives the On-chip
2534 * Debugger access to internal resources. If any Lock bits are set, or either
2535 * the JTAGEN or OCDEN Fuses are unprogrammed, the CPU will treat the BREAK
2536 * instruction as a NOP and will not enter the Stopped mode. This instruction
2537 * is not available in all devices. Refer to the device specific instruction
2538 * set summary.
2539 */
2540 static bool trans_BREAK(DisasContext *ctx, arg_BREAK *a)
2541 {
2542 if (!avr_have_feature(ctx, AVR_FEATURE_BREAK)) {
2543 return true;
2544 }
2545
2546 #ifdef BREAKPOINT_ON_BREAK
2547 tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
2548 gen_helper_debug(cpu_env);
2549 ctx->base.is_jmp = DISAS_EXIT;
2550 #else
2551 /* NOP */
2552 #endif
2553
2554 return true;
2555 }
2556
2557 /*
2558 * This instruction performs a single cycle No Operation.
2559 */
2560 static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
2561 {
2562
2563 /* NOP */
2564
2565 return true;
2566 }
2567
2568 /*
2569 * This instruction puts the MCU into the sleep mode defined by the MCU
2570 * Control Register.
2571 */
2572 static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
2573 {
2574 gen_helper_sleep(cpu_env);
2575 ctx->base.is_jmp = DISAS_NORETURN;
2576 return true;
2577 }
2578
2579 /*
2580 * This instruction resets the Watchdog Timer. This instruction must be
2581 * executed within a limited time given by the WD prescaler. See the Watchdog
2582 * Timer hardware specification.
2583 */
2584 static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
2585 {
2586 gen_helper_wdr(cpu_env);
2587
2588 return true;
2589 }
2590
2591 /*
2592 * Core translation mechanism functions:
2593 *
2594 * - translate()
2595 * - canonicalize_skip()
2596 * - gen_intermediate_code()
2597 * - restore_state_to_opc()
2598 *
2599 */
2600 static void translate(DisasContext *ctx)
2601 {
2602 uint32_t opcode = next_word(ctx);
2603
2604 if (!decode_insn(ctx, opcode)) {
2605 gen_helper_unsupported(cpu_env);
2606 ctx->base.is_jmp = DISAS_NORETURN;
2607 }
2608 }
2609
2610 /* Standardize the cpu_skip condition to NE. */
2611 static bool canonicalize_skip(DisasContext *ctx)
2612 {
2613 switch (ctx->skip_cond) {
2614 case TCG_COND_NEVER:
2615 /* Normal case: cpu_skip is known to be false. */
2616 return false;
2617
2618 case TCG_COND_ALWAYS:
2619 /*
2620 * Breakpoint case: cpu_skip is known to be true, via TB_FLAGS_SKIP.
2621 * The breakpoint is on the instruction being skipped, at the start
2622 * of the TranslationBlock. No need to update.
2623 */
2624 return false;
2625
2626 case TCG_COND_NE:
2627 if (ctx->skip_var1 == NULL) {
2628 tcg_gen_mov_tl(cpu_skip, ctx->skip_var0);
2629 } else {
2630 tcg_gen_xor_tl(cpu_skip, ctx->skip_var0, ctx->skip_var1);
2631 ctx->skip_var1 = NULL;
2632 }
2633 break;
2634
2635 default:
2636 /* Convert to a NE condition vs 0. */
2637 if (ctx->skip_var1 == NULL) {
2638 tcg_gen_setcondi_tl(ctx->skip_cond, cpu_skip, ctx->skip_var0, 0);
2639 } else {
2640 tcg_gen_setcond_tl(ctx->skip_cond, cpu_skip,
2641 ctx->skip_var0, ctx->skip_var1);
2642 ctx->skip_var1 = NULL;
2643 }
2644 ctx->skip_cond = TCG_COND_NE;
2645 break;
2646 }
2647 ctx->skip_var0 = cpu_skip;
2648 return true;
2649 }
2650
2651 static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2652 {
2653 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2654 CPUAVRState *env = cs->env_ptr;
2655 uint32_t tb_flags = ctx->base.tb->flags;
2656
2657 ctx->cs = cs;
2658 ctx->env = env;
2659 ctx->npc = ctx->base.pc_first / 2;
2660
2661 ctx->skip_cond = TCG_COND_NEVER;
2662 if (tb_flags & TB_FLAGS_SKIP) {
2663 ctx->skip_cond = TCG_COND_ALWAYS;
2664 ctx->skip_var0 = cpu_skip;
2665 }
2666
2667 if (tb_flags & TB_FLAGS_FULL_ACCESS) {
2668 /*
2669 * This flag is set by an ST/LD instruction; we will regenerate it ONLY
2670 * with mem/cpu memory access instead of mem access
2671 */
2672 ctx->base.max_insns = 1;
2673 }
2674 }
2675
2676 static void avr_tr_tb_start(DisasContextBase *db, CPUState *cs)
2677 {
2678 }
2679
2680 static void avr_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2681 {
2682 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2683
2684 tcg_gen_insn_start(ctx->npc);
2685 }
2686
2687 static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2688 {
2689 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2690 TCGLabel *skip_label = NULL;
2691
2692 /* Conditionally skip the next instruction, if indicated. */
2693 if (ctx->skip_cond != TCG_COND_NEVER) {
2694 skip_label = gen_new_label();
2695 if (ctx->skip_var0 == cpu_skip) {
2696 /*
2697 * Copy cpu_skip so that we may zero it before the branch.
2698 * This ensures that cpu_skip is non-zero after the label
2699 * if and only if the skipped insn itself sets a skip.
2700 */
2701 ctx->skip_var0 = tcg_temp_new();
2702 tcg_gen_mov_tl(ctx->skip_var0, cpu_skip);
2703 tcg_gen_movi_tl(cpu_skip, 0);
2704 }
2705 if (ctx->skip_var1 == NULL) {
2706 tcg_gen_brcondi_tl(ctx->skip_cond, ctx->skip_var0, 0, skip_label);
2707 } else {
2708 tcg_gen_brcond_tl(ctx->skip_cond, ctx->skip_var0,
2709 ctx->skip_var1, skip_label);
2710 ctx->skip_var1 = NULL;
2711 }
2712 ctx->skip_cond = TCG_COND_NEVER;
2713 ctx->skip_var0 = NULL;
2714 }
2715
2716 translate(ctx);
2717
2718 ctx->base.pc_next = ctx->npc * 2;
2719
2720 if (skip_label) {
2721 canonicalize_skip(ctx);
2722 gen_set_label(skip_label);
2723
2724 switch (ctx->base.is_jmp) {
2725 case DISAS_NORETURN:
2726 ctx->base.is_jmp = DISAS_CHAIN;
2727 break;
2728 case DISAS_NEXT:
2729 if (ctx->base.tb->flags & TB_FLAGS_SKIP) {
2730 ctx->base.is_jmp = DISAS_TOO_MANY;
2731 }
2732 break;
2733 default:
2734 break;
2735 }
2736 }
2737
2738 if (ctx->base.is_jmp == DISAS_NEXT) {
2739 target_ulong page_first = ctx->base.pc_first & TARGET_PAGE_MASK;
2740
2741 if ((ctx->base.pc_next - page_first) >= TARGET_PAGE_SIZE - 4) {
2742 ctx->base.is_jmp = DISAS_TOO_MANY;
2743 }
2744 }
2745 }
2746
2747 static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2748 {
2749 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2750 bool nonconst_skip = canonicalize_skip(ctx);
2751 /*
2752 * Because we disable interrupts while env->skip is set,
2753 * we must return to the main loop to re-evaluate afterward.
2754 */
2755 bool force_exit = ctx->base.tb->flags & TB_FLAGS_SKIP;
2756
2757 switch (ctx->base.is_jmp) {
2758 case DISAS_NORETURN:
2759 assert(!nonconst_skip);
2760 break;
2761 case DISAS_NEXT:
2762 case DISAS_TOO_MANY:
2763 case DISAS_CHAIN:
2764 if (!nonconst_skip && !force_exit) {
2765 /* Note gen_goto_tb checks singlestep. */
2766 gen_goto_tb(ctx, 1, ctx->npc);
2767 break;
2768 }
2769 tcg_gen_movi_tl(cpu_pc, ctx->npc);
2770 /* fall through */
2771 case DISAS_LOOKUP:
2772 if (!force_exit) {
2773 tcg_gen_lookup_and_goto_ptr();
2774 break;
2775 }
2776 /* fall through */
2777 case DISAS_EXIT:
2778 tcg_gen_exit_tb(NULL, 0);
2779 break;
2780 default:
2781 g_assert_not_reached();
2782 }
2783 }
2784
2785 static void avr_tr_disas_log(const DisasContextBase *dcbase,
2786 CPUState *cs, FILE *logfile)
2787 {
2788 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2789 target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
2790 }
2791
2792 static const TranslatorOps avr_tr_ops = {
2793 .init_disas_context = avr_tr_init_disas_context,
2794 .tb_start = avr_tr_tb_start,
2795 .insn_start = avr_tr_insn_start,
2796 .translate_insn = avr_tr_translate_insn,
2797 .tb_stop = avr_tr_tb_stop,
2798 .disas_log = avr_tr_disas_log,
2799 };
2800
2801 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
2802 target_ulong pc, void *host_pc)
2803 {
2804 DisasContext dc = { };
2805 translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
2806 }