/*
 * ARM Nested Vectored Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 *
 * The ARMv7M System controller is fairly tightly tied in with the
 * NVIC. Much of that is also implemented here.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "hw/arm/arm.h"
#include "hw/intc/armv7m_nvic.h"
#include "target/arm/cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "trace.h"

/* IRQ number counting:
 *
 * the num-irq property counts the number of external IRQ lines
 *
 * NVICState::num_irq counts the total number of exceptions
 * (external IRQs, the 15 internal exceptions including reset,
 * and one for the unused exception number 0).
 *
 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
 *
 * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
 *
 * Iterating through all exceptions should typically be done with
 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
 *
 * The external qemu_irq lines are the NVIC's external IRQ lines,
 * so line 0 is exception 16.
 *
 * In the terminology of the architecture manual, "interrupts" are
 * a subcategory of exception referring to the external interrupts
 * (which are exception numbers NVIC_FIRST_IRQ and upward).
 * For historical reasons QEMU tends to use "interrupt" and
 * "exception" more or less interchangeably.
 */
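/* As a concrete example, with the default num-irq of 64:
 * NVICState::num_irq is 80 (64 external lines plus the 16 architectural
 * vectors), external IRQ line 5 is exception number 21, and that line is
 * controlled by bit 5 of the first set/clear-enable and set/clear-pending
 * registers.
 */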
#define NVIC_FIRST_IRQ 16
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)

/* Effective running priority of the CPU when no exception is active
 * (higher than the highest possible priority value)
 */
#define NVIC_NOEXC_PRIO 0x100

static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};

static int nvic_pending_prio(NVICState *s)
{
    /* return the priority of the current pending interrupt,
     * or NVIC_NOEXC_PRIO if no interrupt is pending
     */
    return s->vectpending ? s->vectors[s->vectpending].prio : NVIC_NOEXC_PRIO;
}

/* Return the value of the ICSR RETTOBASE bit:
 * 1 if there is exactly one active exception
 * 0 if there is more than one active exception
 * UNKNOWN if there are no active exceptions (we choose 1,
 * which matches the choice Cortex-M3 is documented as making).
 *
 * NB: some versions of the documentation talk about this
 * counting "active exceptions other than the one shown by IPSR";
 * this is only different in the obscure corner case where guest
 * code has manually deactivated an exception and is about
 * to fail an exception-return integrity check. The definition
 * above is the one from the v8M ARM ARM and is also in line
 * with the behaviour documented for the Cortex-M3.
 */
static bool nvic_rettobase(NVICState *s)
{
    int irq, nhand = 0;

    for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
        if (s->vectors[irq].active) {
            nhand++;
            if (nhand == 2) {
                return 0;
            }
        }
    }

    return 1;
}

/* Return the value of the ICSR ISRPENDING bit:
 * 1 if an external interrupt is pending
 * 0 if no external interrupt is pending
 */
static bool nvic_isrpending(NVICState *s)
{
    int irq;

    /* We can shortcut if the highest priority pending interrupt
     * happens to be external or if there is nothing pending.
     */
    if (s->vectpending > NVIC_FIRST_IRQ) {
        return true;
    }
    if (s->vectpending == 0) {
        return false;
    }

    for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
        if (s->vectors[irq].pending) {
            return true;
        }
    }
    return false;
}

/* Return a mask word which clears the subpriority bits from
 * a priority value for an M-profile exception, leaving only
 * the group priority.
 */
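/* For example, with PRIGROUP == 2 the mask is ~7: an 8-bit priority of
 * 0x65 then has group priority 0x60 and subpriority 0x5.
 */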
static inline uint32_t nvic_gprio_mask(NVICState *s)
{
    return ~0U << (s->prigroup + 1);
}

/* Recompute vectpending and exception_prio */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;

    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    s->vectpending = pend_irq;
    s->exception_prio = active_prio & nvic_gprio_mask(s);

    trace_nvic_recompute_state(s->vectpending, s->exception_prio);
}

/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running;

    if (env->v7m.faultmask) {
        running = -1;
    } else if (env->v7m.primask) {
        running = 0;
    } else if (env->v7m.basepri > 0) {
        running = env->v7m.basepri & nvic_gprio_mask(s);
    } else {
        running = NVIC_NOEXC_PRIO; /* lower than any possible priority */
    }
    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}

bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    NVICState *s = opaque;

    return nvic_exec_prio(s) > nvic_pending_prio(s);
}

/* caller must call nvic_irq_update() after this */
static void set_prio(NVICState *s, unsigned irq, uint8_t prio)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    s->vectors[irq].prio = prio;

    trace_nvic_set_prio(irq, prio);
}

/* Recompute state and assert irq line accordingly.
 * Must be called after changes to:
 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
 *  prigroup
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /* Raise NVIC output if this IRQ would be taken, except that we
     * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
     * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
     * to those CPU registers don't cause us to recalculate the NVIC
     * pending info.
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    qemu_set_irq(s->excpout, lvl);
}

static void armv7m_nvic_clear_pending(void *opaque, int irq)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    vec = &s->vectors[irq];
    trace_nvic_clear_pending(irq, vec->enabled, vec->prio);
    if (vec->pending) {
        vec->pending = 0;
        nvic_irq_update(s);
    }
}

void armv7m_nvic_set_pending(void *opaque, int irq)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    vec = &s->vectors[irq];
    trace_nvic_set_pending(irq, vec->enabled, vec->prio);

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (vec->prio >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {
            if (running < 0) {
                /* We want to escalate to HardFault but we can't take a
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* We can do the escalation, so we take HardFault instead */
            irq = ARMV7M_EXCP_HARD;
            vec = &s->vectors[irq];
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}

/* Make pending IRQ active. */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    int pendgroupprio;
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    vec = &s->vectors[pending];

    assert(vec->enabled);
    assert(vec->pending);

    pendgroupprio = vec->prio & nvic_gprio_mask(s);
    assert(pendgroupprio < running);

    trace_nvic_acknowledge_irq(pending, vec->prio);

    vec->active = 1;
    vec->pending = 0;

    env->v7m.exception = s->vectpending;

    nvic_irq_update(s);
}

int armv7m_nvic_complete_irq(void *opaque, int irq)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;
    int ret;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    vec = &s->vectors[irq];

    trace_nvic_complete_irq(irq);

    if (!vec->active) {
        /* Tell the caller this was an illegal exception return */
        return -1;
    }

    ret = nvic_rettobase(s);

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}

/* callback when external interrupt line is changed */
static void set_irq_level(void *opaque, int n, int level)
{
    NVICState *s = opaque;
    VecInfo *vec;

    n += NVIC_FIRST_IRQ;

    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);

    trace_nvic_set_irq_level(n, level);

    /* The pending status of an external interrupt is
     * latched on rising edge and exception handler return.
     *
     * Pulsing the IRQ will always run the handler
     * once, and the handler will re-run until the
     * level is low when the handler completes.
     */
    vec = &s->vectors[n];
    if (level != vec->level) {
        vec->level = level;
        if (level) {
            armv7m_nvic_set_pending(s, n);
        }
    }
}

static uint32_t nvic_readl(NVICState *s, uint32_t offset)
{
    ARMCPU *cpu = s->cpu;
    uint32_t val;

    switch (offset) {
    case 4: /* Interrupt Control Type. */
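        /* ICTR.INTLINESNUM encodes the number of supported external
         * interrupt lines in granules of 32 (a value of N means up to
         * 32 * (N + 1) lines), so the default 64-IRQ configuration
         * reads back as 1.
         */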
        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
    case 0xd00: /* CPUID Base. */
        return cpu->midr;
    case 0xd04: /* Interrupt Control State. */
        /* VECTACTIVE */
        val = cpu->env.v7m.exception;
        /* VECTPENDING */
        val |= (s->vectpending & 0xff) << 12;
        /* ISRPENDING - set if any external IRQ is pending */
        if (nvic_isrpending(s)) {
            val |= (1 << 22);
        }
        /* RETTOBASE - set if only one handler is active */
        if (nvic_rettobase(s)) {
            val |= (1 << 11);
        }
        /* PENDSTSET */
        if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
            val |= (1 << 26);
        }
        /* PENDSVSET */
        if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
            val |= (1 << 28);
        }
        /* NMIPENDSET */
        if (s->vectors[ARMV7M_EXCP_NMI].pending) {
            val |= (1 << 31);
        }
        /* ISRPREEMPT not implemented */
        return val;
    case 0xd08: /* Vector Table Offset. */
        return cpu->env.v7m.vecbase;
    case 0xd0c: /* Application Interrupt/Reset Control. */
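        /* Reads return VECTKEYSTAT (0xfa05) in the upper halfword plus
         * the PRIGROUP field in bits [10:8]; the other bits read as
         * zero here.
         */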
        return 0xfa050000 | (s->prigroup << 8);
    case 0xd10: /* System Control. */
        /* TODO: Implement SLEEPONEXIT. */
        return 0;
    case 0xd14: /* Configuration Control. */
        return cpu->env.v7m.ccr;
    case 0xd24: /* System Handler Status. */
        val = 0;
        if (s->vectors[ARMV7M_EXCP_MEM].active) {
            val |= (1 << 0);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].active) {
            val |= (1 << 1);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].active) {
            val |= (1 << 3);
        }
        if (s->vectors[ARMV7M_EXCP_SVC].active) {
            val |= (1 << 7);
        }
        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
            val |= (1 << 8);
        }
        if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
            val |= (1 << 10);
        }
        if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
            val |= (1 << 11);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
            val |= (1 << 12);
        }
        if (s->vectors[ARMV7M_EXCP_MEM].pending) {
            val |= (1 << 13);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].pending) {
            val |= (1 << 14);
        }
        if (s->vectors[ARMV7M_EXCP_SVC].pending) {
            val |= (1 << 15);
        }
        if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
            val |= (1 << 16);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
            val |= (1 << 17);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
            val |= (1 << 18);
        }
        return val;
    case 0xd28: /* Configurable Fault Status. */
        return cpu->env.v7m.cfsr;
    case 0xd2c: /* Hard Fault Status. */
        return cpu->env.v7m.hfsr;
    case 0xd30: /* Debug Fault Status. */
        return cpu->env.v7m.dfsr;
    case 0xd34: /* MMFAR MemManage Fault Address */
        return cpu->env.v7m.mmfar;
    case 0xd38: /* Bus Fault Address. */
        return cpu->env.v7m.bfar;
    case 0xd3c: /* Aux Fault Status. */
        /* TODO: Implement fault status registers. */
        qemu_log_mask(LOG_UNIMP,
                      "Aux Fault status registers unimplemented\n");
        return 0;
    case 0xd40: /* PFR0. */
        return 0x00000030;
    case 0xd44: /* PFR1. */
        return 0x00000200;
    case 0xd48: /* DFR0. */
        return 0x00100000;
    case 0xd4c: /* AFR0. */
        return 0x00000000;
    case 0xd50: /* MMFR0. */
        return 0x00000030;
    case 0xd54: /* MMFR1. */
        return 0x00000000;
    case 0xd58: /* MMFR2. */
        return 0x00000000;
    case 0xd5c: /* MMFR3. */
        return 0x00000000;
    case 0xd60: /* ISAR0. */
        return 0x01141110;
    case 0xd64: /* ISAR1. */
        return 0x02111000;
    case 0xd68: /* ISAR2. */
        return 0x21112231;
    case 0xd6c: /* ISAR3. */
        return 0x01111110;
    case 0xd70: /* ISAR4. */
        return 0x01310102;
    /* TODO: Implement debug registers. */
    case 0xd90: /* MPU_TYPE */
        /* Unified MPU; if the MPU is not present this value is zero */
        return cpu->pmsav7_dregion << 8;
        break;
    case 0xd94: /* MPU_CTRL */
        return cpu->env.v7m.mpu_ctrl;
    case 0xd98: /* MPU_RNR */
        return cpu->env.pmsav7.rnr;
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region = cpu->env.pmsav7.rnr;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rbar[region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
    }
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rlar[region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
            (cpu->env.pmsav7.drsr[region] & 0xffff);
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair0;
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair1;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
        return 0;
    }
}

static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value)
{
    ARMCPU *cpu = s->cpu;

    switch (offset) {
    case 0xd04: /* Interrupt Control State. */
        if (value & (1 << 31)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI);
        }
        if (value & (1 << 28)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV);
        } else if (value & (1 << 27)) {
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV);
        }
        if (value & (1 << 26)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK);
        } else if (value & (1 << 25)) {
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK);
        }
        break;
    case 0xd08: /* Vector Table Offset. */
        cpu->env.v7m.vecbase = value & 0xffffff80;
        break;
    case 0xd0c: /* Application Interrupt/Reset Control. */
        if ((value >> 16) == 0x05fa) {
            if (value & 4) {
                qemu_irq_pulse(s->sysresetreq);
            }
            if (value & 2) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTCLRACTIVE when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            if (value & 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTRESET when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            s->prigroup = extract32(value, 8, 3);
            nvic_irq_update(s);
        }
        break;
    case 0xd10: /* System Control. */
        /* TODO: Implement control registers. */
        qemu_log_mask(LOG_UNIMP, "NVIC: SCR unimplemented\n");
        break;
    case 0xd14: /* Configuration Control. */
        /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
        value &= (R_V7M_CCR_STKALIGN_MASK |
                  R_V7M_CCR_BFHFNMIGN_MASK |
                  R_V7M_CCR_DIV_0_TRP_MASK |
                  R_V7M_CCR_UNALIGN_TRP_MASK |
                  R_V7M_CCR_USERSETMPEND_MASK |
                  R_V7M_CCR_NONBASETHRDENA_MASK);

        cpu->env.v7m.ccr = value;
        break;
    case 0xd24: /* System Handler Control. */
        s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
        s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
        s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
        s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
        s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
        s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
        s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
        s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
        s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
        s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
        s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
        s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
        s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
        s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
        nvic_irq_update(s);
        break;
    case 0xd28: /* Configurable Fault Status. */
        cpu->env.v7m.cfsr &= ~value; /* W1C */
        break;
    case 0xd2c: /* Hard Fault Status. */
        cpu->env.v7m.hfsr &= ~value; /* W1C */
        break;
    case 0xd30: /* Debug Fault Status. */
        cpu->env.v7m.dfsr &= ~value; /* W1C */
        break;
    case 0xd34: /* Mem Manage Address. */
        cpu->env.v7m.mmfar = value;
        return;
    case 0xd38: /* Bus Fault Address. */
        cpu->env.v7m.bfar = value;
        return;
    case 0xd3c: /* Aux Fault Status. */
        qemu_log_mask(LOG_UNIMP,
                      "NVIC: Aux fault status registers unimplemented\n");
        break;
    case 0xd90: /* MPU_TYPE */
        return; /* RO */
    case 0xd94: /* MPU_CTRL */
        if ((value &
             (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
            == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
                          "UNPREDICTABLE\n");
        }
        cpu->env.v7m.mpu_ctrl = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
                                         R_V7M_MPU_CTRL_HFNMIENA_MASK |
                                         R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
        tlb_flush(CPU(cpu));
        break;
    case 0xd98: /* MPU_RNR */
        if (value >= cpu->pmsav7_dregion) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
                          PRIu32 "/%" PRIu32 "\n",
                          value, cpu->pmsav7_dregion);
        } else {
            cpu->env.pmsav7.rnr = value;
        }
        break;
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr;
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rbar[region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (value & (1 << 4)) {
            /* VALID bit means use the region number specified in this
             * value and also update MPU_RNR.REGION with that value.
             */
            region = extract32(value, 0, 4);
            if (region >= cpu->pmsav7_dregion) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "MPU region out of range %u/%" PRIu32 "\n",
                              region, cpu->pmsav7_dregion);
                return;
            }
            cpu->env.pmsav7.rnr = region;
        } else {
            region = cpu->env.pmsav7.rnr;
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drbar[region] = value & ~0x1f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr;
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rlar[region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drsr[region] = value & 0xff3f;
        cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair0 = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair1 = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xf00: /* Software Triggered Interrupt Register */
    {
        int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
        if (excnum < s->num_irq) {
            armv7m_nvic_set_pending(s, excnum);
        }
        break;
    }
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "NVIC: Bad write offset 0x%x\n", offset);
    }
}

static bool nvic_user_access_ok(NVICState *s, hwaddr offset)
{
    /* Return true if unprivileged access to this register is permitted. */
    switch (offset) {
    case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
        return s->cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK;
    default:
        /* All other user accesses cause a BusFault unconditionally */
        return false;
    }
}

static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    uint32_t val;

    if (attrs.user && !nvic_user_access_ok(s, addr)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    /* reads of set and clear both return the status */
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
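        /* Each set/clear-enable register covers 32 interrupts with one
         * bit each, so the byte offset within the bank times 8 gives
         * the first interrupt number affected by this access.
         */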
        val = 0;
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].enabled) {
                val |= (1 << i);
            }
        }
        break;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        offset += 0x80;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        val = 0;
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].pending) {
                val |= (1 << i);
            }
        }
        break;
    case 0x300 ... 0x33f: /* NVIC Active */
        val = 0;
        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].active) {
                val |= (1 << i);
            }
        }
        break;
    case 0x400 ... 0x5ef: /* NVIC Priority */
        val = 0;
        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            val |= s->vectors[startvec + i].prio << (8 * i);
        }
        break;
    case 0xd18 ... 0xd23: /* System Handler Priority. */
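        /* SHPR1-SHPR3 hold one priority byte per system exception,
         * starting with exception 4 (MemManage) at offset 0xd18, so the
         * byte at offset X configures exception number (X - 0xd14).
         */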
        val = 0;
        for (i = 0; i < size; i++) {
            val |= s->vectors[(offset - 0xd14) + i].prio << (i * 8);
        }
        break;
    case 0xfe0 ... 0xfff: /* ID. */
        if (offset & 3) {
            val = 0;
        } else {
            val = nvic_id[(offset - 0xfe0) >> 2];
        }
        break;
    default:
        if (size == 4) {
            val = nvic_readl(s, offset);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "NVIC: Bad read of size %d at offset 0x%x\n",
                          size, offset);
            val = 0;
        }
    }

    trace_nvic_sysreg_read(addr, val, size);
    *data = val;
    return MEMTX_OK;
}

static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0;

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i)) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i)) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x300 ... 0x33f: /* NVIC Active */
        return MEMTX_OK; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
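        /* NVIC_IPR holds one 8-bit priority field per interrupt, so the
         * byte offset within the bank maps directly onto an interrupt
         * number.
         */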
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            set_prio(s, startvec + i, (value >> (i * 8)) & 0xff);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd18 ... 0xd23: /* System Handler Priority. */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            set_prio(s, hdlidx, (value >> (i * 8)) & 0xff);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    }
    if (size == 4) {
        nvic_writel(s, offset, value);
        return MEMTX_OK;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */
    return MEMTX_OK;
}

static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int nvic_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    unsigned i;

    /* Check for out of range priority settings */
    if (s->vectors[ARMV7M_EXCP_RESET].prio != -3 ||
        s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
        s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
        if (s->vectors[i].prio & ~0xff) {
            return 1;
        }
    }

    nvic_recompute_state(s);

    return 0;
}

static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup, NVICState),
        VMSTATE_END_OF_LIST()
    }
};

static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};

static void armv7m_nvic_reset(DeviceState *dev)
{
    NVICState *s = NVIC(dev);

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    /* MEM, BUS, and USAGE are enabled through
     * the System Handler Control register
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    s->vectors[ARMV7M_EXCP_RESET].prio = -3;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    /* Strictly speaking the reset handler should be enabled.
     * However, we don't simulate soft resets through the NVIC,
     * and the reset vector should never be pended.
     * So we leave it disabled to catch logic errors.
     */

    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
}

static void nvic_systick_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    if (level) {
        /* SysTick just asked us to pend its exception.
         * (This is different from an external interrupt line's
         * behaviour.)
         */
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK);
    }
}

static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);
    SysBusDevice *systick_sbd;
    Error *err = NULL;

    s->cpu = ARM_CPU(qemu_get_cpu(0));
    assert(s->cpu);

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    object_property_set_bool(OBJECT(&s->systick), true, "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
    systick_sbd = SYS_BUS_DEVICE(&s->systick);
    sysbus_connect_irq(systick_sbd, 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger", 0));

    /* The NVIC and System Control Space (SCS) starts at 0xe000e000
     * and looks like this:
     *  0x004 - ICTR
     *  0x010 - 0xff - systick
     *  0x100..0x7ec - NVIC
     *  0x7f0..0xcff - Reserved
     *  0xd00..0xd3c - SCS registers
     *  0xd40..0xeff - Reserved or Not implemented
     *  0xf00 - STIR
     */
    memory_region_init(&s->container, OBJECT(s), "nvic", 0x1000);
    /* The system register region goes at the bottom of the priority
     * stack as it covers the whole page.
     */
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0, &s->sysregmem);
    memory_region_add_subregion_overlap(&s->container, 0x10,
                                        sysbus_mmio_get_region(systick_sbd, 0),
                                        1);

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}

static void armv7m_nvic_instance_init(Object *obj)
{
    /* Create the embedded SysTick timer and set up our external
     * connections: the exception output line (excpout), the
     * SYSRESETREQ GPIO output and the "systick-trigger" GPIO input.
     */
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize(&nvic->systick, sizeof(nvic->systick), TYPE_SYSTICK);
    qdev_set_parent_bus(DEVICE(&nvic->systick), sysbus_get_default());

    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", 1);
}

static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_nvic;
    dc->props = props_nvic;
    dc->reset = armv7m_nvic_reset;
    dc->realize = armv7m_nvic_realize;
}

static const TypeInfo armv7m_nvic_info = {
    .name = TYPE_NVIC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init = armv7m_nvic_class_init,
    .class_size = sizeof(SysBusDeviceClass),
};

static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)