/* qemu.git: hw/apic.c */
/*
 * APIC support
 *
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "apic_internal.h"
#include "apic.h"
#include "ioapic.h"
#include "msi.h"
#include "host-utils.h"
#include "trace.h"
#include "pc.h"

#define MAX_APIC_WORDS 8

/* Intel APIC constants: from include/asm/msidef.h */
#define MSI_DATA_VECTOR_SHIFT           0
#define MSI_DATA_VECTOR_MASK            0x000000ff
#define MSI_DATA_DELIVERY_MODE_SHIFT    8
#define MSI_DATA_TRIGGER_SHIFT          15
#define MSI_DATA_LEVEL_SHIFT            14
#define MSI_ADDR_DEST_MODE_SHIFT        2
#define MSI_ADDR_DEST_ID_SHIFT          12
#define MSI_ADDR_DEST_ID_MASK           0x00ffff0

#define SYNC_FROM_VAPIC                 0x1
#define SYNC_TO_VAPIC                   0x2
#define SYNC_ISR_IRR_TO_VAPIC           0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + fls_bit(tab[i]);
        }
    }
    return -1;
}

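/*
 * Mirror APIC state to/from the guest-visible VAPIC page used for TPR
 * acceleration: SYNC_FROM_VAPIC pulls the TPR the guest wrote there,
 * SYNC_TO_VAPIC pushes TPR, ISR/IRR snapshot and the enable flag, and
 * SYNC_ISR_IRR_TO_VAPIC refreshes only the ISR/IRR snapshot.
 */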
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_rw(s->vapic_paddr, (void *)&vapic_state,
                               sizeof(vapic_state), 0);
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(s->cpu_env));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

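/*
 * Deliver the interrupt configured in one of the local LVT entries
 * (timer, LINT0/1, ...), honouring the entry's mask bit, its delivery
 * mode and, for LINT0/1, its trigger mode.
 */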
static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *d, int level)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

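/*
 * Run 'code' with 'apic' bound to every local APIC whose bit is set in
 * 'deliver_bitmask' (one bit per APIC index, MAX_APIC_WORDS words wide).
 */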
#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j, __mask;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1 << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

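/*
 * Deliver an interrupt to every APIC selected in deliver_bitmask,
 * according to the delivery mode (fixed, lowest priority, SMI, NMI,
 * INIT or ExtINT).
 */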
static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for(i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_SMI) );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_NMI) );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_INIT) );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(s->cpu_env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

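/*
 * Processor priority: the higher of the task-priority register and the
 * priority class of the highest vector currently in service.
 */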
static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;
    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return;
    }
    if (apic_irq_pending(s) > 0) {
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
    } else if (apic_accept_pic_intr(&s->busdev.qdev) &&
               pic_get_output(isa_pic)) {
        apic_deliver_pic_intr(&s->busdev.qdev, 1);
    }
}

void apic_poll_irq(DeviceState *d)
{
    APICCommonState *s = APIC_COMMON(d);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

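/*
 * Latch a fixed interrupt: record it in IRR (and in TMR for level
 * triggered vectors), update the VAPIC snapshot if one is mapped, then
 * notify the CPU.
 */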
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!get_bit(s->irr, vector_num));

    set_bit(s->irr, vector_num);
    if (trigger_mode)
        set_bit(s->tmr, vector_num);
    else
        reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

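/*
 * Acknowledge the highest-priority in-service interrupt; level
 * triggered vectors are also broadcast to the I/O APICs unless
 * directed EOI is enabled.
 */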
static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

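/*
 * Translate an interrupt destination into a bitmask of local APIC
 * indexes: physical mode (dest_mode == 0) matches APIC IDs, with 0xff
 * acting as broadcast; logical mode matches against each APIC's logical
 * destination in either flat or cluster format.
 */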
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu_env, s->sipi_vector);
    s->wait_for_sipi = 0;
}

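/*
 * Deliver an IPI programmed through the ICR: resolve the destination
 * shorthand (none / self / all / all-but-self) into a bitmask, handle
 * INIT level de-assert and SIPI locally, and hand everything else to
 * apic_bus_deliver().
 */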
static void apic_deliver(DeviceState *d, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

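/*
 * INTA cycle: return the vector of the highest-priority pending
 * interrupt and move it from IRR to ISR, the spurious vector if the
 * pending interrupt is masked by the current priority, or -1 if the
 * APIC has nothing to deliver.
 */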
int apic_get_interrupt(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    if (intno == 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    reset_bit(s->irr, intno);
    set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);
    apic_update_irq(s);
    return intno;
}

int apic_accept_pic_intr(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

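/*
 * Current count register of the APIC timer, derived from the initial
 * count, the divide configuration and the time elapsed since the
 * initial count was loaded (wrapping when in periodic mode).
 */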
static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_get_clock_ns(vm_clock) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        qemu_mod_timer(s->timer, s->next_time);
    } else {
        qemu_del_timer(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

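/*
 * MMIO read of a 32-bit APIC register; the register index comes from
 * bits 4-11 of the offset and the access is routed to the APIC of the
 * CPU that performed it.
 */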
static uint32_t apic_mem_readl(void *opaque, target_phys_addr_t addr)
{
    DeviceState *d;
    APICCommonState *s;
    uint32_t val;
    int index;

    d = cpu_get_current_apic();
    if (!d) {
        return 0;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = 0x11 | ((APIC_LVT_NB - 1) << 16); /* version 0x11 */
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = s->dest_mode << 28;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

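/*
 * Decode an MSI write: destination and destination mode come from the
 * address, vector, delivery mode and trigger mode from the data, then
 * the message is injected like any other bus-delivered interrupt.
 */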
static void apic_send_msi(target_phys_addr_t addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    DeviceState *d;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on PCI bus
         * APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    d = cpu_get_current_apic();
    if (!d) {
        return;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(d, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER)
                apic_timer_update(s, qemu_get_clock_ns(vm_clock));
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        qemu_mod_timer(s->timer, s->timer_expiry);
    } else {
        qemu_del_timer(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_init(APICCommonState *s)
{
    memory_region_init_io(&s->io_memory, &apic_io_ops, s, "apic-msi",
                          MSI_SPACE_SIZE);

    s->timer = qemu_new_timer_ns(vm_clock, apic_timer, s);
    local_apics[s->idx] = s;

    msi_supported = true;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->init = apic_init;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static TypeInfo apic_info = {
    .name          = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent        = TYPE_APIC_COMMON,
    .class_init    = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)