/*
 *  APIC support
 *
 *  Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "qemu-thread.h"
#include "apic_internal.h"
#include "apic.h"
#include "ioapic.h"
#include "msi.h"
#include "host-utils.h"
#include "trace.h"
#include "pc.h"
#include "apic-msidef.h"

#define MAX_APIC_WORDS 8

#define SYNC_FROM_VAPIC 0x1
#define SYNC_TO_VAPIC 0x2
#define SYNC_ISR_IRR_TO_VAPIC 0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int ffs_bit(uint32_t value)
{
    return ctz32(value);
}

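/*
 * The ISR, TMR and IRR registers are modelled as arrays of eight 32-bit
 * words (MAX_APIC_WORDS), one bit per interrupt vector (0..255).  The
 * helpers below set, clear and test a single vector bit in such an array.
 */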
static inline void set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + fls_bit(tab[i]);
        }
    }
    return -1;
}

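/*
 * Synchronize the emulated APIC state with the in-guest VAPIC page at
 * s->vapic_paddr (if one has been registered).  SYNC_FROM_VAPIC pulls the
 * guest-visible TPR back into the APIC; SYNC_TO_VAPIC pushes the full state
 * (TPR, highest ISR/IRR priority class, enable flag), while
 * SYNC_ISR_IRR_TO_VAPIC pushes only the ISR/IRR part.
 */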
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_rw(s->vapic_paddr, (void *)&vapic_state,
                               sizeof(vapic_state), 0);
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(&s->cpu->env));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

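/*
 * Deliver one of the local interrupt sources (timer, LINT0/1, ...)
 * according to its LVT entry: raise SMI/NMI/ExtINT directly on the CPU, or
 * inject a fixed interrupt with the trigger mode taken from the LVT.
 */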
static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *d, int level)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            cpu_reset_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

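/*
 * Run 'code' once for every local APIC whose bit is set in
 * 'deliver_bitmask', binding it to 'apic'.  Indices without a registered
 * APIC in local_apics[] are skipped.
 */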
#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j, __mask;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1 << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

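/*
 * Final delivery step on the emulated APIC bus: hand the interrupt to every
 * APIC selected by deliver_bitmask.  Lowest-priority delivery is
 * approximated by picking the first APIC in the mask (no focus processor or
 * arbitration); SMI/NMI/INIT are forwarded as CPU interrupts rather than
 * vectored interrupts.
 */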
static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
    case APIC_DM_LOWPRI:
        /* XXX: search for focus processor, arbitration */
        {
            int i, d;
            d = -1;
            for(i = 0; i < MAX_APIC_WORDS; i++) {
                if (deliver_bitmask[i]) {
                    d = i * 32 + ffs_bit(deliver_bitmask[i]);
                    break;
                }
            }
            if (d >= 0) {
                apic_iter = local_apics[d];
                if (apic_iter) {
                    apic_set_irq(apic_iter, vector_num, trigger_mode);
                }
            }
        }
        return;

    case APIC_DM_FIXED:
        break;

    case APIC_DM_SMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(&apic_iter->cpu->env, CPU_INTERRUPT_SMI)
        );
        return;

    case APIC_DM_NMI:
        foreach_apic(apic_iter, deliver_bitmask,
            cpu_interrupt(&apic_iter->cpu->env, CPU_INTERRUPT_NMI)
        );
        return;

    case APIC_DM_INIT:
        /* normal INIT IPI sent to processors */
        foreach_apic(apic_iter, deliver_bitmask,
                     cpu_interrupt(&apic_iter->cpu->env,
                                   CPU_INTERRUPT_INIT)
        );
        return;

    case APIC_DM_EXTINT:
        /* handled in I/O APIC code */
        break;

    default:
        return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

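/*
 * Handle a write to the IA32_APICBASE MSR.  Only the base address bits are
 * taken from the new value; the BSP and global-enable bits are preserved,
 * except that clearing the enable bit disables the APIC for good (it cannot
 * be re-enabled without a reset).
 */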
static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(&s->cpu->env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

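/*
 * Processor Priority Register: the higher of the task priority (TPR) and
 * the priority class of the highest vector currently in service (ISR).
 * Pending interrupts with a class at or below the PPR are held back.
 */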
static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;
    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return;
    }
    if (!qemu_cpu_is_self(&s->cpu->env)) {
        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_POLL);
    } else if (apic_irq_pending(s) > 0) {
        cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_HARD);
    }
}

void apic_poll_irq(DeviceState *d)
{
    APICCommonState *s = APIC_COMMON(d);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

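/*
 * Latch a new pending interrupt: record it in IRR, note the trigger mode in
 * TMR, mirror the update into the VAPIC page if one is active, and finally
 * poke the CPU so it notices the pending vector.
 */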
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!get_bit(s->irr, vector_num));

    set_bit(s->irr, vector_num);
    if (trigger_mode)
        set_bit(s->tmr, vector_num);
    else
        reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

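/*
 * End-of-interrupt: retire the highest in-service vector from ISR and, for
 * level-triggered interrupts (unless directed EOI is enabled), broadcast the
 * EOI to the I/O APICs so the line can be re-evaluated.
 */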
static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

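/*
 * Map a physical destination APIC ID to an index into local_apics[].
 * Returns -1 if no APIC with that ID exists.
 */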
static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

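/*
 * Build the per-APIC delivery bitmask for a destination.  Physical mode
 * (dest_mode == 0) selects a single APIC by ID, or all of them for the
 * broadcast ID 0xff.  Logical mode matches 'dest' against each APIC's
 * logical destination register, in either flat or (partially modelled)
 * cluster format.
 */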
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(&s->cpu->env, CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    cpu_reset_interrupt(&s->cpu->env, CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(&s->cpu->env, s->sipi_vector);
    s->wait_for_sipi = 0;
}

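/*
 * Deliver an IPI programmed through the ICR.  The destination shorthand
 * bits select the target set (explicit destination, self, all, or all but
 * self); INIT level de-assert and SIPI get special handling before the
 * generic bus delivery path.
 */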
static void apic_deliver(DeviceState *d, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
    case APIC_DM_INIT:
        {
            int trig_mode = (s->icr[0] >> 15) & 1;
            int level = (s->icr[0] >> 14) & 1;
            if (level == 0 && trig_mode == 1) {
                foreach_apic(apic_iter, deliver_bitmask,
                             apic_iter->arb_id = apic_iter->id );
                return;
            }
        }
        break;

    case APIC_DM_SIPI:
        foreach_apic(apic_iter, deliver_bitmask,
                     apic_startup(apic_iter, vector_num) );
        return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static bool apic_check_pic(APICCommonState *s)
{
    if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
        return false;
    }
    apic_deliver_pic_intr(&s->busdev.qdev, 1);
    return true;
}

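/*
 * Called when the CPU is ready to take a maskable interrupt.  Returns the
 * acknowledged vector, the spurious vector if the pending interrupt is
 * blocked by the current priority, or -1 if nothing is pending.
 */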
int apic_get_interrupt(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    if (intno == 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    reset_bit(s->irr, intno);
    set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);

    /* re-inject if there is still a pending PIC interrupt */
    apic_check_pic(s);

    apic_update_irq(s);

    return intno;
}

int apic_accept_pic_intr(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

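/*
 * Current-count register: derive the remaining timer count from the time
 * elapsed since the initial count was loaded, scaled by the divide
 * configuration (count_shift), wrapping around in periodic mode.
 */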
static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_get_clock_ns(vm_clock) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

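/*
 * (Re)arm or cancel the QEMU timer depending on whether apic_next_timer()
 * computed a next expiry time for the current timer mode.
 */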
static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        qemu_mod_timer(s->timer, s->next_time);
    } else {
        qemu_del_timer(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
{
    DeviceState *d;
    APICCommonState *s;
    uint32_t val;
    int index;

    d = cpu_get_current_apic();
    if (!d) {
        return 0;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = 0x11 | ((APIC_LVT_NB - 1) << 16); /* version 0x11 */
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = s->dest_mode << 28;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

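/*
 * Decode an MSI write (address + data) into destination, vector, delivery
 * mode and trigger mode, and feed it into the normal APIC bus delivery
 * path.
 */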
static void apic_send_msi(hwaddr addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    DeviceState *d;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on PCI bus
         * APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    d = cpu_get_current_apic();
    if (!d) {
        return;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(d, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER) {
                apic_timer_update(s, qemu_get_clock_ns(vm_clock));
            } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
                apic_update_irq(s);
            }
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
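    /*
     * Divide configuration register: bits 0, 1 and 3 select the timer
     * divider.  count_shift = (v + 1) & 7 makes one timer tick correspond
     * to 2^count_shift vm_clock nanoseconds, so v = 7 (divide by 1) yields
     * a shift of 0.
     */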
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        qemu_mod_timer(s->timer, s->timer_expiry);
    } else {
        qemu_del_timer(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

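/*
 * Per-instance init for the emulated (non-KVM) APIC: register the combined
 * MMIO/MSI region, create the per-APIC timer and record this APIC in the
 * global local_apics[] table used for IPI delivery.  Advertise MSI support
 * once an APIC exists.
 */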
static void apic_init(APICCommonState *s)
{
    memory_region_init_io(&s->io_memory, &apic_io_ops, s, "apic-msi",
                          MSI_SPACE_SIZE);

    s->timer = qemu_new_timer_ns(vm_clock, apic_timer, s);
    local_apics[s->idx] = s;

    msi_supported = true;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->init = apic_init;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static TypeInfo apic_info = {
    .name = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent = TYPE_APIC_COMMON,
    .class_init = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)