[qemu.git] / hw / apic.c
/*
 * APIC support
 *
 * Copyright (c) 2004-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */
#include "apic_internal.h"
#include "apic.h"
#include "ioapic.h"
#include "host-utils.h"
#include "trace.h"
#include "pc.h"

#define MAX_APIC_WORDS 8

/* Intel APIC constants: from include/asm/msidef.h */
#define MSI_DATA_VECTOR_SHIFT           0
#define MSI_DATA_VECTOR_MASK            0x000000ff
#define MSI_DATA_DELIVERY_MODE_SHIFT    8
#define MSI_DATA_TRIGGER_SHIFT          15
#define MSI_DATA_LEVEL_SHIFT            14
#define MSI_ADDR_DEST_MODE_SHIFT        2
#define MSI_ADDR_DEST_ID_SHIFT          12
#define MSI_ADDR_DEST_ID_MASK           0x00ffff0

#define SYNC_FROM_VAPIC                 0x1
#define SYNC_TO_VAPIC                   0x2
#define SYNC_ISR_IRR_TO_VAPIC           0x4

static APICCommonState *local_apics[MAX_APICS + 1];

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode);

/* Find first bit starting from msb */
static int fls_bit(uint32_t value)
{
    return 31 - clz32(value);
}

/* Find first bit starting from lsb */
static int ffs_bit(uint32_t value)
{
    return ctz32(value);
}

static inline void set_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] |= mask;
}

static inline void reset_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    tab[i] &= ~mask;
}

static inline int get_bit(uint32_t *tab, int index)
{
    int i, mask;
    i = index >> 5;
    mask = 1 << (index & 0x1f);
    return !!(tab[i] & mask);
}

/* return -1 if no bit is set */
static int get_highest_priority_int(uint32_t *tab)
{
    int i;
    for (i = 7; i >= 0; i--) {
        if (tab[i] != 0) {
            return i * 32 + fls_bit(tab[i]);
        }
    }
    return -1;
}

static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    size_t length;
    off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;
    }
    if (sync_type & SYNC_FROM_VAPIC) {
        cpu_physical_memory_rw(s->vapic_paddr, (void *)&vapic_state,
                               sizeof(vapic_state), 0);
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        start = offsetof(VAPICState, isr);
        length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            assert(qemu_cpu_is_self(s->cpu_env));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            start = 0;
            length = sizeof(VAPICState);
        }

        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        cpu_physical_memory_write_rom(s->vapic_paddr + start,
                                      ((void *)&vapic_state) + start, length);
    }
}

static void apic_vapic_base_update(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_TO_VAPIC);
}

static void apic_local_deliver(APICCommonState *s, int vector)
{
    uint32_t lvt = s->lvt[vector];
    int trigger_mode;

    trace_apic_local_deliver(vector, (lvt >> 8) & 7);

    if (lvt & APIC_LVT_MASKED)
        return;

    switch ((lvt >> 8) & 7) {
    case APIC_DM_SMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SMI);
        break;

    case APIC_DM_NMI:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_NMI);
        break;

    case APIC_DM_EXTINT:
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
        break;

    case APIC_DM_FIXED:
        trigger_mode = APIC_TRIGGER_EDGE;
        if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) &&
            (lvt & APIC_LVT_LEVEL_TRIGGER))
            trigger_mode = APIC_TRIGGER_LEVEL;
        apic_set_irq(s, lvt & 0xff, trigger_mode);
    }
}

void apic_deliver_pic_intr(DeviceState *d, int level)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    if (level) {
        apic_local_deliver(s, APIC_LVT_LINT0);
    } else {
        uint32_t lvt = s->lvt[APIC_LVT_LINT0];

        switch ((lvt >> 8) & 7) {
        case APIC_DM_FIXED:
            if (!(lvt & APIC_LVT_LEVEL_TRIGGER))
                break;
            reset_bit(s->irr, lvt & 0xff);
            /* fall through */
        case APIC_DM_EXTINT:
            cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
            break;
        }
    }
}

static void apic_external_nmi(APICCommonState *s)
{
    apic_local_deliver(s, APIC_LVT_LINT1);
}

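/* Run 'code' once for every registered local APIC whose index bit is set in
 * 'deliver_bitmask', with 'apic' bound to that APIC while 'code' executes. */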
#define foreach_apic(apic, deliver_bitmask, code) \
{\
    int __i, __j, __mask;\
    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
        __mask = deliver_bitmask[__i];\
        if (__mask) {\
            for(__j = 0; __j < 32; __j++) {\
                if (__mask & (1 << __j)) {\
                    apic = local_apics[__i * 32 + __j];\
                    if (apic) {\
                        code;\
                    }\
                }\
            }\
        }\
    }\
}

static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                             uint8_t delivery_mode, uint8_t vector_num,
                             uint8_t trigger_mode)
{
    APICCommonState *apic_iter;

    switch (delivery_mode) {
        case APIC_DM_LOWPRI:
            /* XXX: search for focus processor, arbitration */
            {
                int i, d;
                d = -1;
                for(i = 0; i < MAX_APIC_WORDS; i++) {
                    if (deliver_bitmask[i]) {
                        d = i * 32 + ffs_bit(deliver_bitmask[i]);
                        break;
                    }
                }
                if (d >= 0) {
                    apic_iter = local_apics[d];
                    if (apic_iter) {
                        apic_set_irq(apic_iter, vector_num, trigger_mode);
                    }
                }
            }
            return;

        case APIC_DM_FIXED:
            break;

        case APIC_DM_SMI:
            foreach_apic(apic_iter, deliver_bitmask,
                cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_SMI) );
            return;

        case APIC_DM_NMI:
            foreach_apic(apic_iter, deliver_bitmask,
                cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_NMI) );
            return;

        case APIC_DM_INIT:
            /* normal INIT IPI sent to processors */
            foreach_apic(apic_iter, deliver_bitmask,
                cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_INIT) );
            return;

        case APIC_DM_EXTINT:
            /* handled in I/O APIC code */
            break;

        default:
            return;
    }

    foreach_apic(apic_iter, deliver_bitmask,
                 apic_set_irq(apic_iter, vector_num, trigger_mode) );
}

void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
                      uint8_t vector_num, uint8_t trigger_mode)
{
    uint32_t deliver_bitmask[MAX_APIC_WORDS];

    trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                           trigger_mode);

    apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

static void apic_set_base(APICCommonState *s, uint64_t val)
{
    s->apicbase = (val & 0xfffff000) |
        (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
    /* if disabled, cannot be enabled again */
    if (!(val & MSR_IA32_APICBASE_ENABLE)) {
        s->apicbase &= ~MSR_IA32_APICBASE_ENABLE;
        cpu_clear_apic_feature(s->cpu_env);
        s->spurious_vec &= ~APIC_SV_ENABLE;
    }
}

static void apic_set_tpr(APICCommonState *s, uint8_t val)
{
    /* Updates from cr8 are ignored while the VAPIC is active */
    if (!s->vapic_paddr) {
        s->tpr = val << 4;
        apic_update_irq(s);
    }
}

static uint8_t apic_get_tpr(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    return s->tpr >> 4;
}

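/* Processor priority: the higher of the task priority class (TPR[7:4]) and
 * the priority class of the highest vector currently in service. */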
static int apic_get_ppr(APICCommonState *s)
{
    int tpr, isrv, ppr;

    tpr = (s->tpr >> 4);
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        isrv = 0;
    isrv >>= 4;
    if (tpr >= isrv)
        ppr = s->tpr;
    else
        ppr = isrv << 4;
    return ppr;
}

static int apic_get_arb_pri(APICCommonState *s)
{
    /* XXX: arbitration */
    return 0;
}


/*
 * <0 - low prio interrupt,
 *  0 - no interrupt,
 * >0 - interrupt number
 */
static int apic_irq_pending(APICCommonState *s)
{
    int irrv, ppr;
    irrv = get_highest_priority_int(s->irr);
    if (irrv < 0) {
        return 0;
    }
    ppr = apic_get_ppr(s);
    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
        return -1;
    }

    return irrv;
}

/* signal the CPU if an irq is pending */
static void apic_update_irq(APICCommonState *s)
{
    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
        return;
    }
    if (apic_irq_pending(s) > 0) {
        cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
    } else if (apic_accept_pic_intr(&s->busdev.qdev) &&
               pic_get_output(isa_pic)) {
        apic_deliver_pic_intr(&s->busdev.qdev, 1);
    }
}

void apic_poll_irq(DeviceState *d)
{
    APICCommonState *s = APIC_COMMON(d);

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    apic_update_irq(s);
}

static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
{
    apic_report_irq_delivered(!get_bit(s->irr, vector_num));

    set_bit(s->irr, vector_num);
    if (trigger_mode)
        set_bit(s->tmr, vector_num);
    else
        reset_bit(s->tmr, vector_num);
    if (s->vapic_paddr) {
        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
        /*
         * The vcpu thread needs to see the new IRR before we pull its current
         * TPR value. That way, if we miss a lowering of the TPR, the guest
         * has the chance to notice the new IRR and poll for IRQs on its own.
         */
        smp_wmb();
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
    }
    apic_update_irq(s);
}

static void apic_eoi(APICCommonState *s)
{
    int isrv;
    isrv = get_highest_priority_int(s->isr);
    if (isrv < 0)
        return;
    reset_bit(s->isr, isrv);
    if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && get_bit(s->tmr, isrv)) {
        ioapic_eoi_broadcast(isrv);
    }
    apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC);
    apic_update_irq(s);
}

static int apic_find_dest(uint8_t dest)
{
    APICCommonState *apic = local_apics[dest];
    int i;

    if (apic && apic->id == dest)
        return dest;  /* shortcut in case apic->id == apic->idx */

    for (i = 0; i < MAX_APICS; i++) {
        apic = local_apics[i];
        if (apic && apic->id == dest)
            return i;
        if (!apic)
            break;
    }

    return -1;
}

static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
                                      uint8_t dest, uint8_t dest_mode)
{
    APICCommonState *apic_iter;
    int i;

    if (dest_mode == 0) {
        if (dest == 0xff) {
            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
        } else {
            int idx = apic_find_dest(dest);
            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
            if (idx >= 0)
                set_bit(deliver_bitmask, idx);
        }
    } else {
        /* XXX: cluster mode */
        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
        for(i = 0; i < MAX_APICS; i++) {
            apic_iter = local_apics[i];
            if (apic_iter) {
                if (apic_iter->dest_mode == 0xf) {
                    if (dest & apic_iter->log_dest)
                        set_bit(deliver_bitmask, i);
                } else if (apic_iter->dest_mode == 0x0) {
                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
                        (dest & apic_iter->log_dest & 0x0f)) {
                        set_bit(deliver_bitmask, i);
                    }
                }
            } else {
                break;
            }
        }
    }
}

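/* A STARTUP IPI only records the vector and kicks the CPU; apic_sipi() later
 * consumes it if the CPU is still waiting for SIPI. */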
static void apic_startup(APICCommonState *s, int vector_num)
{
    s->sipi_vector = vector_num;
    cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);
}

void apic_sipi(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    cpu_reset_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);

    if (!s->wait_for_sipi)
        return;
    cpu_x86_load_seg_cache_sipi(s->cpu_env, s->sipi_vector);
    s->wait_for_sipi = 0;
}

static void apic_deliver(DeviceState *d, uint8_t dest, uint8_t dest_mode,
                         uint8_t delivery_mode, uint8_t vector_num,
                         uint8_t trigger_mode)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t deliver_bitmask[MAX_APIC_WORDS];
    int dest_shorthand = (s->icr[0] >> 18) & 3;
    APICCommonState *apic_iter;

    switch (dest_shorthand) {
    case 0:
        apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
        break;
    case 1:
        memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
        set_bit(deliver_bitmask, s->idx);
        break;
    case 2:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        break;
    case 3:
        memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
        reset_bit(deliver_bitmask, s->idx);
        break;
    }

    switch (delivery_mode) {
        case APIC_DM_INIT:
            {
                int trig_mode = (s->icr[0] >> 15) & 1;
                int level = (s->icr[0] >> 14) & 1;
                if (level == 0 && trig_mode == 1) {
                    foreach_apic(apic_iter, deliver_bitmask,
                                 apic_iter->arb_id = apic_iter->id );
                    return;
                }
            }
            break;

        case APIC_DM_SIPI:
            foreach_apic(apic_iter, deliver_bitmask,
                         apic_startup(apic_iter, vector_num) );
            return;
    }

    apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}

int apic_get_interrupt(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    int intno;

    /* if the APIC is not installed or not enabled, we let the 8259 handle the
       IRQs */
    if (!s)
        return -1;
    if (!(s->spurious_vec & APIC_SV_ENABLE))
        return -1;

    apic_sync_vapic(s, SYNC_FROM_VAPIC);
    intno = apic_irq_pending(s);

    if (intno == 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return -1;
    } else if (intno < 0) {
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        return s->spurious_vec & 0xff;
    }
    reset_bit(s->irr, intno);
    set_bit(s->isr, intno);
    apic_sync_vapic(s, SYNC_TO_VAPIC);
    apic_update_irq(s);
    return intno;
}

int apic_accept_pic_intr(DeviceState *d)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    uint32_t lvt0;

    if (!s)
        return -1;

    lvt0 = s->lvt[APIC_LVT_LINT0];

    if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 ||
        (lvt0 & APIC_LVT_MASKED) == 0)
        return 1;

    return 0;
}

static uint32_t apic_get_current_count(APICCommonState *s)
{
    int64_t d;
    uint32_t val;
    d = (qemu_get_clock_ns(vm_clock) - s->initial_count_load_time) >>
        s->count_shift;
    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        /* periodic */
        val = s->initial_count - (d % ((uint64_t)s->initial_count + 1));
    } else {
        if (d >= s->initial_count)
            val = 0;
        else
            val = s->initial_count - d;
    }
    return val;
}

static void apic_timer_update(APICCommonState *s, int64_t current_time)
{
    if (apic_next_timer(s, current_time)) {
        qemu_mod_timer(s->timer, s->next_time);
    } else {
        qemu_del_timer(s->timer);
    }
}

static void apic_timer(void *opaque)
{
    APICCommonState *s = opaque;

    apic_local_deliver(s, APIC_LVT_TIMER);
    apic_timer_update(s, s->next_time);
}

static uint32_t apic_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static uint32_t apic_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void apic_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static void apic_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static uint32_t apic_mem_readl(void *opaque, target_phys_addr_t addr)
{
    DeviceState *d;
    APICCommonState *s;
    uint32_t val;
    int index;

    d = cpu_get_current_apic();
    if (!d) {
        return 0;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    index = (addr >> 4) & 0xff;
    switch(index) {
    case 0x02: /* id */
        val = s->id << 24;
        break;
    case 0x03: /* version */
        val = 0x11 | ((APIC_LVT_NB - 1) << 16); /* version 0x11 */
        break;
    case 0x08:
        apic_sync_vapic(s, SYNC_FROM_VAPIC);
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_READ);
        }
        val = s->tpr;
        break;
    case 0x09:
        val = apic_get_arb_pri(s);
        break;
    case 0x0a:
        /* ppr */
        val = apic_get_ppr(s);
        break;
    case 0x0b:
        val = 0;
        break;
    case 0x0d:
        val = s->log_dest << 24;
        break;
    case 0x0e:
        val = s->dest_mode << 28;
        break;
    case 0x0f:
        val = s->spurious_vec;
        break;
    case 0x10 ... 0x17:
        val = s->isr[index & 7];
        break;
    case 0x18 ... 0x1f:
        val = s->tmr[index & 7];
        break;
    case 0x20 ... 0x27:
        val = s->irr[index & 7];
        break;
    case 0x28:
        val = s->esr;
        break;
    case 0x30:
    case 0x31:
        val = s->icr[index & 1];
        break;
    case 0x32 ... 0x37:
        val = s->lvt[index - 0x32];
        break;
    case 0x38:
        val = s->initial_count;
        break;
    case 0x39:
        val = apic_get_current_count(s);
        break;
    case 0x3e:
        val = s->divide_conf;
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        val = 0;
        break;
    }
    trace_apic_mem_readl(addr, val);
    return val;
}

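/* Decode an MSI write into an APIC bus delivery.  For example, a 32-bit write
 * of 0x41 to 0xfee01000 requests vector 0x41 with fixed delivery mode, edge
 * triggered, physical destination mode, destination APIC ID 1. */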
static void apic_send_msi(target_phys_addr_t addr, uint32_t data)
{
    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
    uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
    uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    /* XXX: Ignore redirection hint. */
    apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}

static void apic_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    DeviceState *d;
    APICCommonState *s;
    int index = (addr >> 4) & 0xff;
    if (addr > 0xfff || !index) {
        /* MSI and the MMIO APIC are at the same memory location,
         * but actually not on the global bus: MSI is on the PCI bus,
         * while the APIC is connected directly to the CPU.
         * Mapping them on the global bus happens to work because
         * MSI registers are reserved in APIC MMIO and vice versa. */
        apic_send_msi(addr, val);
        return;
    }

    d = cpu_get_current_apic();
    if (!d) {
        return;
    }
    s = DO_UPCAST(APICCommonState, busdev.qdev, d);

    trace_apic_mem_writel(addr, val);

    switch(index) {
    case 0x02:
        s->id = (val >> 24);
        break;
    case 0x03:
        break;
    case 0x08:
        if (apic_report_tpr_access) {
            cpu_report_tpr_access(s->cpu_env, TPR_ACCESS_WRITE);
        }
        s->tpr = val;
        apic_sync_vapic(s, SYNC_TO_VAPIC);
        apic_update_irq(s);
        break;
    case 0x09:
    case 0x0a:
        break;
    case 0x0b: /* EOI */
        apic_eoi(s);
        break;
    case 0x0d:
        s->log_dest = val >> 24;
        break;
    case 0x0e:
        s->dest_mode = val >> 28;
        break;
    case 0x0f:
        s->spurious_vec = val & 0x1ff;
        apic_update_irq(s);
        break;
    case 0x10 ... 0x17:
    case 0x18 ... 0x1f:
    case 0x20 ... 0x27:
    case 0x28:
        break;
    case 0x30:
        s->icr[0] = val;
        apic_deliver(d, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
                     (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
                     (s->icr[0] >> 15) & 1);
        break;
    case 0x31:
        s->icr[1] = val;
        break;
    case 0x32 ... 0x37:
        {
            int n = index - 0x32;
            s->lvt[n] = val;
            if (n == APIC_LVT_TIMER)
                apic_timer_update(s, qemu_get_clock_ns(vm_clock));
        }
        break;
    case 0x38:
        s->initial_count = val;
        s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
        apic_timer_update(s, s->initial_count_load_time);
        break;
    case 0x39:
        break;
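    /* Timer divide configuration: bits 0, 1 and 3 select the divider as
     * 2^((v + 1) & 7), so the encoding 0xb (v = 7) means divide by 1. */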
    case 0x3e:
        {
            int v;
            s->divide_conf = val & 0xb;
            v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
            s->count_shift = (v + 1) & 7;
        }
        break;
    default:
        s->esr |= ESR_ILLEGAL_ADDRESS;
        break;
    }
}

static void apic_pre_save(APICCommonState *s)
{
    apic_sync_vapic(s, SYNC_FROM_VAPIC);
}

static void apic_post_load(APICCommonState *s)
{
    if (s->timer_expiry != -1) {
        qemu_mod_timer(s->timer, s->timer_expiry);
    } else {
        qemu_del_timer(s->timer);
    }
}

static const MemoryRegionOps apic_io_ops = {
    .old_mmio = {
        .read = { apic_mem_readb, apic_mem_readw, apic_mem_readl, },
        .write = { apic_mem_writeb, apic_mem_writew, apic_mem_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void apic_init(APICCommonState *s)
{
    memory_region_init_io(&s->io_memory, &apic_io_ops, s, "apic-msi",
                          MSI_SPACE_SIZE);

    s->timer = qemu_new_timer_ns(vm_clock, apic_timer, s);
    local_apics[s->idx] = s;
}

static void apic_class_init(ObjectClass *klass, void *data)
{
    APICCommonClass *k = APIC_COMMON_CLASS(klass);

    k->init = apic_init;
    k->set_base = apic_set_base;
    k->set_tpr = apic_set_tpr;
    k->get_tpr = apic_get_tpr;
    k->vapic_base_update = apic_vapic_base_update;
    k->external_nmi = apic_external_nmi;
    k->pre_save = apic_pre_save;
    k->post_load = apic_post_load;
}

static TypeInfo apic_info = {
    .name          = "apic",
    .instance_size = sizeof(APICCommonState),
    .parent        = TYPE_APIC_COMMON,
    .class_init    = apic_class_init,
};

static void apic_register_types(void)
{
    type_register_static(&apic_info);
}

type_init(apic_register_types)