]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * 8259 interrupt controller emulation | |
3 | * | |
4 | * Copyright (c) 2003-2004 Fabrice Bellard | |
5 | * Copyright (c) 2007 Intel Corporation | |
6 | * Copyright 2009 Red Hat, Inc. and/or its affilates. | |
7 | * | |
8 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
9 | * of this software and associated documentation files (the "Software"), to deal | |
10 | * in the Software without restriction, including without limitation the rights | |
11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
12 | * copies of the Software, and to permit persons to whom the Software is | |
13 | * furnished to do so, subject to the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice shall be included in | |
16 | * all copies or substantial portions of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
24 | * THE SOFTWARE. | |
25 | * Authors: | |
26 | * Yaozu (Eddie) Dong <Eddie.dong@intel.com> | |
27 | * Port from Qemu. | |
28 | */ | |
29 | #include <linux/mm.h> | |
30 | #include <linux/slab.h> | |
31 | #include <linux/bitops.h> | |
32 | #include "irq.h" | |
33 | ||
34 | #include <linux/kvm_host.h> | |
35 | #include "trace.h" | |
36 | ||
/*
 * Acquire the PIC state lock.  The sparse annotation lets lock
 * balance be checked against pic_unlock().
 */
static void pic_lock(struct kvm_pic *s)
	__acquires(&s->lock)
{
	raw_spin_lock(&s->lock);
}
42 | ||
/*
 * Release the PIC state lock.  If some update performed while the lock
 * was held set wakeup_needed, kick the BSP vcpu -- but only after the
 * lock has been dropped, so the kick cannot recurse into PIC code that
 * takes the same lock.
 */
static void pic_unlock(struct kvm_pic *s)
	__releases(&s->lock)
{
	/* snapshot the flag while still under the lock */
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;

	s->wakeup_needed = false;

	raw_spin_unlock(&s->lock);

	if (wakeup) {
		vcpu = s->kvm->bsp_vcpu;
		if (vcpu)
			kvm_vcpu_kick(vcpu);
	}
}
59 | ||
/*
 * Drop @irq from the in-service register and record in isr_ack that the
 * CPU no longer needs a kick for it.  @irq is chip-local (0..7); it is
 * converted to the global number (8..15 on the slave) before running
 * the ack notifier chain.  Caller must hold the PIC lock.
 */
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
	s->isr &= ~(1 << irq);
	s->isr_ack |= (1 << irq);
	if (s != &s->pics_state->pics[0])
		irq += 8;
	/*
	 * We are dropping lock while calling ack notifiers since ack
	 * notifier callbacks for assigned devices call into PIC recursively.
	 * Other interrupt may be delivered to PIC while lock is dropped but
	 * it should be safe since PIC state is already updated at this stage.
	 */
	pic_unlock(s->pics_state);
	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
	pic_lock(s->pics_state);
}
76 | ||
77 | void kvm_pic_clear_isr_ack(struct kvm *kvm) | |
78 | { | |
79 | struct kvm_pic *s = pic_irqchip(kvm); | |
80 | ||
81 | pic_lock(s); | |
82 | s->pics[0].isr_ack = 0xff; | |
83 | s->pics[1].isr_ack = 0xff; | |
84 | pic_unlock(s); | |
85 | } | |
86 | ||
87 | /* | |
88 | * set irq level. If an edge is detected, then the IRR is set to 1 | |
89 | */ | |
90 | static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) | |
91 | { | |
92 | int mask, ret = 1; | |
93 | mask = 1 << irq; | |
94 | if (s->elcr & mask) /* level triggered */ | |
95 | if (level) { | |
96 | ret = !(s->irr & mask); | |
97 | s->irr |= mask; | |
98 | s->last_irr |= mask; | |
99 | } else { | |
100 | s->irr &= ~mask; | |
101 | s->last_irr &= ~mask; | |
102 | } | |
103 | else /* edge triggered */ | |
104 | if (level) { | |
105 | if ((s->last_irr & mask) == 0) { | |
106 | ret = !(s->irr & mask); | |
107 | s->irr |= mask; | |
108 | } | |
109 | s->last_irr |= mask; | |
110 | } else | |
111 | s->last_irr &= ~mask; | |
112 | ||
113 | return (s->imr & mask) ? -1 : ret; | |
114 | } | |
115 | ||
116 | /* | |
117 | * return the highest priority found in mask (highest = smallest | |
118 | * number). Return 8 if no irq | |
119 | */ | |
120 | static inline int get_priority(struct kvm_kpic_state *s, int mask) | |
121 | { | |
122 | int priority; | |
123 | if (mask == 0) | |
124 | return 8; | |
125 | priority = 0; | |
126 | while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0) | |
127 | priority++; | |
128 | return priority; | |
129 | } | |
130 | ||
131 | /* | |
132 | * return the pic wanted interrupt. return -1 if none | |
133 | */ | |
134 | static int pic_get_irq(struct kvm_kpic_state *s) | |
135 | { | |
136 | int mask, cur_priority, priority; | |
137 | ||
138 | mask = s->irr & ~s->imr; | |
139 | priority = get_priority(s, mask); | |
140 | if (priority == 8) | |
141 | return -1; | |
142 | /* | |
143 | * compute current priority. If special fully nested mode on the | |
144 | * master, the IRQ coming from the slave is not taken into account | |
145 | * for the priority computation. | |
146 | */ | |
147 | mask = s->isr; | |
148 | if (s->special_fully_nested_mode && s == &s->pics_state->pics[0]) | |
149 | mask &= ~(1 << 2); | |
150 | cur_priority = get_priority(s, mask); | |
151 | if (priority < cur_priority) | |
152 | /* | |
153 | * higher priority found: an irq should be generated | |
154 | */ | |
155 | return (priority + s->priority_add) & 7; | |
156 | else | |
157 | return -1; | |
158 | } | |
159 | ||
160 | /* | |
161 | * raise irq to CPU if necessary. must be called every time the active | |
162 | * irq may change | |
163 | */ | |
164 | static void pic_update_irq(struct kvm_pic *s) | |
165 | { | |
166 | int irq2, irq; | |
167 | ||
168 | irq2 = pic_get_irq(&s->pics[1]); | |
169 | if (irq2 >= 0) { | |
170 | /* | |
171 | * if irq request by slave pic, signal master PIC | |
172 | */ | |
173 | pic_set_irq1(&s->pics[0], 2, 1); | |
174 | pic_set_irq1(&s->pics[0], 2, 0); | |
175 | } | |
176 | irq = pic_get_irq(&s->pics[0]); | |
177 | if (irq >= 0) | |
178 | s->irq_request(s->irq_request_opaque, 1); | |
179 | else | |
180 | s->irq_request(s->irq_request_opaque, 0); | |
181 | } | |
182 | ||
/*
 * Locked entry point for re-evaluating the PIC output; for callers
 * that do not already hold the PIC lock.
 */
void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}
189 | ||
190 | int kvm_pic_set_irq(void *opaque, int irq, int level) | |
191 | { | |
192 | struct kvm_pic *s = opaque; | |
193 | int ret = -1; | |
194 | ||
195 | pic_lock(s); | |
196 | if (irq >= 0 && irq < PIC_NUM_PINS) { | |
197 | ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); | |
198 | pic_update_irq(s); | |
199 | trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr, | |
200 | s->pics[irq >> 3].imr, ret == 0); | |
201 | } | |
202 | pic_unlock(s); | |
203 | ||
204 | return ret; | |
205 | } | |
206 | ||
207 | /* | |
208 | * acknowledge interrupt 'irq' | |
209 | */ | |
210 | static inline void pic_intack(struct kvm_kpic_state *s, int irq) | |
211 | { | |
212 | s->isr |= 1 << irq; | |
213 | /* | |
214 | * We don't clear a level sensitive interrupt here | |
215 | */ | |
216 | if (!(s->elcr & (1 << irq))) | |
217 | s->irr &= ~(1 << irq); | |
218 | ||
219 | if (s->auto_eoi) { | |
220 | if (s->rotate_on_auto_eoi) | |
221 | s->priority_add = (irq + 1) & 7; | |
222 | pic_clear_isr(s, irq); | |
223 | } | |
224 | ||
225 | } | |
226 | ||
/*
 * INTA cycle: acknowledge the highest-priority pending interrupt and
 * return the vector number to inject into the CPU.  If nothing is
 * pending, a spurious IRQ7 vector is returned (from the slave's base
 * when the cascade fired, otherwise from the master's).
 */
int kvm_pic_read_irq(struct kvm *kvm)
{
	int irq, irq2, intno;
	struct kvm_pic *s = pic_irqchip(kvm);

	pic_lock(s);
	irq = pic_get_irq(&s->pics[0]);
	if (irq >= 0) {
		pic_intack(&s->pics[0], irq);
		if (irq == 2) {
			/* cascade: the real source is on the slave chip */
			irq2 = pic_get_irq(&s->pics[1]);
			if (irq2 >= 0)
				pic_intack(&s->pics[1], irq2);
			else
				/*
				 * spurious IRQ on slave controller
				 */
				irq2 = 7;
			intno = s->pics[1].irq_base + irq2;
			irq = irq2 + 8;
		} else
			intno = s->pics[0].irq_base + irq;
	} else {
		/*
		 * spurious IRQ on host controller
		 */
		irq = 7;
		intno = s->pics[0].irq_base + irq;
	}
	/* the ack may have unblocked a lower-priority request */
	pic_update_irq(s);
	pic_unlock(s);

	return intno;
}
261 | ||
/*
 * Reset one PIC chip to its power-on/ICW1 state.  Lines that were
 * pending before the reset are acked through the notifier chain so
 * assigned devices are not left waiting for an ack that never comes.
 */
void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	struct kvm *kvm = s->pics_state->irq_request_opaque;
	struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
	/*
	 * NOTE(review): 'isr' is snapshotted from s->imr, not s->isr.
	 * This looks like a typo (the loop below would then ack *masked*
	 * lines rather than in-service ones) -- confirm intent before
	 * relying on it.
	 */
	u8 irr = s->irr, isr = s->imr;

	s->last_irr = 0;
	s->irr = 0;
	s->imr = 0;
	s->isr = 0;
	s->isr_ack = 0xff;	/* every line counts as already acked */
	s->priority_add = 0;
	s->irq_base = 0;
	s->read_reg_select = 0;
	s->poll = 0;
	s->special_mask = 0;
	s->init_state = 0;
	s->auto_eoi = 0;
	s->rotate_on_auto_eoi = 0;
	s->special_fully_nested_mode = 0;
	s->init4 = 0;

	/* ack everything that was outstanding before the reset */
	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
			if (irr & (1 << irq) || isr & (1 << irq)) {
				pic_clear_isr(s, irq);
			}
	}
}
292 | ||
/*
 * Write to a PIC command (even address) or data (odd address) port.
 * Even-port writes are decoded as ICW1 (bit 4 set), OCW3 (bit 3 set)
 * or OCW2; odd-port writes are either the IMR (OCW1) or the next step
 * of the ICW1..ICW4 init sequence, tracked in s->init_state.
 */
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	int priority, cmd, irq;

	addr &= 1;
	if (addr == 0) {
		if (val & 0x10) {
			/* ICW1: begin (re)initialization */
			kvm_pic_reset(s);	/* init */
			/*
			 * deassert a pending interrupt
			 */
			s->pics_state->irq_request(s->pics_state->
						   irq_request_opaque, 0);
			s->init_state = 1;
			s->init4 = val & 1;	/* will an ICW4 follow? */
			if (val & 0x02)
				printk(KERN_ERR "single mode not supported");
			if (val & 0x08)
				printk(KERN_ERR
				       "level sensitive irq not supported");
		} else if (val & 0x08) {
			/* OCW3: poll / register read select / special mask */
			if (val & 0x04)
				s->poll = 1;
			if (val & 0x02)
				s->read_reg_select = val & 1;
			if (val & 0x40)
				s->special_mask = (val >> 5) & 1;
		} else {
			/* OCW2: EOI and rotation, command in bits 5..7 */
			cmd = val >> 5;
			switch (cmd) {
			case 0:		/* clear rotate in auto-EOI mode */
			case 4:		/* set rotate in auto-EOI mode */
				s->rotate_on_auto_eoi = cmd >> 2;
				break;
			case 1:		/* end of interrupt (non-specific) */
			case 5:		/* rotate on non-specific EOI */
				priority = get_priority(s, s->isr);
				if (priority != 8) {
					irq = (priority + s->priority_add) & 7;
					if (cmd == 5)
						s->priority_add = (irq + 1) & 7;
					pic_clear_isr(s, irq);
					pic_update_irq(s->pics_state);
				}
				break;
			case 3:		/* specific EOI */
				irq = val & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			case 6:		/* set priority */
				s->priority_add = (val + 1) & 7;
				pic_update_irq(s->pics_state);
				break;
			case 7:		/* rotate on specific EOI */
				irq = val & 7;
				s->priority_add = (irq + 1) & 7;
				pic_clear_isr(s, irq);
				pic_update_irq(s->pics_state);
				break;
			default:
				break;	/* no operation */
			}
		}
	} else
		switch (s->init_state) {
		case 0:		/* normal mode: OCW1, the interrupt mask */
			s->imr = val;
			pic_update_irq(s->pics_state);
			break;
		case 1:		/* ICW2: vector base */
			s->irq_base = val & 0xf8;
			s->init_state = 2;
			break;
		case 2:		/* ICW3: cascade wiring, value ignored */
			if (s->init4)
				s->init_state = 3;
			else
				s->init_state = 0;
			break;
		case 3:		/* ICW4: mode bits */
			s->special_fully_nested_mode = (val >> 4) & 1;
			s->auto_eoi = (val >> 1) & 1;
			s->init_state = 0;
			break;
		}
}
381 | ||
/*
 * OCW3 poll command: return the highest-priority pending irq (or 7 if
 * none) and implicitly acknowledge it.  When the access arrived via
 * the slave's ports (addr1 bit 7 set) the master's cascade request is
 * cleared as well.
 */
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
	int ret;

	ret = pic_get_irq(s);
	if (ret >= 0) {
		if (addr1 >> 7) {
			/* slave access: drop the cascade (IRQ2) on the master */
			s->pics_state->pics[0].isr &= ~(1 << 2);
			s->pics_state->pics[0].irr &= ~(1 << 2);
		}
		s->irr &= ~(1 << ret);
		pic_clear_isr(s, ret);
		if (addr1 >> 7 || ret != 2)
			pic_update_irq(s->pics_state);
	} else {
		/* nothing pending: report IRQ7 */
		ret = 0x07;
		pic_update_irq(s->pics_state);
	}

	return ret;
}
403 | ||
404 | static u32 pic_ioport_read(void *opaque, u32 addr1) | |
405 | { | |
406 | struct kvm_kpic_state *s = opaque; | |
407 | unsigned int addr; | |
408 | int ret; | |
409 | ||
410 | addr = addr1; | |
411 | addr &= 1; | |
412 | if (s->poll) { | |
413 | ret = pic_poll_read(s, addr1); | |
414 | s->poll = 0; | |
415 | } else | |
416 | if (addr == 0) | |
417 | if (s->read_reg_select) | |
418 | ret = s->isr; | |
419 | else | |
420 | ret = s->irr; | |
421 | else | |
422 | ret = s->imr; | |
423 | return ret; | |
424 | } | |
425 | ||
/*
 * Guest write to an ELCR (edge/level control) register; only the bits
 * permitted by elcr_mask are kept.
 */
static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}
431 | ||
/* Guest read of an ELCR (edge/level control) register. */
static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}
437 | ||
438 | static int picdev_in_range(gpa_t addr) | |
439 | { | |
440 | switch (addr) { | |
441 | case 0x20: | |
442 | case 0x21: | |
443 | case 0xa0: | |
444 | case 0xa1: | |
445 | case 0x4d0: | |
446 | case 0x4d1: | |
447 | return 1; | |
448 | default: | |
449 | return 0; | |
450 | } | |
451 | } | |
452 | ||
/* Recover the enclosing kvm_pic from its embedded kvm_io_device. */
static inline struct kvm_pic *to_pic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pic, dev);
}
457 | ||
458 | static int picdev_write(struct kvm_io_device *this, | |
459 | gpa_t addr, int len, const void *val) | |
460 | { | |
461 | struct kvm_pic *s = to_pic(this); | |
462 | unsigned char data = *(unsigned char *)val; | |
463 | if (!picdev_in_range(addr)) | |
464 | return -EOPNOTSUPP; | |
465 | ||
466 | if (len != 1) { | |
467 | if (printk_ratelimit()) | |
468 | printk(KERN_ERR "PIC: non byte write\n"); | |
469 | return 0; | |
470 | } | |
471 | pic_lock(s); | |
472 | switch (addr) { | |
473 | case 0x20: | |
474 | case 0x21: | |
475 | case 0xa0: | |
476 | case 0xa1: | |
477 | pic_ioport_write(&s->pics[addr >> 7], addr, data); | |
478 | break; | |
479 | case 0x4d0: | |
480 | case 0x4d1: | |
481 | elcr_ioport_write(&s->pics[addr & 1], addr, data); | |
482 | break; | |
483 | } | |
484 | pic_unlock(s); | |
485 | return 0; | |
486 | } | |
487 | ||
488 | static int picdev_read(struct kvm_io_device *this, | |
489 | gpa_t addr, int len, void *val) | |
490 | { | |
491 | struct kvm_pic *s = to_pic(this); | |
492 | unsigned char data = 0; | |
493 | if (!picdev_in_range(addr)) | |
494 | return -EOPNOTSUPP; | |
495 | ||
496 | if (len != 1) { | |
497 | if (printk_ratelimit()) | |
498 | printk(KERN_ERR "PIC: non byte read\n"); | |
499 | return 0; | |
500 | } | |
501 | pic_lock(s); | |
502 | switch (addr) { | |
503 | case 0x20: | |
504 | case 0x21: | |
505 | case 0xa0: | |
506 | case 0xa1: | |
507 | data = pic_ioport_read(&s->pics[addr >> 7], addr); | |
508 | break; | |
509 | case 0x4d0: | |
510 | case 0x4d1: | |
511 | data = elcr_ioport_read(&s->pics[addr & 1], addr); | |
512 | break; | |
513 | } | |
514 | *(unsigned char *)val = data; | |
515 | pic_unlock(s); | |
516 | return 0; | |
517 | } | |
518 | ||
519 | /* | |
520 | * callback when PIC0 irq status changed | |
521 | */ | |
522 | static void pic_irq_request(void *opaque, int level) | |
523 | { | |
524 | struct kvm *kvm = opaque; | |
525 | struct kvm_vcpu *vcpu = kvm->bsp_vcpu; | |
526 | struct kvm_pic *s = pic_irqchip(kvm); | |
527 | int irq = pic_get_irq(&s->pics[0]); | |
528 | ||
529 | s->output = level; | |
530 | if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) { | |
531 | s->pics[0].isr_ack &= ~(1 << irq); | |
532 | s->wakeup_needed = true; | |
533 | } | |
534 | } | |
535 | ||
/* I/O bus callbacks for the PIC's PIO region. */
static const struct kvm_io_device_ops picdev_ops = {
	.read = picdev_read,
	.write = picdev_write,
};
540 | ||
/*
 * Allocate and initialize the virtual master/slave PIC pair and
 * register it on the PIO bus.  Returns the new PIC, or NULL on
 * allocation/registration failure (the partial object is freed).
 */
struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
	struct kvm_pic *s;
	int ret;

	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
	if (!s)
		return NULL;
	raw_spin_lock_init(&s->lock);
	s->kvm = kvm;
	/* guest-writable ELCR bits per chip (see elcr_ioport_write) */
	s->pics[0].elcr_mask = 0xf8;
	s->pics[1].elcr_mask = 0xde;
	s->irq_request = pic_irq_request;
	s->irq_request_opaque = kvm;
	s->pics[0].pics_state = s;
	s->pics[1].pics_state = s;

	/*
	 * Initialize PIO device
	 */
	kvm_iodevice_init(&s->dev, &picdev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kfree(s);
		return NULL;
	}

	return s;
}
572 | ||
573 | void kvm_destroy_pic(struct kvm *kvm) | |
574 | { | |
575 | struct kvm_pic *vpic = kvm->arch.vpic; | |
576 | ||
577 | if (vpic) { | |
578 | kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev); | |
579 | kvm->arch.vpic = NULL; | |
580 | kfree(vpic); | |
581 | } | |
582 | } |