/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"

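/*
 * Clear the in-service bit for 'irq' and run any ack notifiers registered
 * for the corresponding global interrupt.
 */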
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
        s->isr &= ~(1 << irq);
        s->isr_ack |= (1 << irq);
        if (s != &s->pics_state->pics[0])
                irq += 8;
        /*
         * We are dropping lock while calling ack notifiers since ack
         * notifier callbacks for assigned devices call into PIC recursively.
         * Other interrupt may be delivered to PIC while lock is dropped but
         * it should be safe since PIC state is already updated at this stage.
         */
        raw_spin_unlock(&s->pics_state->lock);
        kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
        raw_spin_lock(&s->pics_state->lock);
}

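/*
 * Mark every line as acknowledged so that pic_irq_request() may kick the
 * BSP again for whatever interrupt is raised next.
 */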
void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
        struct kvm_pic *s = pic_irqchip(kvm);

        raw_spin_lock(&s->lock);
        s->pics[0].isr_ack = 0xff;
        s->pics[1].isr_ack = 0xff;
        raw_spin_unlock(&s->lock);
}

/*
 * set irq level. If an edge is detected, then the IRR is set to 1.
 * Returns -1 if the interrupt is masked, 0 if a raise was coalesced with
 * an already pending interrupt, and 1 otherwise.
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
        int mask, ret = 1;
        mask = 1 << irq;
        if (s->elcr & mask)     /* level triggered */
                if (level) {
                        ret = !(s->irr & mask);
                        s->irr |= mask;
                        s->last_irr |= mask;
                } else {
                        s->irr &= ~mask;
                        s->last_irr &= ~mask;
                }
        else    /* edge triggered */
                if (level) {
                        if ((s->last_irr & mask) == 0) {
                                ret = !(s->irr & mask);
                                s->irr |= mask;
                        }
                        s->last_irr |= mask;
                } else
                        s->last_irr &= ~mask;

        return (s->imr & mask) ? -1 : ret;
}

/*
 * return the highest priority found in mask (highest = smallest
 * number). Return 8 if no irq
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
        int priority;
        if (mask == 0)
                return 8;
        priority = 0;
        while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0)
                priority++;
        return priority;
}

/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
        int mask, cur_priority, priority;

        mask = s->irr & ~s->imr;
        priority = get_priority(s, mask);
        if (priority == 8)
                return -1;
        /*
         * compute current priority. If special fully nested mode on the
         * master, the IRQ coming from the slave is not taken into account
         * for the priority computation.
         */
        mask = s->isr;
        if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
                mask &= ~(1 << 2);
        cur_priority = get_priority(s, mask);
        if (priority < cur_priority)
                /*
                 * higher priority found: an irq should be generated
                 */
                return (priority + s->priority_add) & 7;
        else
                return -1;
}

/*
 * raise irq to CPU if necessary. must be called every time the active
 * irq may change
 */
static void pic_update_irq(struct kvm_pic *s)
{
        int irq2, irq;

        irq2 = pic_get_irq(&s->pics[1]);
        if (irq2 >= 0) {
                /*
                 * if irq request by slave pic, signal master PIC
                 */
                pic_set_irq1(&s->pics[0], 2, 1);
                pic_set_irq1(&s->pics[0], 2, 0);
        }
        irq = pic_get_irq(&s->pics[0]);
        if (irq >= 0)
                s->irq_request(s->irq_request_opaque, 1);
        else
                s->irq_request(s->irq_request_opaque, 0);
}

void kvm_pic_update_irq(struct kvm_pic *s)
{
        raw_spin_lock(&s->lock);
        pic_update_irq(s);
        raw_spin_unlock(&s->lock);
}

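/*
 * Set the level of an external interrupt pin.  Returns -1 if the line is
 * masked or out of range, 0 if a raise was coalesced with an interrupt
 * that is already pending, and 1 otherwise.
 */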
int kvm_pic_set_irq(void *opaque, int irq, int level)
{
        struct kvm_pic *s = opaque;
        int ret = -1;

        raw_spin_lock(&s->lock);
        if (irq >= 0 && irq < PIC_NUM_PINS) {
                ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
                pic_update_irq(s);
                trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
                                      s->pics[irq >> 3].imr, ret == 0);
        }
        raw_spin_unlock(&s->lock);

        return ret;
}

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
        s->isr |= 1 << irq;
        /*
         * We don't clear a level sensitive interrupt here
         */
        if (!(s->elcr & (1 << irq)))
                s->irr &= ~(1 << irq);

        if (s->auto_eoi) {
                if (s->rotate_on_auto_eoi)
                        s->priority_add = (irq + 1) & 7;
                pic_clear_isr(s, irq);
        }
}

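/*
 * Interrupt acknowledge cycle: pick the highest priority pending interrupt,
 * mark it in service (cascading into the slave for IRQ 2) and return the
 * vector to inject.  A spurious IRQ 7 vector is returned when nothing is
 * actually pending.
 */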
int kvm_pic_read_irq(struct kvm *kvm)
{
        int irq, irq2, intno;
        struct kvm_pic *s = pic_irqchip(kvm);

        raw_spin_lock(&s->lock);
        irq = pic_get_irq(&s->pics[0]);
        if (irq >= 0) {
                pic_intack(&s->pics[0], irq);
                if (irq == 2) {
                        irq2 = pic_get_irq(&s->pics[1]);
                        if (irq2 >= 0)
                                pic_intack(&s->pics[1], irq2);
                        else
                                /*
                                 * spurious IRQ on slave controller
                                 */
                                irq2 = 7;
                        intno = s->pics[1].irq_base + irq2;
                        irq = irq2 + 8;
                } else
                        intno = s->pics[0].irq_base + irq;
        } else {
                /*
                 * spurious IRQ on host controller
                 */
                irq = 7;
                intno = s->pics[0].irq_base + irq;
        }
        pic_update_irq(s);
        raw_spin_unlock(&s->lock);

        return intno;
}

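/*
 * Bring one of the two controllers back to its power-on state.  Lines that
 * were raised before the reset are acked through pic_clear_isr() so that
 * registered ack notifiers still run.
 */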
void kvm_pic_reset(struct kvm_kpic_state *s)
{
        int irq;
        struct kvm *kvm = s->pics_state->irq_request_opaque;
        struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
        u8 irr = s->irr, isr = s->imr;

        s->last_irr = 0;
        s->irr = 0;
        s->imr = 0;
        s->isr = 0;
        s->isr_ack = 0xff;
        s->priority_add = 0;
        s->irq_base = 0;
        s->read_reg_select = 0;
        s->poll = 0;
        s->special_mask = 0;
        s->init_state = 0;
        s->auto_eoi = 0;
        s->rotate_on_auto_eoi = 0;
        s->special_fully_nested_mode = 0;
        s->init4 = 0;

        for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
                if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
                        if (irr & (1 << irq) || isr & (1 << irq))
                                pic_clear_isr(s, irq);
        }
}

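/*
 * Emulate a write to the command port (ICW1, OCW2 or OCW3) or to the data
 * port (ICW2-ICW4 while an initialization sequence is in progress, the
 * interrupt mask register otherwise).
 */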
static void pic_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        int priority, cmd, irq;

        addr &= 1;
        if (addr == 0) {
                if (val & 0x10) {
                        /* ICW1: start a new initialization sequence */
                        kvm_pic_reset(s);       /* init */
                        /*
                         * deassert a pending interrupt
                         */
                        s->pics_state->irq_request(s->pics_state->
                                                   irq_request_opaque, 0);
                        s->init_state = 1;
                        s->init4 = val & 1;
                        if (val & 0x02)
                                printk(KERN_ERR "single mode not supported\n");
                        if (val & 0x08)
                                printk(KERN_ERR
                                       "level sensitive irq not supported\n");
                } else if (val & 0x08) {
                        /* OCW3 */
                        if (val & 0x04)
                                s->poll = 1;
                        if (val & 0x02)
                                s->read_reg_select = val & 1;
                        if (val & 0x40)
                                s->special_mask = (val >> 5) & 1;
                } else {
                        /* OCW2 */
                        cmd = val >> 5;
                        switch (cmd) {
                        case 0:
                        case 4:
                                s->rotate_on_auto_eoi = cmd >> 2;
                                break;
                        case 1: /* end of interrupt */
                        case 5:
                                priority = get_priority(s, s->isr);
                                if (priority != 8) {
                                        irq = (priority + s->priority_add) & 7;
                                        if (cmd == 5)
                                                s->priority_add = (irq + 1) & 7;
                                        pic_clear_isr(s, irq);
                                        pic_update_irq(s->pics_state);
                                }
                                break;
                        case 3: /* specific end of interrupt */
                                irq = val & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        case 6: /* set priority */
                                s->priority_add = (val + 1) & 7;
                                pic_update_irq(s->pics_state);
                                break;
                        case 7: /* rotate on specific end of interrupt */
                                irq = val & 7;
                                s->priority_add = (irq + 1) & 7;
                                pic_clear_isr(s, irq);
                                pic_update_irq(s->pics_state);
                                break;
                        default:
                                break;  /* no operation */
                        }
                }
        } else
                switch (s->init_state) {
                case 0: /* normal mode: OCW1, interrupt mask */
                        s->imr = val;
                        pic_update_irq(s->pics_state);
                        break;
                case 1: /* ICW2: vector base */
                        s->irq_base = val & 0xf8;
                        s->init_state = 2;
                        break;
                case 2: /* ICW3: cascade wiring, nothing to remember */
                        if (s->init4)
                                s->init_state = 3;
                        else
                                s->init_state = 0;
                        break;
                case 3: /* ICW4 */
                        s->special_fully_nested_mode = (val >> 4) & 1;
                        s->auto_eoi = (val >> 1) & 1;
                        s->init_state = 0;
                        break;
                }
}

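/*
 * Poll-mode read (requested via OCW3): return and acknowledge the highest
 * priority pending interrupt, or 0x07 if there is none.
 */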
static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
{
        int ret;

        ret = pic_get_irq(s);
        if (ret >= 0) {
                if (addr1 >> 7) {
                        s->pics_state->pics[0].isr &= ~(1 << 2);
                        s->pics_state->pics[0].irr &= ~(1 << 2);
                }
                s->irr &= ~(1 << ret);
                pic_clear_isr(s, ret);
                if (addr1 >> 7 || ret != 2)
                        pic_update_irq(s->pics_state);
        } else {
                ret = 0x07;
                pic_update_irq(s->pics_state);
        }

        return ret;
}

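/*
 * Emulate a read from the command or data port: IRR or ISR (depending on
 * the last OCW3) for the command port, the interrupt mask register for the
 * data port, or a poll-mode result if one was requested.
 */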
static u32 pic_ioport_read(void *opaque, u32 addr1)
{
        struct kvm_kpic_state *s = opaque;
        unsigned int addr;
        int ret;

        addr = addr1;
        addr &= 1;
        if (s->poll) {
                ret = pic_poll_read(s, addr1);
                s->poll = 0;
        } else
                if (addr == 0)
                        if (s->read_reg_select)
                                ret = s->isr;
                        else
                                ret = s->irr;
                else
                        ret = s->imr;
        return ret;
}

static void elcr_ioport_write(void *opaque, u32 addr, u32 val)
{
        struct kvm_kpic_state *s = opaque;
        s->elcr = val & s->elcr_mask;
}

static u32 elcr_ioport_read(void *opaque, u32 addr1)
{
        struct kvm_kpic_state *s = opaque;
        return s->elcr;
}

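/*
 * The emulated PIC answers at the master (0x20/0x21), slave (0xa0/0xa1)
 * and ELCR (0x4d0/0x4d1) I/O ports.
 */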
static int picdev_in_range(gpa_t addr)
{
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
        case 0x4d0:
        case 0x4d1:
                return 1;
        default:
                return 0;
        }
}

static inline struct kvm_pic *to_pic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_pic, dev);
}

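/*
 * kvm_io_device handlers: dispatch byte-sized port I/O to the addressed
 * controller (bit 7 of the port selects master or slave) or to the ELCR.
 */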
static int picdev_write(struct kvm_io_device *this,
                        gpa_t addr, int len, const void *val)
{
        struct kvm_pic *s = to_pic(this);
        unsigned char data = *(unsigned char *)val;
        if (!picdev_in_range(addr))
                return -EOPNOTSUPP;

        if (len != 1) {
                if (printk_ratelimit())
                        printk(KERN_ERR "PIC: non byte write\n");
                return 0;
        }
        raw_spin_lock(&s->lock);
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                pic_ioport_write(&s->pics[addr >> 7], addr, data);
                break;
        case 0x4d0:
        case 0x4d1:
                elcr_ioport_write(&s->pics[addr & 1], addr, data);
                break;
        }
        raw_spin_unlock(&s->lock);
        return 0;
}

static int picdev_read(struct kvm_io_device *this,
                       gpa_t addr, int len, void *val)
{
        struct kvm_pic *s = to_pic(this);
        unsigned char data = 0;
        if (!picdev_in_range(addr))
                return -EOPNOTSUPP;

        if (len != 1) {
                if (printk_ratelimit())
                        printk(KERN_ERR "PIC: non byte read\n");
                return 0;
        }
        raw_spin_lock(&s->lock);
        switch (addr) {
        case 0x20:
        case 0x21:
        case 0xa0:
        case 0xa1:
                data = pic_ioport_read(&s->pics[addr >> 7], addr);
                break;
        case 0x4d0:
        case 0x4d1:
                data = elcr_ioport_read(&s->pics[addr & 1], addr);
                break;
        }
        *(unsigned char *)val = data;
        raw_spin_unlock(&s->lock);
        return 0;
}

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(void *opaque, int level)
{
        struct kvm *kvm = opaque;
        struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
        struct kvm_pic *s = pic_irqchip(kvm);
        int irq = pic_get_irq(&s->pics[0]);

        s->output = level;
        if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
                s->pics[0].isr_ack &= ~(1 << irq);
                kvm_vcpu_kick(vcpu);
        }
}

static const struct kvm_io_device_ops picdev_ops = {
        .read     = picdev_read,
        .write    = picdev_write,
};

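/*
 * Allocate and initialize the PIC: set up both controllers, hook up the
 * irq_request callback and register the device on the PIO bus.
 */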
struct kvm_pic *kvm_create_pic(struct kvm *kvm)
{
        struct kvm_pic *s;
        int ret;

        s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
        if (!s)
                return NULL;
        raw_spin_lock_init(&s->lock);
        s->kvm = kvm;
        s->pics[0].elcr_mask = 0xf8;
        s->pics[1].elcr_mask = 0xde;
        s->irq_request = pic_irq_request;
        s->irq_request_opaque = kvm;
        s->pics[0].pics_state = s;
        s->pics[1].pics_state = s;

        /*
         * Initialize PIO device
         */
        kvm_iodevice_init(&s->dev, &picdev_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
        mutex_unlock(&kvm->slots_lock);
        if (ret < 0) {
                kfree(s);
                return NULL;
        }

        return s;
}

void kvm_destroy_pic(struct kvm *kvm)
{
        struct kvm_pic *vpic = kvm->arch.vpic;

        if (vpic) {
                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev);
                kvm->arch.vpic = NULL;
                kfree(vpic);
        }
}