drivers/xen/events.c
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
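/*
 * For illustration only (not part of the original file): a front-end
 * driver typically consumes this API roughly as sketched below.  The
 * handler name, evtchn value, and dev pointer are hypothetical.
 *
 *	static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, blkif_interrupt,
 *					    0, "blkif", dev);
 *	if (irq < 0)
 *		return irq;		(binding or request_irq failed)
 *	notify_remote_via_irq(irq);	(kick the remote end)
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */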

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EIO"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};

static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
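/*
 * Per-cpu bitmap of event channels: bit N of cpu_evtchn_mask(cpu) is
 * set iff event channel N is bound for delivery to that cpu.  Used by
 * active_evtchns() to filter out events bound elsewhere.
 */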
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

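/*
 * Return the word of event channels starting at idx*BITS_PER_LONG that
 * are pending, unmasked, and bound for delivery to this cpu.
 */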
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

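/*
 * Record that event channel chn is now delivered to cpu: update the
 * irq descriptor's affinity mask, move the channel's bit between the
 * per-cpu masks, and note the new cpu in irq_info.
 */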
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/*
	 * Mark every channel as bound to CPU0.  The size must cover the
	 * whole bitmap: sizeof(cpu_evtchn_mask(0)) would only be the
	 * size of a pointer.
	 */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

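/*
 * Scan irq_info for an irq that is not yet bound to any event channel
 * and allocate a descriptor for it; panics if the irq space is
 * exhausted.
 */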
static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_info[irq].type == IRQT_UNBOUND)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_node(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	dynamic_irq_init(irq);

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

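/*
 * Per-cpu reentrancy count for the upcall below: a nested upcall (one
 * that interrupts xen_evtchn_do_upcall itself) only bumps this count;
 * the outermost invocation keeps looping until the count drops back
 * to 1, so events raised during a nested upcall are not lost.
 */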
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	exit_idle();
	irq_enter();

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
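				/*
				 * Recover the port number from the two
				 * bitmap levels, e.g. on 64-bit, word 2 /
				 * bit 5 is port 2*64 + 5 = 133.
				 */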
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1);

out:
	irq_exit();
	set_irq_regs(old_regs);

	put_cpu();
}

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

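	/*
	 * Mask the channel while setting its pending bit, then unmask
	 * only if it was not already masked; unmask_evtchn() re-raises
	 * the upcall for a pending port, which retriggers delivery.
	 */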
	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending. In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

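/*
 * The irq chip for dynamically-bound event channel irqs: genirq's
 * mask/unmask/ack operations map directly onto the event channel's
 * mask and pending bits.
 */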
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	irq_ctx_init(smp_processor_id());
}