/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

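/*
 * Illustrative sketch (not part of the driver): an update to the tables
 * below is expected to follow the pattern used by the bind_*_to_irq()
 * functions later in this file,
 *
 *	spin_lock(&irq_mapping_update_lock);
 *	evtchn_to_irq[evtchn] = irq;
 *	irq_info[irq] = mk_evtchn_info(evtchn);
 *	spin_unlock(&irq_mapping_update_lock);
 *
 * while lookups such as evtchn_to_irq[evtchn] may be done locklessly.
 */
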
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

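/*
 * Worked example for active_evtchns() (illustrative values): if, for
 * some word idx,
 *
 *	sh->evtchn_pending[idx]   = 0b1011	(ports 0, 1 and 3 pending)
 *	cpu_evtchn_mask(cpu)[idx] = 0b1110	(ports 1-3 bound to this cpu)
 *	sh->evtchn_mask[idx]      = 0b0010	(port 1 globally masked)
 *
 * the result is 0b1000: only port 3 is pending, bound to this cpu and
 * not globally masked.
 */
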
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(cpu_evtchn_mask(i),
		       (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

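/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * front-end that bound an event channel with
 * bind_evtchn_to_irqhandler() can kick the back-end after queueing a
 * request, e.g.
 *
 *	notify_remote_via_irq(ring_info->irq);
 *
 * where ring_info->irq is whatever irq the binding call returned.
 */
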
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

static int find_unbound_pirq(int type)
{
	int rc, i;
	struct physdev_get_free_pirq op_get_free_pirq;
	op_get_free_pirq.type = type;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
	if (!rc)
		return op_get_free_pirq.pirq;

	for (i = 0; i < nr_irqs; i++) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}

static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int bottom = get_nr_hw_irqs();
	int top = nr_irqs-1;

	if (bottom == nr_irqs)
		goto no_irqs;

	/* This loop starts from the top of IRQ space and goes down.
	 * We need this b/c if we have a PCI device in a Xen PV guest
	 * we do not have an IO-APIC (though the backend might have them)
	 * mapped in. To not have a collision of physical IRQs with the Xen
	 * event channels start at the top of the IRQ space for virtual IRQs.
	 */
	for (irq = top; irq > bottom; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == bottom)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, -1);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

)
554 static void disable_pirq(unsigned int irq
)
558 static void ack_pirq(unsigned int irq
)
560 int evtchn
= evtchn_from_irq(irq
);
562 move_native_irq(irq
);
564 if (VALID_EVTCHN(evtchn
)) {
566 clear_evtchn(evtchn
);
static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in that case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
			pirq > nr_irqs ? "pirq" :"",
			gsi > nr_irqs ? "gsi" : "");
		goto out;
	}

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
	 * we are using the !xen_initial_domain() to drop in the function. */
	if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
				xen_pv_domain())) {
		irq = gsi;
		irq_alloc_desc_at(irq, -1);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq = -1;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

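/*
 * Usage sketch (hypothetical caller): dom0 code that wants a kernel irq
 * for a legacy GSI can use the identity-mapped wrapper above,
 *
 *	int irq = xen_allocate_pirq(gsi, 1, "ioapic");
 *
 * which maps pirq == gsi; per the comment before xen_map_pirq_gsi(),
 * the returned irq then matches the gsi.
 */
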
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#include "../pci/msi.h"

void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
{
	spin_lock(&irq_mapping_update_lock);

	if (alloc & XEN_ALLOC_IRQ) {
		*irq = find_unbound_irq();
		if (*irq == -1)
			goto out;
	}

	if (alloc & XEN_ALLOC_PIRQ) {
		*pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
		if (*pirq == -1)
			goto out;
	}

	set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
	pirq_to_irq[*pirq] = *irq;

out:
	spin_unlock(&irq_mapping_update_lock);
}

int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
{
	int irq = -1;
	struct physdev_map_pirq map_irq;
	int rc;
	int pos;
	u32 table_offset, bir;

	memset(&map_irq, 0, sizeof(map_irq));
	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_MSI;
	map_irq.index = -1;
	map_irq.pirq = -1;
	map_irq.bus = dev->bus->number;
	map_irq.devfn = dev->devfn;

	if (type == PCI_CAP_ID_MSIX) {
		pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);

		pci_read_config_dword(dev, msix_table_offset_reg(pos),
					&table_offset);
		bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);

		map_irq.table_base = pci_resource_start(dev, bir);
		map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
	}

	spin_lock(&irq_mapping_update_lock);

	irq = find_unbound_irq();

	if (irq == -1)
		goto out;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);

		irq_free_desc(irq);

		irq = -1;
		goto out;
	}
	irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
			handle_level_irq,
			(type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = DOMID_SELF;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
		pirq_to_irq[info->u.pirq.pirq] = -1;
	}
	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int xen_irq_from_pirq(unsigned pirq)
{
	return pirq_to_irq[pirq];
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_fasteoi_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

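/*
 * Usage sketch (hypothetical front-end; names are illustrative): after
 * reading the back-end's event channel from xenstore, a driver can bind
 * a handler in one call and keep the returned irq for teardown:
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, blkif_interrupt,
 *					0, "blkif", info);
 *	if (err < 0)
 *		goto fail;
 *	info->irq = err;
 *
 * and later call unbind_from_irqhandler(info->irq, info).
 */
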
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

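/*
 * Usage sketch (hypothetical back-end): given the front-end's domid and
 * the remote port it advertised via xenbus, a back-end binds in one
 * step:
 *
 *	irq = bind_interdomain_evtchn_to_irqhandler(fe_domid, remote_port,
 *						    be_interrupt, 0,
 *						    "backend", be_dev);
 */
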
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

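/*
 * Usage sketch (illustrative): the Xen timer code binds its per-cpu
 * VIRQ along these lines, once per cpu since VIRQs are per-cpu events
 * (see the file header):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_DISABLED|IRQF_PERCPU|IRQF_TIMER,
 *				      "timer", NULL);
 */
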
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
				       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
				       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
				       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

);
/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
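/*
 * Worked example (64-bit guest, BITS_PER_LONG == 64): if bit 3 of
 * evtchn_pending_sel is set, word 3 of the shared evtchn_pending[]
 * array contains at least one pending event; if bit 5 of that word is
 * set, the pending port is 3 * 64 + 5 = 197, matching the
 * port = (word_idx * BITS_PER_LONG) + bit_idx computation below.
 */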
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				mask_evtchn(port);
				clear_evtchn(port);

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
		(xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_masked_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;

	for (pirq = 0; pirq < nr_irqs; pirq++) {
		irq = pirq_to_irq[pirq];
		if (irq == -1)
			continue;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		gsi = gsi_from_irq(irq);
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			irq_info[irq] = mk_unbound_info();
			pirq_to_irq[pirq] = -1;
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;
	struct irq_desc *desc;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	/*
	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
	 * are not handled by the IRQ core.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
			continue;
		if (desc->status & IRQ_DISABLED)
			continue;

		evtchn = evtchn_from_irq(irq);
		if (evtchn == -1)
			continue;

		unmask_evtchn(evtchn);
	}

	restore_cpu_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.eoi		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	/* We are using nr_irqs as the maximum number of pirq available but
	 * that number is actually chosen by Xen and we don't know exactly
	 * what it is. Be careful choosing high pirq numbers. */
	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_irqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}