/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays.  The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)

static struct irq_info *irq_info;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

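/*
 * Record that event channel 'chn' is now delivered to 'cpu': update the
 * irq's affinity and the per-cpu selection masks, and cache the binding
 * in irq_info[].
 */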
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
}

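/*
 * Helpers for the shared-info pending bitmap.  They use Xen's sync_*
 * bitops because the hypervisor and other vcpus update the same words
 * concurrently.
 */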
static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

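/*
 * Mask/unmask an event channel in the shared-info mask bitmap.  Unmasking
 * a port bound to a remote vcpu needs a hypercall; the local case is
 * handled directly and re-raises the upcall if an event is already pending.
 */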
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

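/*
 * Find an irq that is not yet bound to an event channel, searching down
 * from the top of the irq space so as not to collide with the
 * identity-mapped hardware (GSI) range at the bottom.
 */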
static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it.*/
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = irq };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

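/*
 * Bind a physical IRQ to an event channel the first time the irq is
 * started up, then unmask it (sending an EOI if the hypervisor asks
 * for one).
 */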
static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

/* xen_allocate_pirq might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_allocate_pirq is called passing a
 * hardware gsi as argument, in which case the irq number returned
 * matches the gsi number passed as the first argument.
 *
 * Note: we don't assign an event channel until the irq actually starts
 * up.  Return an existing irq if we've already got one for the gsi.
 */
int xen_allocate_pirq(unsigned gsi, char *name)
{
	int irq;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_allocate_pirq: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	if (identity_mapped_irq(gsi)) {
		irq = gsi;
		irq_to_desc_alloc_node(irq, 0);
		dynamic_irq_init(irq);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		dynamic_irq_cleanup(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, gsi, irq_op.vector);

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

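/*
 * Map an already-allocated event channel to an irq, creating the irq on
 * first use; later calls for the same port return the existing irq.
 */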
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

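/*
 * Tear down an irq's event-channel binding: close the port, drop the
 * per-type reverse mapping and free the irq descriptor.
 */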
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
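
/*
 * Typical usage (hypothetical caller, shown only for illustration): a
 * frontend that has been told the port number 'evtchn' by its backend
 * might set up and later tear down its handler like this:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 *
 * 'my_handler', 'my_dev' and the device name are placeholders, not part
 * of this file.
 */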

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

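/*
 * Debug interrupt handler: dump each vcpu's upcall state and the
 * event-channel pending/mask bitmaps to the console.
 */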
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nmasks:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nunmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

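/* Per-cpu nesting count, used to fold re-entered upcalls into a single pass. */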
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending-events bitmasks.  For each one found, map
 * the event number to an irq and feed it into the interrupt handling
 * path.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

952 | /* Rebind a new event channel to an existing irq. */ |
953 | void rebind_evtchn_irq(int evtchn, int irq) | |
954 | { | |
d77bbd4d JF |
955 | struct irq_info *info = info_for_irq(irq); |
956 | ||
eb1e305f JF |
957 | /* Make sure the irq is masked, since the new event channel |
958 | will also be masked. */ | |
959 | disable_irq(irq); | |
960 | ||
961 | spin_lock(&irq_mapping_update_lock); | |
962 | ||
963 | /* After resume the irq<->evtchn mappings are all cleared out */ | |
964 | BUG_ON(evtchn_to_irq[evtchn] != -1); | |
965 | /* Expect irq to have been bound before, | |
d77bbd4d JF |
966 | so there should be a proper type */ |
967 | BUG_ON(info->type == IRQT_UNBOUND); | |
eb1e305f JF |
968 | |
969 | evtchn_to_irq[evtchn] = irq; | |
ced40d0f | 970 | irq_info[irq] = mk_evtchn_info(evtchn); |
eb1e305f JF |
971 | |
972 | spin_unlock(&irq_mapping_update_lock); | |
973 | ||
974 | /* new event channels are always bound to cpu 0 */ | |
0de26520 | 975 | irq_set_affinity(irq, cpumask_of(0)); |
eb1e305f JF |
976 | |
977 | /* Unmask the event channel. */ | |
978 | enable_irq(irq); | |
979 | } | |
980 | ||
e46cdb66 | 981 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ |
d5dedd45 | 982 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) |
e46cdb66 JF |
983 | { |
984 | struct evtchn_bind_vcpu bind_vcpu; | |
985 | int evtchn = evtchn_from_irq(irq); | |
986 | ||
183d03cc SS |
987 | /* events delivered via platform PCI interrupts are always |
988 | * routed to vcpu 0 */ | |
989 | if (!VALID_EVTCHN(evtchn) || | |
990 | (xen_hvm_domain() && !xen_have_vector_callback)) | |
d5dedd45 | 991 | return -1; |
e46cdb66 JF |
992 | |
993 | /* Send future instances of this interrupt to other vcpu. */ | |
994 | bind_vcpu.port = evtchn; | |
995 | bind_vcpu.vcpu = tcpu; | |
996 | ||
997 | /* | |
998 | * If this fails, it usually just indicates that we're dealing with a | |
999 | * virq or IPI channel, which don't actually need to be rebound. Ignore | |
1000 | * it, but don't do the xenlinux-level rebind in that case. | |
1001 | */ | |
1002 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) | |
1003 | bind_evtchn_to_cpu(evtchn, tcpu); | |
e46cdb66 | 1004 | |
d5dedd45 YL |
1005 | return 0; |
1006 | } | |
e46cdb66 | 1007 | |
d5dedd45 | 1008 | static int set_affinity_irq(unsigned irq, const struct cpumask *dest) |
e46cdb66 | 1009 | { |
0de26520 | 1010 | unsigned tcpu = cpumask_first(dest); |
d5dedd45 YL |
1011 | |
1012 | return rebind_irq_to_cpu(irq, tcpu); | |
e46cdb66 JF |
1013 | } |
1014 | ||
642e0c88 IY |
1015 | int resend_irq_on_evtchn(unsigned int irq) |
1016 | { | |
1017 | int masked, evtchn = evtchn_from_irq(irq); | |
1018 | struct shared_info *s = HYPERVISOR_shared_info; | |
1019 | ||
1020 | if (!VALID_EVTCHN(evtchn)) | |
1021 | return 1; | |
1022 | ||
1023 | masked = sync_test_and_set_bit(evtchn, s->evtchn_mask); | |
1024 | sync_set_bit(evtchn, s->evtchn_pending); | |
1025 | if (!masked) | |
1026 | unmask_evtchn(evtchn); | |
1027 | ||
1028 | return 1; | |
1029 | } | |
1030 | ||
e46cdb66 JF |
1031 | static void enable_dynirq(unsigned int irq) |
1032 | { | |
1033 | int evtchn = evtchn_from_irq(irq); | |
1034 | ||
1035 | if (VALID_EVTCHN(evtchn)) | |
1036 | unmask_evtchn(evtchn); | |
1037 | } | |
1038 | ||
1039 | static void disable_dynirq(unsigned int irq) | |
1040 | { | |
1041 | int evtchn = evtchn_from_irq(irq); | |
1042 | ||
1043 | if (VALID_EVTCHN(evtchn)) | |
1044 | mask_evtchn(evtchn); | |
1045 | } | |
1046 | ||
1047 | static void ack_dynirq(unsigned int irq) | |
1048 | { | |
1049 | int evtchn = evtchn_from_irq(irq); | |
1050 | ||
1051 | move_native_irq(irq); | |
1052 | ||
1053 | if (VALID_EVTCHN(evtchn)) | |
1054 | clear_evtchn(evtchn); | |
1055 | } | |
1056 | ||
1057 | static int retrigger_dynirq(unsigned int irq) | |
1058 | { | |
1059 | int evtchn = evtchn_from_irq(irq); | |
ee8fa1c6 | 1060 | struct shared_info *sh = HYPERVISOR_shared_info; |
e46cdb66 JF |
1061 | int ret = 0; |
1062 | ||
1063 | if (VALID_EVTCHN(evtchn)) { | |
ee8fa1c6 JF |
1064 | int masked; |
1065 | ||
1066 | masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask); | |
1067 | sync_set_bit(evtchn, sh->evtchn_pending); | |
1068 | if (!masked) | |
1069 | unmask_evtchn(evtchn); | |
e46cdb66 JF |
1070 | ret = 1; |
1071 | } | |
1072 | ||
1073 | return ret; | |
1074 | } | |
1075 | ||
0e91398f JF |
1076 | static void restore_cpu_virqs(unsigned int cpu) |
1077 | { | |
1078 | struct evtchn_bind_virq bind_virq; | |
1079 | int virq, irq, evtchn; | |
1080 | ||
1081 | for (virq = 0; virq < NR_VIRQS; virq++) { | |
1082 | if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) | |
1083 | continue; | |
1084 | ||
ced40d0f | 1085 | BUG_ON(virq_from_irq(irq) != virq); |
0e91398f JF |
1086 | |
1087 | /* Get a new binding from Xen. */ | |
1088 | bind_virq.virq = virq; | |
1089 | bind_virq.vcpu = cpu; | |
1090 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, | |
1091 | &bind_virq) != 0) | |
1092 | BUG(); | |
1093 | evtchn = bind_virq.port; | |
1094 | ||
1095 | /* Record the new mapping. */ | |
1096 | evtchn_to_irq[evtchn] = irq; | |
ced40d0f | 1097 | irq_info[irq] = mk_virq_info(evtchn, virq); |
0e91398f JF |
1098 | bind_evtchn_to_cpu(evtchn, cpu); |
1099 | ||
1100 | /* Ready for use. */ | |
1101 | unmask_evtchn(evtchn); | |
1102 | } | |
1103 | } | |
1104 | ||
1105 | static void restore_cpu_ipis(unsigned int cpu) | |
1106 | { | |
1107 | struct evtchn_bind_ipi bind_ipi; | |
1108 | int ipi, irq, evtchn; | |
1109 | ||
1110 | for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { | |
1111 | if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) | |
1112 | continue; | |
1113 | ||
ced40d0f | 1114 | BUG_ON(ipi_from_irq(irq) != ipi); |
0e91398f JF |
1115 | |
1116 | /* Get a new binding from Xen. */ | |
1117 | bind_ipi.vcpu = cpu; | |
1118 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, | |
1119 | &bind_ipi) != 0) | |
1120 | BUG(); | |
1121 | evtchn = bind_ipi.port; | |
1122 | ||
1123 | /* Record the new mapping. */ | |
1124 | evtchn_to_irq[evtchn] = irq; | |
ced40d0f | 1125 | irq_info[irq] = mk_ipi_info(evtchn, ipi); |
0e91398f JF |
1126 | bind_evtchn_to_cpu(evtchn, cpu); |
1127 | ||
1128 | /* Ready for use. */ | |
1129 | unmask_evtchn(evtchn); | |
1130 | ||
1131 | } | |
1132 | } | |
1133 | ||
2d9e1e2f JF |
1134 | /* Clear an irq's pending state, in preparation for polling on it */ |
1135 | void xen_clear_irq_pending(int irq) | |
1136 | { | |
1137 | int evtchn = evtchn_from_irq(irq); | |
1138 | ||
1139 | if (VALID_EVTCHN(evtchn)) | |
1140 | clear_evtchn(evtchn); | |
1141 | } | |
1142 | ||
168d2f46 JF |
1143 | void xen_set_irq_pending(int irq) |
1144 | { | |
1145 | int evtchn = evtchn_from_irq(irq); | |
1146 | ||
1147 | if (VALID_EVTCHN(evtchn)) | |
1148 | set_evtchn(evtchn); | |
1149 | } | |
1150 | ||
1151 | bool xen_test_irq_pending(int irq) | |
1152 | { | |
1153 | int evtchn = evtchn_from_irq(irq); | |
1154 | bool ret = false; | |
1155 | ||
1156 | if (VALID_EVTCHN(evtchn)) | |
1157 | ret = test_evtchn(evtchn); | |
1158 | ||
1159 | return ret; | |
1160 | } | |
1161 | ||
2d9e1e2f JF |
1162 | /* Poll waiting for an irq to become pending. In the usual case, the |
1163 | irq will be disabled so it won't deliver an interrupt. */ | |
1164 | void xen_poll_irq(int irq) | |
1165 | { | |
1166 | evtchn_port_t evtchn = evtchn_from_irq(irq); | |
1167 | ||
1168 | if (VALID_EVTCHN(evtchn)) { | |
1169 | struct sched_poll poll; | |
1170 | ||
1171 | poll.nr_ports = 1; | |
1172 | poll.timeout = 0; | |
ff3c5362 | 1173 | set_xen_guest_handle(poll.ports, &evtchn); |
2d9e1e2f JF |
1174 | |
1175 | if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) | |
1176 | BUG(); | |
1177 | } | |
1178 | } | |
1179 | ||
0e91398f JF |
1180 | void xen_irq_resume(void) |
1181 | { | |
1182 | unsigned int cpu, irq, evtchn; | |
1183 | ||
1184 | init_evtchn_cpu_bindings(); | |
1185 | ||
1186 | /* New event-channel space is not 'live' yet. */ | |
1187 | for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) | |
1188 | mask_evtchn(evtchn); | |
1189 | ||
1190 | /* No IRQ <-> event-channel mappings. */ | |
0b8f1efa | 1191 | for (irq = 0; irq < nr_irqs; irq++) |
0e91398f JF |
1192 | irq_info[irq].evtchn = 0; /* zap event-channel binding */ |
1193 | ||
1194 | for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) | |
1195 | evtchn_to_irq[evtchn] = -1; | |
1196 | ||
1197 | for_each_possible_cpu(cpu) { | |
1198 | restore_cpu_virqs(cpu); | |
1199 | restore_cpu_ipis(cpu); | |
1200 | } | |
1201 | } | |
1202 | ||
e46cdb66 JF |
1203 | static struct irq_chip xen_dynamic_chip __read_mostly = { |
1204 | .name = "xen-dyn", | |
54a353a0 JF |
1205 | |
1206 | .disable = disable_dynirq, | |
e46cdb66 JF |
1207 | .mask = disable_dynirq, |
1208 | .unmask = enable_dynirq, | |
54a353a0 | 1209 | |
e46cdb66 JF |
1210 | .ack = ack_dynirq, |
1211 | .set_affinity = set_affinity_irq, | |
1212 | .retrigger = retrigger_dynirq, | |
1213 | }; | |
1214 | ||
d46a78b0 JF |
1215 | static struct irq_chip xen_pirq_chip __read_mostly = { |
1216 | .name = "xen-pirq", | |
1217 | ||
1218 | .startup = startup_pirq, | |
1219 | .shutdown = shutdown_pirq, | |
1220 | ||
1221 | .enable = enable_pirq, | |
1222 | .unmask = enable_pirq, | |
1223 | ||
1224 | .disable = disable_pirq, | |
1225 | .mask = disable_pirq, | |
1226 | ||
1227 | .ack = ack_pirq, | |
1228 | .end = end_pirq, | |
1229 | ||
1230 | .set_affinity = set_affinity_irq, | |
1231 | ||
1232 | .retrigger = retrigger_dynirq, | |
1233 | }; | |
1234 | ||
aaca4964 JF |
1235 | static struct irq_chip xen_percpu_chip __read_mostly = { |
1236 | .name = "xen-percpu", | |
1237 | ||
1238 | .disable = disable_dynirq, | |
1239 | .mask = disable_dynirq, | |
1240 | .unmask = enable_dynirq, | |
1241 | ||
1242 | .ack = ack_dynirq, | |
1243 | }; | |
1244 | ||
38e20b07 SY |
1245 | int xen_set_callback_via(uint64_t via) |
1246 | { | |
1247 | struct xen_hvm_param a; | |
1248 | a.domid = DOMID_SELF; | |
1249 | a.index = HVM_PARAM_CALLBACK_IRQ; | |
1250 | a.value = via; | |
1251 | return HYPERVISOR_hvm_op(HVMOP_set_param, &a); | |
1252 | } | |
1253 | EXPORT_SYMBOL_GPL(xen_set_callback_via); | |
1254 | ||
ca65f9fc | 1255 | #ifdef CONFIG_XEN_PVHVM |
38e20b07 SY |
1256 | /* Vector callbacks are better than PCI interrupts to receive event |
1257 | * channel notifications because we can receive vector callbacks on any | |
1258 | * vcpu and we don't need PCI support or APIC interactions. */ | |
1259 | void xen_callback_vector(void) | |
1260 | { | |
1261 | int rc; | |
1262 | uint64_t callback_via; | |
1263 | if (xen_have_vector_callback) { | |
1264 | callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK); | |
1265 | rc = xen_set_callback_via(callback_via); | |
1266 | if (rc) { | |
1267 | printk(KERN_ERR "Request for Xen HVM callback vector" | |
1268 | " failed.\n"); | |
1269 | xen_have_vector_callback = 0; | |
1270 | return; | |
1271 | } | |
1272 | printk(KERN_INFO "Xen HVM callback vector for event delivery is " | |
1273 | "enabled\n"); | |
1274 | /* in the restore case the vector has already been allocated */ | |
1275 | if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors)) | |
1276 | alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector); | |
1277 | } | |
1278 | } | |
ca65f9fc SS |
1279 | #else |
1280 | void xen_callback_vector(void) {} | |
1281 | #endif | |
38e20b07 | 1282 | |
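/*
 * Early setup: allocate the per-cpu selection masks and the irq/evtchn
 * mapping tables, mask every event channel, and install the HVM callback
 * vector or initialise the PV irq context as appropriate.
 */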
void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}