drivers/xen/events/events_base.c
1 /*
2 * Xen event channels
3 *
4 * Xen models interrupts with abstract event channels. Because each
5 * domain gets 1024 event channels, but NR_IRQS is not that large, we
6 * must dynamically map irqs<->event channels. The event channels
7 * interface with the rest of the kernel by defining a xen interrupt
8 * chip. When an event is received, it is mapped to an irq and sent
9 * through the normal interrupt processing path.
10 *
11 * There are four kinds of events which can be mapped to an event
12 * channel:
13 *
14 * 1. Inter-domain notifications. This includes all the virtual
15 * device events, since they're driven by the remote end of a split
16 * device driver in another domain (typically dom0).
17 * 2. VIRQs, typically used for timers. These are per-cpu events.
18 * 3. IPIs.
19 * 4. PIRQs - Hardware interrupts.
20 *
21 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
22 */
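
/*
 * Illustrative usage (a minimal sketch, not part of this file): a driver
 * that has been handed an event-channel port, e.g. via xenbus, would
 * typically bind and drive it as below. my_handler and my_dev are
 * placeholder names.
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		// consume work queued by the remote end
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					    "my-dev", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	notify_remote_via_irq(irq);		// kick the other end
 *	...
 *	unbind_from_irqhandler(irq, my_dev);	// also closes the channel
 */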
23
24 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
25
26 #include <linux/linkage.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/moduleparam.h>
30 #include <linux/string.h>
31 #include <linux/bootmem.h>
32 #include <linux/slab.h>
33 #include <linux/irqnr.h>
34 #include <linux/pci.h>
35
36 #ifdef CONFIG_X86
37 #include <asm/desc.h>
38 #include <asm/ptrace.h>
39 #include <asm/irq.h>
40 #include <asm/io_apic.h>
41 #include <asm/i8259.h>
42 #include <asm/xen/pci.h>
43 #endif
44 #include <asm/sync_bitops.h>
45 #include <asm/xen/hypercall.h>
46 #include <asm/xen/hypervisor.h>
47 #include <xen/page.h>
48
49 #include <xen/xen.h>
50 #include <xen/hvm.h>
51 #include <xen/xen-ops.h>
52 #include <xen/events.h>
53 #include <xen/interface/xen.h>
54 #include <xen/interface/event_channel.h>
55 #include <xen/interface/hvm/hvm_op.h>
56 #include <xen/interface/hvm/params.h>
57 #include <xen/interface/physdev.h>
58 #include <xen/interface/sched.h>
59 #include <xen/interface/vcpu.h>
60 #include <asm/hw_irq.h>
61
62 #include "events_internal.h"
63
64 const struct evtchn_ops *evtchn_ops;
65
66 /*
67 * This lock protects updates to the following mapping and reference-count
68 * arrays. The lock does not need to be acquired to read the mapping tables.
69 */
70 static DEFINE_MUTEX(irq_mapping_update_lock);
71
72 static LIST_HEAD(xen_irq_list_head);
73
74 /* IRQ <-> VIRQ mapping. */
75 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
76
77 /* IRQ <-> IPI mapping */
78 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
79
80 int **evtchn_to_irq;
81 #ifdef CONFIG_X86
82 static unsigned long *pirq_eoi_map;
83 #endif
84 static bool (*pirq_needs_eoi)(unsigned irq);
85
86 #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
87 #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
88 #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
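/*
 * Worked example (assuming the common case of 4 KiB pages and
 * sizeof(int) == 4): each row holds PAGE_SIZE / sizeof(int) == 1024
 * entries, so event-channel port 2500 lives at
 * EVTCHN_ROW(2500) == 2, EVTCHN_COL(2500) == 452.
 */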
89
90 /* Xen will never allocate port zero for any purpose. */
91 #define VALID_EVTCHN(chn) ((chn) != 0)
92
93 static struct irq_chip xen_dynamic_chip;
94 static struct irq_chip xen_percpu_chip;
95 static struct irq_chip xen_pirq_chip;
96 static void enable_dynirq(struct irq_data *data);
97 static void disable_dynirq(struct irq_data *data);
98
99 static void clear_evtchn_to_irq_row(unsigned row)
100 {
101 unsigned col;
102
103 for (col = 0; col < EVTCHN_PER_ROW; col++)
104 evtchn_to_irq[row][col] = -1;
105 }
106
107 static void clear_evtchn_to_irq_all(void)
108 {
109 unsigned row;
110
111 for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
112 if (evtchn_to_irq[row] == NULL)
113 continue;
114 clear_evtchn_to_irq_row(row);
115 }
116 }
117
118 static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
119 {
120 unsigned row;
121 unsigned col;
122
123 if (evtchn >= xen_evtchn_max_channels())
124 return -EINVAL;
125
126 row = EVTCHN_ROW(evtchn);
127 col = EVTCHN_COL(evtchn);
128
129 if (evtchn_to_irq[row] == NULL) {
130 /* Unallocated irq entries return -1 anyway */
131 if (irq == -1)
132 return 0;
133
134 evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
135 if (evtchn_to_irq[row] == NULL)
136 return -ENOMEM;
137
138 clear_evtchn_to_irq_row(row);
139 }
140
141 evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
142 return 0;
143 }
144
145 int get_evtchn_to_irq(unsigned evtchn)
146 {
147 if (evtchn >= xen_evtchn_max_channels())
148 return -1;
149 if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
150 return -1;
151 return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
152 }
153
154 /* Get info for IRQ */
155 struct irq_info *info_for_irq(unsigned irq)
156 {
157 return irq_get_handler_data(irq);
158 }
159
160 /* Constructors for packed IRQ information. */
161 static int xen_irq_info_common_setup(struct irq_info *info,
162 unsigned irq,
163 enum xen_irq_type type,
164 unsigned evtchn,
165 unsigned short cpu)
166 {
167 int ret;
168
169 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
170
171 info->type = type;
172 info->irq = irq;
173 info->evtchn = evtchn;
174 info->cpu = cpu;
175
176 ret = set_evtchn_to_irq(evtchn, irq);
177 if (ret < 0)
178 return ret;
179
180 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
181
182 return xen_evtchn_port_setup(info);
183 }
184
185 static int xen_irq_info_evtchn_setup(unsigned irq,
186 unsigned evtchn)
187 {
188 struct irq_info *info = info_for_irq(irq);
189
190 return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
191 }
192
193 static int xen_irq_info_ipi_setup(unsigned cpu,
194 unsigned irq,
195 unsigned evtchn,
196 enum ipi_vector ipi)
197 {
198 struct irq_info *info = info_for_irq(irq);
199
200 info->u.ipi = ipi;
201
202 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
203
204 return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
205 }
206
207 static int xen_irq_info_virq_setup(unsigned cpu,
208 unsigned irq,
209 unsigned evtchn,
210 unsigned virq)
211 {
212 struct irq_info *info = info_for_irq(irq);
213
214 info->u.virq = virq;
215
216 per_cpu(virq_to_irq, cpu)[virq] = irq;
217
218 return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
219 }
220
221 static int xen_irq_info_pirq_setup(unsigned irq,
222 unsigned evtchn,
223 unsigned pirq,
224 unsigned gsi,
225 uint16_t domid,
226 unsigned char flags)
227 {
228 struct irq_info *info = info_for_irq(irq);
229
230 info->u.pirq.pirq = pirq;
231 info->u.pirq.gsi = gsi;
232 info->u.pirq.domid = domid;
233 info->u.pirq.flags = flags;
234
235 return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
236 }
237
238 static void xen_irq_info_cleanup(struct irq_info *info)
239 {
240 set_evtchn_to_irq(info->evtchn, -1);
241 info->evtchn = 0;
242 }
243
244 /*
245 * Accessors for packed IRQ information.
246 */
247 unsigned int evtchn_from_irq(unsigned irq)
248 {
249 if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
250 return 0;
251
252 return info_for_irq(irq)->evtchn;
253 }
254
255 unsigned irq_from_evtchn(unsigned int evtchn)
256 {
257 return get_evtchn_to_irq(evtchn);
258 }
259 EXPORT_SYMBOL_GPL(irq_from_evtchn);
260
261 int irq_from_virq(unsigned int cpu, unsigned int virq)
262 {
263 return per_cpu(virq_to_irq, cpu)[virq];
264 }
265
266 static enum ipi_vector ipi_from_irq(unsigned irq)
267 {
268 struct irq_info *info = info_for_irq(irq);
269
270 BUG_ON(info == NULL);
271 BUG_ON(info->type != IRQT_IPI);
272
273 return info->u.ipi;
274 }
275
276 static unsigned virq_from_irq(unsigned irq)
277 {
278 struct irq_info *info = info_for_irq(irq);
279
280 BUG_ON(info == NULL);
281 BUG_ON(info->type != IRQT_VIRQ);
282
283 return info->u.virq;
284 }
285
286 static unsigned pirq_from_irq(unsigned irq)
287 {
288 struct irq_info *info = info_for_irq(irq);
289
290 BUG_ON(info == NULL);
291 BUG_ON(info->type != IRQT_PIRQ);
292
293 return info->u.pirq.pirq;
294 }
295
296 static enum xen_irq_type type_from_irq(unsigned irq)
297 {
298 return info_for_irq(irq)->type;
299 }
300
301 unsigned cpu_from_irq(unsigned irq)
302 {
303 return info_for_irq(irq)->cpu;
304 }
305
306 unsigned int cpu_from_evtchn(unsigned int evtchn)
307 {
308 int irq = get_evtchn_to_irq(evtchn);
309 unsigned ret = 0;
310
311 if (irq != -1)
312 ret = cpu_from_irq(irq);
313
314 return ret;
315 }
316
317 #ifdef CONFIG_X86
318 static bool pirq_check_eoi_map(unsigned irq)
319 {
320 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
321 }
322 #endif
323
324 static bool pirq_needs_eoi_flag(unsigned irq)
325 {
326 struct irq_info *info = info_for_irq(irq);
327 BUG_ON(info->type != IRQT_PIRQ);
328
329 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
330 }
331
332 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
333 {
334 int irq = get_evtchn_to_irq(chn);
335 struct irq_info *info = info_for_irq(irq);
336
337 BUG_ON(irq == -1);
338 #ifdef CONFIG_SMP
339 cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
340 #endif
341 xen_evtchn_port_bind_to_cpu(info, cpu);
342
343 info->cpu = cpu;
344 }
345
346 /**
347 * notify_remote_via_irq - send event to remote end of event channel via irq
348 * @irq: irq of event channel to send event to
349 *
350 * Unlike notify_remote_via_evtchn(), this is safe to use across
351 * save/restore. Notifications on a broken connection are silently
352 * dropped.
353 */
354 void notify_remote_via_irq(int irq)
355 {
356 int evtchn = evtchn_from_irq(irq);
357
358 if (VALID_EVTCHN(evtchn))
359 notify_remote_via_evtchn(evtchn);
360 }
361 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
362
363 static void xen_irq_init(unsigned irq)
364 {
365 struct irq_info *info;
366 #ifdef CONFIG_SMP
367 /* By default all event channels notify CPU#0. */
368 cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
369 #endif
370
371 info = kzalloc(sizeof(*info), GFP_KERNEL);
372 if (info == NULL)
373 panic("Unable to allocate metadata for IRQ%d\n", irq);
374
375 info->type = IRQT_UNBOUND;
376 info->refcnt = -1;
377
378 irq_set_handler_data(irq, info);
379
380 list_add_tail(&info->list, &xen_irq_list_head);
381 }
382
383 static int __must_check xen_allocate_irqs_dynamic(int nvec)
384 {
385 int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
386
387 if (irq >= 0) {
388 for (i = 0; i < nvec; i++)
389 xen_irq_init(irq + i);
390 }
391
392 return irq;
393 }
394
395 static inline int __must_check xen_allocate_irq_dynamic(void)
396 {
397
398 return xen_allocate_irqs_dynamic(1);
399 }
400
401 static int __must_check xen_allocate_irq_gsi(unsigned gsi)
402 {
403 int irq;
404
405 /*
406 * A PV guest has no concept of a GSI (since it has no ACPI
407 * nor access to/knowledge of the physical APICs). Therefore
408 * all IRQs are dynamically allocated from the entire IRQ
409 * space.
410 */
411 if (xen_pv_domain() && !xen_initial_domain())
412 return xen_allocate_irq_dynamic();
413
414 /* Legacy IRQ descriptors are already allocated by the arch. */
415 if (gsi < nr_legacy_irqs())
416 irq = gsi;
417 else
418 irq = irq_alloc_desc_at(gsi, -1);
419
420 xen_irq_init(irq);
421
422 return irq;
423 }
424
425 static void xen_free_irq(unsigned irq)
426 {
427 struct irq_info *info = irq_get_handler_data(irq);
428
429 if (WARN_ON(!info))
430 return;
431
432 list_del(&info->list);
433
434 irq_set_handler_data(irq, NULL);
435
436 WARN_ON(info->refcnt > 0);
437
438 kfree(info);
439
440 /* Legacy IRQ descriptors are managed by the arch. */
441 if (irq < nr_legacy_irqs())
442 return;
443
444 irq_free_desc(irq);
445 }
446
447 static void xen_evtchn_close(unsigned int port)
448 {
449 struct evtchn_close close;
450
451 close.port = port;
452 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
453 BUG();
454 }
455
456 static void pirq_query_unmask(int irq)
457 {
458 struct physdev_irq_status_query irq_status;
459 struct irq_info *info = info_for_irq(irq);
460
461 BUG_ON(info->type != IRQT_PIRQ);
462
463 irq_status.irq = pirq_from_irq(irq);
464 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
465 irq_status.flags = 0;
466
467 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
468 if (irq_status.flags & XENIRQSTAT_needs_eoi)
469 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
470 }
471
472 static void eoi_pirq(struct irq_data *data)
473 {
474 int evtchn = evtchn_from_irq(data->irq);
475 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
476 int rc = 0;
477
478 if (!VALID_EVTCHN(evtchn))
479 return;
480
481 if (unlikely(irqd_is_setaffinity_pending(data)) &&
482 likely(!irqd_irq_disabled(data))) {
483 int masked = test_and_set_mask(evtchn);
484
485 clear_evtchn(evtchn);
486
487 irq_move_masked_irq(data);
488
489 if (!masked)
490 unmask_evtchn(evtchn);
491 } else
492 clear_evtchn(evtchn);
493
494 if (pirq_needs_eoi(data->irq)) {
495 rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
496 WARN_ON(rc);
497 }
498 }
499
500 static void mask_ack_pirq(struct irq_data *data)
501 {
502 disable_dynirq(data);
503 eoi_pirq(data);
504 }
505
506 static unsigned int __startup_pirq(unsigned int irq)
507 {
508 struct evtchn_bind_pirq bind_pirq;
509 struct irq_info *info = info_for_irq(irq);
510 int evtchn = evtchn_from_irq(irq);
511 int rc;
512
513 BUG_ON(info->type != IRQT_PIRQ);
514
515 if (VALID_EVTCHN(evtchn))
516 goto out;
517
518 bind_pirq.pirq = pirq_from_irq(irq);
519 /* NB. We are happy to share unless we are probing. */
520 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
521 BIND_PIRQ__WILL_SHARE : 0;
522 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
523 if (rc != 0) {
524 pr_warn("Failed to obtain physical IRQ %d\n", irq);
525 return 0;
526 }
527 evtchn = bind_pirq.port;
528
529 pirq_query_unmask(irq);
530
531 rc = set_evtchn_to_irq(evtchn, irq);
532 if (rc)
533 goto err;
534
535 info->evtchn = evtchn;
536 bind_evtchn_to_cpu(evtchn, 0);
537
538 rc = xen_evtchn_port_setup(info);
539 if (rc)
540 goto err;
541
542 out:
543 unmask_evtchn(evtchn);
544 eoi_pirq(irq_get_irq_data(irq));
545
546 return 0;
547
548 err:
549 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
550 xen_evtchn_close(evtchn);
551 return 0;
552 }
553
554 static unsigned int startup_pirq(struct irq_data *data)
555 {
556 return __startup_pirq(data->irq);
557 }
558
559 static void shutdown_pirq(struct irq_data *data)
560 {
561 unsigned int irq = data->irq;
562 struct irq_info *info = info_for_irq(irq);
563 unsigned evtchn = evtchn_from_irq(irq);
564
565 BUG_ON(info->type != IRQT_PIRQ);
566
567 if (!VALID_EVTCHN(evtchn))
568 return;
569
570 mask_evtchn(evtchn);
571 xen_evtchn_close(evtchn);
572 xen_irq_info_cleanup(info);
573 }
574
575 static void enable_pirq(struct irq_data *data)
576 {
577 enable_dynirq(data);
578 }
579
580 static void disable_pirq(struct irq_data *data)
581 {
582 disable_dynirq(data);
583 }
584
585 int xen_irq_from_gsi(unsigned gsi)
586 {
587 struct irq_info *info;
588
589 list_for_each_entry(info, &xen_irq_list_head, list) {
590 if (info->type != IRQT_PIRQ)
591 continue;
592
593 if (info->u.pirq.gsi == gsi)
594 return info->irq;
595 }
596
597 return -1;
598 }
599 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
600
601 static void __unbind_from_irq(unsigned int irq)
602 {
603 int evtchn = evtchn_from_irq(irq);
604 struct irq_info *info = irq_get_handler_data(irq);
605
606 if (info->refcnt > 0) {
607 info->refcnt--;
608 if (info->refcnt != 0)
609 return;
610 }
611
612 if (VALID_EVTCHN(evtchn)) {
613 unsigned int cpu = cpu_from_irq(irq);
614
615 xen_evtchn_close(evtchn);
616
617 switch (type_from_irq(irq)) {
618 case IRQT_VIRQ:
619 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
620 break;
621 case IRQT_IPI:
622 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
623 break;
624 default:
625 break;
626 }
627
628 xen_irq_info_cleanup(info);
629 }
630
631 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
632
633 xen_free_irq(irq);
634 }
635
636 /*
637 * Do not make any assumptions regarding the relationship between the
638 * IRQ number returned here and the Xen pirq argument.
639 *
640 * Note: We don't assign an event channel until the irq is actually
641 * started up. Return an existing irq if we've already got one for the gsi.
642 *
643 * Shareable implies level triggered; not shareable implies edge
644 * triggered here.
645 */
646 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
647 unsigned pirq, int shareable, char *name)
648 {
649 int irq = -1;
650 struct physdev_irq irq_op;
651 int ret;
652
653 mutex_lock(&irq_mapping_update_lock);
654
655 irq = xen_irq_from_gsi(gsi);
656 if (irq != -1) {
657 pr_info("%s: returning irq %d for gsi %u\n",
658 __func__, irq, gsi);
659 goto out;
660 }
661
662 irq = xen_allocate_irq_gsi(gsi);
663 if (irq < 0)
664 goto out;
665
666 irq_op.irq = irq;
667 irq_op.vector = 0;
668
669 /* Only the privileged domain can do this. For an unprivileged domain
670 * the pcifront driver provides a virtual PCI bus, and this call is
671 * made on its behalf by the backend in the privileged domain. */
672 if (xen_initial_domain() &&
673 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
674 xen_free_irq(irq);
675 irq = -ENOSPC;
676 goto out;
677 }
678
679 ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
680 shareable ? PIRQ_SHAREABLE : 0);
681 if (ret < 0) {
682 __unbind_from_irq(irq);
683 irq = ret;
684 goto out;
685 }
686
687 pirq_query_unmask(irq);
688 /* We try to use the handler with the appropriate semantic for the
689 * type of interrupt: if the interrupt is an edge triggered
690 * interrupt we use handle_edge_irq.
691 *
692 * On the other hand if the interrupt is level triggered we use
693 * handle_fasteoi_irq like the native code does for this kind of
694 * interrupt.
695 *
696 * Depending on the Xen version, pirq_needs_eoi might return true
697 * not only for level triggered interrupts but for edge triggered
698 * interrupts too. In any case Xen always honors the eoi mechanism,
699 * not injecting any more pirqs of the same kind if the first one
700 * hasn't received an eoi yet. Therefore using the fasteoi handler
701 * is the right choice either way.
702 */
703 if (shareable)
704 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
705 handle_fasteoi_irq, name);
706 else
707 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
708 handle_edge_irq, name);
709
710 out:
711 mutex_unlock(&irq_mapping_update_lock);
712
713 return irq;
714 }
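
/*
 * Illustrative caller (a minimal sketch; gsi, pirq and shareable are
 * placeholders supplied by the caller, typically derived from ACPI or
 * other firmware information):
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable,
 *				       shareable ? "ioapic-level"
 *						 : "ioapic-edge");
 *	if (irq >= 0)
 *		// the returned irq is then used with request_irq() as usual
 */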
715
716 #ifdef CONFIG_PCI_MSI
717 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
718 {
719 int rc;
720 struct physdev_get_free_pirq op_get_free_pirq;
721
722 op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
723 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
724
725 WARN_ONCE(rc == -ENOSYS,
726 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
727
728 return rc ? -1 : op_get_free_pirq.pirq;
729 }
730
731 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
732 int pirq, int nvec, const char *name, domid_t domid)
733 {
734 int i, irq, ret;
735
736 mutex_lock(&irq_mapping_update_lock);
737
738 irq = xen_allocate_irqs_dynamic(nvec);
739 if (irq < 0)
740 goto out;
741
742 for (i = 0; i < nvec; i++) {
743 irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
744
745 ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
746 i == 0 ? 0 : PIRQ_MSI_GROUP);
747 if (ret < 0)
748 goto error_irq;
749 }
750
751 ret = irq_set_msi_desc(irq, msidesc);
752 if (ret < 0)
753 goto error_irq;
754 out:
755 mutex_unlock(&irq_mapping_update_lock);
756 return irq;
757 error_irq:
758 for (i = min(i, nvec - 1); i >= 0; i--) /* i == nvec if irq_set_msi_desc() failed */
759 __unbind_from_irq(irq + i);
760 mutex_unlock(&irq_mapping_update_lock);
761 return ret;
762 }
763 #endif
764
765 int xen_destroy_irq(int irq)
766 {
767 struct physdev_unmap_pirq unmap_irq;
768 struct irq_info *info = info_for_irq(irq);
769 int rc = -ENOENT;
770
771 mutex_lock(&irq_mapping_update_lock);
772
773 /*
774 * Only the first vector of an MSI group owns the PIRQ mapping;
775 * the other vectors in the group are flagged PIRQ_MSI_GROUP and
776 * must skip the PIRQ unmap.
777 */
778 if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
779 unmap_irq.pirq = info->u.pirq.pirq;
780 unmap_irq.domid = info->u.pirq.domid;
781 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
782 /* If another domain quits without making the pci_disable_msix
783 * call, the Xen hypervisor takes care of freeing the PIRQs
784 * (free_domain_pirqs).
785 */
786 if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
787 pr_info("domain %d does not have %d anymore\n",
788 info->u.pirq.domid, info->u.pirq.pirq);
789 else if (rc) {
790 pr_warn("unmap irq failed %d\n", rc);
791 goto out;
792 }
793 }
794
795 xen_free_irq(irq);
796
797 out:
798 mutex_unlock(&irq_mapping_update_lock);
799 return rc;
800 }
801
802 int xen_irq_from_pirq(unsigned pirq)
803 {
804 int irq;
805
806 struct irq_info *info;
807
808 mutex_lock(&irq_mapping_update_lock);
809
810 list_for_each_entry(info, &xen_irq_list_head, list) {
811 if (info->type != IRQT_PIRQ)
812 continue;
813 irq = info->irq;
814 if (info->u.pirq.pirq == pirq)
815 goto out;
816 }
817 irq = -1;
818 out:
819 mutex_unlock(&irq_mapping_update_lock);
820
821 return irq;
822 }
823
824
825 int xen_pirq_from_irq(unsigned irq)
826 {
827 return pirq_from_irq(irq);
828 }
829 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
830
831 int bind_evtchn_to_irq(unsigned int evtchn)
832 {
833 int irq;
834 int ret;
835
836 if (evtchn >= xen_evtchn_max_channels())
837 return -ENOMEM;
838
839 mutex_lock(&irq_mapping_update_lock);
840
841 irq = get_evtchn_to_irq(evtchn);
842
843 if (irq == -1) {
844 irq = xen_allocate_irq_dynamic();
845 if (irq < 0)
846 goto out;
847
848 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
849 handle_edge_irq, "event");
850
851 ret = xen_irq_info_evtchn_setup(irq, evtchn);
852 if (ret < 0) {
853 __unbind_from_irq(irq);
854 irq = ret;
855 goto out;
856 }
857 /* New interdomain events are bound to VCPU 0. */
858 bind_evtchn_to_cpu(evtchn, 0);
859 } else {
860 struct irq_info *info = info_for_irq(irq);
861 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
862 }
863
864 out:
865 mutex_unlock(&irq_mapping_update_lock);
866
867 return irq;
868 }
869 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
870
871 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
872 {
873 struct evtchn_bind_ipi bind_ipi;
874 int evtchn, irq;
875 int ret;
876
877 mutex_lock(&irq_mapping_update_lock);
878
879 irq = per_cpu(ipi_to_irq, cpu)[ipi];
880
881 if (irq == -1) {
882 irq = xen_allocate_irq_dynamic();
883 if (irq < 0)
884 goto out;
885
886 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
887 handle_percpu_irq, "ipi");
888
889 bind_ipi.vcpu = xen_vcpu_nr(cpu);
890 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
891 &bind_ipi) != 0)
892 BUG();
893 evtchn = bind_ipi.port;
894
895 ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
896 if (ret < 0) {
897 __unbind_from_irq(irq);
898 irq = ret;
899 goto out;
900 }
901 bind_evtchn_to_cpu(evtchn, cpu);
902 } else {
903 struct irq_info *info = info_for_irq(irq);
904 WARN_ON(info == NULL || info->type != IRQT_IPI);
905 }
906
907 out:
908 mutex_unlock(&irq_mapping_update_lock);
909 return irq;
910 }
911
912 int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
913 unsigned int remote_port)
914 {
915 struct evtchn_bind_interdomain bind_interdomain;
916 int err;
917
918 bind_interdomain.remote_dom = remote_domain;
919 bind_interdomain.remote_port = remote_port;
920
921 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
922 &bind_interdomain);
923
924 return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
925 }
926 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
927
928 static int find_virq(unsigned int virq, unsigned int cpu)
929 {
930 struct evtchn_status status;
931 int port, rc = -ENOENT;
932
933 memset(&status, 0, sizeof(status));
934 for (port = 0; port < xen_evtchn_max_channels(); port++) {
935 status.dom = DOMID_SELF;
936 status.port = port;
937 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
938 if (rc < 0)
939 continue;
940 if (status.status != EVTCHNSTAT_virq)
941 continue;
942 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
943 rc = port;
944 break;
945 }
946 }
947 return rc;
948 }
949
950 /**
951 * xen_evtchn_nr_channels - number of usable event channel ports
952 *
953 * This may be less than the maximum supported by the current
954 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
955 * supported.
956 */
957 unsigned xen_evtchn_nr_channels(void)
958 {
959 return evtchn_ops->nr_channels();
960 }
961 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
962
963 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
964 {
965 struct evtchn_bind_virq bind_virq;
966 int evtchn, irq, ret;
967
968 mutex_lock(&irq_mapping_update_lock);
969
970 irq = per_cpu(virq_to_irq, cpu)[virq];
971
972 if (irq == -1) {
973 irq = xen_allocate_irq_dynamic();
974 if (irq < 0)
975 goto out;
976
977 if (percpu)
978 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
979 handle_percpu_irq, "virq");
980 else
981 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
982 handle_edge_irq, "virq");
983
984 bind_virq.virq = virq;
985 bind_virq.vcpu = xen_vcpu_nr(cpu);
986 ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
987 &bind_virq);
988 if (ret == 0)
989 evtchn = bind_virq.port;
990 else {
991 if (ret == -EEXIST)
992 ret = find_virq(virq, cpu);
993 BUG_ON(ret < 0);
994 evtchn = ret;
995 }
996
997 ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
998 if (ret < 0) {
999 __unbind_from_irq(irq);
1000 irq = ret;
1001 goto out;
1002 }
1003
1004 bind_evtchn_to_cpu(evtchn, cpu);
1005 } else {
1006 struct irq_info *info = info_for_irq(irq);
1007 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
1008 }
1009
1010 out:
1011 mutex_unlock(&irq_mapping_update_lock);
1012
1013 return irq;
1014 }
1015
1016 static void unbind_from_irq(unsigned int irq)
1017 {
1018 mutex_lock(&irq_mapping_update_lock);
1019 __unbind_from_irq(irq);
1020 mutex_unlock(&irq_mapping_update_lock);
1021 }
1022
1023 int bind_evtchn_to_irqhandler(unsigned int evtchn,
1024 irq_handler_t handler,
1025 unsigned long irqflags,
1026 const char *devname, void *dev_id)
1027 {
1028 int irq, retval;
1029
1030 irq = bind_evtchn_to_irq(evtchn);
1031 if (irq < 0)
1032 return irq;
1033 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1034 if (retval != 0) {
1035 unbind_from_irq(irq);
1036 return retval;
1037 }
1038
1039 return irq;
1040 }
1041 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
1042
1043 int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
1044 unsigned int remote_port,
1045 irq_handler_t handler,
1046 unsigned long irqflags,
1047 const char *devname,
1048 void *dev_id)
1049 {
1050 int irq, retval;
1051
1052 irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
1053 if (irq < 0)
1054 return irq;
1055
1056 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1057 if (retval != 0) {
1058 unbind_from_irq(irq);
1059 return retval;
1060 }
1061
1062 return irq;
1063 }
1064 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
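
/*
 * Illustrative backend-style usage (a sketch; remote_domid, port,
 * my_handler and my_backend are placeholders): a backend driver that
 * learned the frontend's domid and event-channel port from xenstore
 * could bind it with
 *
 *	irq = bind_interdomain_evtchn_to_irqhandler(remote_domid, port,
 *						    my_handler, 0,
 *						    "my-backend", my_backend);
 *
 * and later tear it down with unbind_from_irqhandler(irq, my_backend).
 */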
1065
1066 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
1067 irq_handler_t handler,
1068 unsigned long irqflags, const char *devname, void *dev_id)
1069 {
1070 int irq, retval;
1071
1072 irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
1073 if (irq < 0)
1074 return irq;
1075 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1076 if (retval != 0) {
1077 unbind_from_irq(irq);
1078 return retval;
1079 }
1080
1081 return irq;
1082 }
1083 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
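
/*
 * Illustrative usage (a sketch modelled on a per-cpu timer binding;
 * xen_timer_interrupt is a placeholder handler):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU | IRQF_NOBALANCING,
 *				      "timer", NULL);
 *
 * IRQF_PERCPU in irqflags selects the per-cpu chip and handler above.
 */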
1084
1085 int bind_ipi_to_irqhandler(enum ipi_vector ipi,
1086 unsigned int cpu,
1087 irq_handler_t handler,
1088 unsigned long irqflags,
1089 const char *devname,
1090 void *dev_id)
1091 {
1092 int irq, retval;
1093
1094 irq = bind_ipi_to_irq(ipi, cpu);
1095 if (irq < 0)
1096 return irq;
1097
1098 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
1099 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1100 if (retval != 0) {
1101 unbind_from_irq(irq);
1102 return retval;
1103 }
1104
1105 return irq;
1106 }
1107
1108 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1109 {
1110 struct irq_info *info = irq_get_handler_data(irq);
1111
1112 if (WARN_ON(!info))
1113 return;
1114 free_irq(irq, dev_id);
1115 unbind_from_irq(irq);
1116 }
1117 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1118
1119 /**
1120 * xen_set_irq_priority() - set an event channel priority.
1121 * @irq: irq bound to an event channel.
1122 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
1123 */
1124 int xen_set_irq_priority(unsigned irq, unsigned priority)
1125 {
1126 struct evtchn_set_priority set_priority;
1127
1128 set_priority.port = evtchn_from_irq(irq);
1129 set_priority.priority = priority;
1130
1131 return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
1132 &set_priority);
1133 }
1134 EXPORT_SYMBOL_GPL(xen_set_irq_priority);
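
/*
 * Example (a sketch): callers whose events must not be starved, such as
 * a per-cpu timer, can raise the priority right after binding:
 *
 *	if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
 *		pr_debug("Could not set priority for irq %d\n", irq);
 *
 * Priorities are only implemented by the FIFO-based event channel ABI;
 * with the 2-level ABI the hypercall is expected to fail.
 */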
1135
1136 int evtchn_make_refcounted(unsigned int evtchn)
1137 {
1138 int irq = get_evtchn_to_irq(evtchn);
1139 struct irq_info *info;
1140
1141 if (irq == -1)
1142 return -ENOENT;
1143
1144 info = irq_get_handler_data(irq);
1145
1146 if (!info)
1147 return -ENOENT;
1148
1149 WARN_ON(info->refcnt != -1);
1150
1151 info->refcnt = 1;
1152
1153 return 0;
1154 }
1155 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1156
1157 int evtchn_get(unsigned int evtchn)
1158 {
1159 int irq;
1160 struct irq_info *info;
1161 int err = -ENOENT;
1162
1163 if (evtchn >= xen_evtchn_max_channels())
1164 return -EINVAL;
1165
1166 mutex_lock(&irq_mapping_update_lock);
1167
1168 irq = get_evtchn_to_irq(evtchn);
1169 if (irq == -1)
1170 goto done;
1171
1172 info = irq_get_handler_data(irq);
1173
1174 if (!info)
1175 goto done;
1176
1177 err = -EINVAL;
1178 if (info->refcnt <= 0)
1179 goto done;
1180
1181 info->refcnt++;
1182 err = 0;
1183 done:
1184 mutex_unlock(&irq_mapping_update_lock);
1185
1186 return err;
1187 }
1188 EXPORT_SYMBOL_GPL(evtchn_get);
1189
1190 void evtchn_put(unsigned int evtchn)
1191 {
1192 int irq = get_evtchn_to_irq(evtchn);
1193 if (WARN_ON(irq == -1))
1194 return;
1195 unbind_from_irq(irq);
1196 }
1197 EXPORT_SYMBOL_GPL(evtchn_put);
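
/*
 * Refcounting note: info->refcnt starts at -1, meaning "owned only by
 * the kernel binder" (the common case above). A consumer such as the
 * /dev/xen/evtchn driver is expected to call evtchn_make_refcounted()
 * once after binding and then pair every evtchn_get() with an
 * evtchn_put(); the final put unbinds and closes the channel via
 * unbind_from_irq().
 */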
1198
1199 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1200 {
1201 int irq;
1202
1203 #ifdef CONFIG_X86
1204 if (unlikely(vector == XEN_NMI_VECTOR)) {
1205 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
1206 NULL);
1207 if (rc < 0)
1208 printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1209 return;
1210 }
1211 #endif
1212 irq = per_cpu(ipi_to_irq, cpu)[vector];
1213 BUG_ON(irq < 0);
1214 notify_remote_via_irq(irq);
1215 }
1216
1217 static DEFINE_PER_CPU(unsigned, xed_nesting_count);
1218
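/*
 * Main upcall dispatch. xed_nesting_count guards against re-entrancy:
 * if an upcall arrives while one is already being processed on this
 * cpu, the nested invocation only clears evtchn_upcall_pending and
 * returns, and the outer loop notices (count != 1, or pending set
 * again) and re-scans the pending event words.
 */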
1219 static void __xen_evtchn_do_upcall(void)
1220 {
1221 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
1222 int cpu = get_cpu();
1223 unsigned count;
1224
1225 do {
1226 vcpu_info->evtchn_upcall_pending = 0;
1227
1228 if (__this_cpu_inc_return(xed_nesting_count) - 1)
1229 goto out;
1230
1231 xen_evtchn_handle_events(cpu);
1232
1233 BUG_ON(!irqs_disabled());
1234
1235 count = __this_cpu_read(xed_nesting_count);
1236 __this_cpu_write(xed_nesting_count, 0);
1237 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
1238
1239 out:
1240
1241 put_cpu();
1242 }
1243
1244 void xen_evtchn_do_upcall(struct pt_regs *regs)
1245 {
1246 struct pt_regs *old_regs = set_irq_regs(regs);
1247
1248 irq_enter();
1249 #ifdef CONFIG_X86
1250 inc_irq_stat(irq_hv_callback_count);
1251 #endif
1252
1253 __xen_evtchn_do_upcall();
1254
1255 irq_exit();
1256 set_irq_regs(old_regs);
1257 }
1258
1259 void xen_hvm_evtchn_do_upcall(void)
1260 {
1261 __xen_evtchn_do_upcall();
1262 }
1263 EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
1264
1265 /* Rebind a new event channel to an existing irq. */
1266 void rebind_evtchn_irq(int evtchn, int irq)
1267 {
1268 struct irq_info *info = info_for_irq(irq);
1269
1270 if (WARN_ON(!info))
1271 return;
1272
1273 /* Make sure the irq is masked, since the new event channel
1274 will also be masked. */
1275 disable_irq(irq);
1276
1277 mutex_lock(&irq_mapping_update_lock);
1278
1279 /* After resume the irq<->evtchn mappings are all cleared out */
1280 BUG_ON(get_evtchn_to_irq(evtchn) != -1);
1281 /* Expect irq to have been bound before,
1282 so there should be a proper type */
1283 BUG_ON(info->type == IRQT_UNBOUND);
1284
1285 (void)xen_irq_info_evtchn_setup(irq, evtchn);
1286
1287 mutex_unlock(&irq_mapping_update_lock);
1288
1289 bind_evtchn_to_cpu(evtchn, info->cpu);
1290 /* This will be deferred until interrupt is processed */
1291 irq_set_affinity(irq, cpumask_of(info->cpu));
1292
1293 /* Unmask the event channel. */
1294 enable_irq(irq);
1295 }
1296
1297 /* Rebind an evtchn so that it gets delivered to a specific cpu */
1298 int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
1299 {
1300 struct evtchn_bind_vcpu bind_vcpu;
1301 int masked;
1302
1303 if (!VALID_EVTCHN(evtchn))
1304 return -1;
1305
1306 if (!xen_support_evtchn_rebind())
1307 return -1;
1308
1309 /* Send future instances of this interrupt to the target vcpu. */
1310 bind_vcpu.port = evtchn;
1311 bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
1312
1313 /*
1314 * Mask the event while changing the VCPU binding to prevent
1315 * it being delivered on an unexpected VCPU.
1316 */
1317 masked = test_and_set_mask(evtchn);
1318
1319 /*
1320 * If this fails, it usually just indicates that we're dealing with a
1321 * virq or IPI channel, which don't actually need to be rebound. Ignore
1322 * it, but don't do the xenlinux-level rebind in that case.
1323 */
1324 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1325 bind_evtchn_to_cpu(evtchn, tcpu);
1326
1327 if (!masked)
1328 unmask_evtchn(evtchn);
1329
1330 return 0;
1331 }
1332 EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
1333
1334 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1335 bool force)
1336 {
1337 unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
1338 int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
1339
1340 if (!ret)
1341 irq_data_update_effective_affinity(data, cpumask_of(tcpu));
1342
1343 return ret;
1344 }
1345
1346 static void enable_dynirq(struct irq_data *data)
1347 {
1348 int evtchn = evtchn_from_irq(data->irq);
1349
1350 if (VALID_EVTCHN(evtchn))
1351 unmask_evtchn(evtchn);
1352 }
1353
1354 static void disable_dynirq(struct irq_data *data)
1355 {
1356 int evtchn = evtchn_from_irq(data->irq);
1357
1358 if (VALID_EVTCHN(evtchn))
1359 mask_evtchn(evtchn);
1360 }
1361
1362 static void ack_dynirq(struct irq_data *data)
1363 {
1364 int evtchn = evtchn_from_irq(data->irq);
1365
1366 if (!VALID_EVTCHN(evtchn))
1367 return;
1368
1369 if (unlikely(irqd_is_setaffinity_pending(data)) &&
1370 likely(!irqd_irq_disabled(data))) {
1371 int masked = test_and_set_mask(evtchn);
1372
1373 clear_evtchn(evtchn);
1374
1375 irq_move_masked_irq(data);
1376
1377 if (!masked)
1378 unmask_evtchn(evtchn);
1379 } else
1380 clear_evtchn(evtchn);
1381 }
1382
1383 static void mask_ack_dynirq(struct irq_data *data)
1384 {
1385 disable_dynirq(data);
1386 ack_dynirq(data);
1387 }
1388
1389 static int retrigger_dynirq(struct irq_data *data)
1390 {
1391 unsigned int evtchn = evtchn_from_irq(data->irq);
1392 int masked;
1393
1394 if (!VALID_EVTCHN(evtchn))
1395 return 0;
1396
1397 masked = test_and_set_mask(evtchn);
1398 set_evtchn(evtchn);
1399 if (!masked)
1400 unmask_evtchn(evtchn);
1401
1402 return 1;
1403 }
1404
1405 static void restore_pirqs(void)
1406 {
1407 int pirq, rc, irq, gsi;
1408 struct physdev_map_pirq map_irq;
1409 struct irq_info *info;
1410
1411 list_for_each_entry(info, &xen_irq_list_head, list) {
1412 if (info->type != IRQT_PIRQ)
1413 continue;
1414
1415 pirq = info->u.pirq.pirq;
1416 gsi = info->u.pirq.gsi;
1417 irq = info->irq;
1418
1419 /* save/restore of PT devices doesn't work, so at this point the
1420 * only devices present are GSI based emulated devices */
1421 if (!gsi)
1422 continue;
1423
1424 map_irq.domid = DOMID_SELF;
1425 map_irq.type = MAP_PIRQ_TYPE_GSI;
1426 map_irq.index = gsi;
1427 map_irq.pirq = pirq;
1428
1429 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1430 if (rc) {
1431 pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1432 gsi, irq, pirq, rc);
1433 xen_free_irq(irq);
1434 continue;
1435 }
1436
1437 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1438
1439 __startup_pirq(irq);
1440 }
1441 }
1442
1443 static void restore_cpu_virqs(unsigned int cpu)
1444 {
1445 struct evtchn_bind_virq bind_virq;
1446 int virq, irq, evtchn;
1447
1448 for (virq = 0; virq < NR_VIRQS; virq++) {
1449 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1450 continue;
1451
1452 BUG_ON(virq_from_irq(irq) != virq);
1453
1454 /* Get a new binding from Xen. */
1455 bind_virq.virq = virq;
1456 bind_virq.vcpu = xen_vcpu_nr(cpu);
1457 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1458 &bind_virq) != 0)
1459 BUG();
1460 evtchn = bind_virq.port;
1461
1462 /* Record the new mapping. */
1463 (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
1464 bind_evtchn_to_cpu(evtchn, cpu);
1465 }
1466 }
1467
1468 static void restore_cpu_ipis(unsigned int cpu)
1469 {
1470 struct evtchn_bind_ipi bind_ipi;
1471 int ipi, irq, evtchn;
1472
1473 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1474 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1475 continue;
1476
1477 BUG_ON(ipi_from_irq(irq) != ipi);
1478
1479 /* Get a new binding from Xen. */
1480 bind_ipi.vcpu = xen_vcpu_nr(cpu);
1481 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1482 &bind_ipi) != 0)
1483 BUG();
1484 evtchn = bind_ipi.port;
1485
1486 /* Record the new mapping. */
1487 (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
1488 bind_evtchn_to_cpu(evtchn, cpu);
1489 }
1490 }
1491
1492 /* Clear an irq's pending state, in preparation for polling on it */
1493 void xen_clear_irq_pending(int irq)
1494 {
1495 int evtchn = evtchn_from_irq(irq);
1496
1497 if (VALID_EVTCHN(evtchn))
1498 clear_evtchn(evtchn);
1499 }
1500 EXPORT_SYMBOL(xen_clear_irq_pending);
1501 void xen_set_irq_pending(int irq)
1502 {
1503 int evtchn = evtchn_from_irq(irq);
1504
1505 if (VALID_EVTCHN(evtchn))
1506 set_evtchn(evtchn);
1507 }
1508
1509 bool xen_test_irq_pending(int irq)
1510 {
1511 int evtchn = evtchn_from_irq(irq);
1512 bool ret = false;
1513
1514 if (VALID_EVTCHN(evtchn))
1515 ret = test_evtchn(evtchn);
1516
1517 return ret;
1518 }
1519
1520 /* Poll waiting for an irq to become pending with timeout. In the usual case,
1521 * the irq will be disabled so it won't deliver an interrupt. */
1522 void xen_poll_irq_timeout(int irq, u64 timeout)
1523 {
1524 evtchn_port_t evtchn = evtchn_from_irq(irq);
1525
1526 if (VALID_EVTCHN(evtchn)) {
1527 struct sched_poll poll;
1528
1529 poll.nr_ports = 1;
1530 poll.timeout = timeout;
1531 set_xen_guest_handle(poll.ports, &evtchn);
1532
1533 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
1534 BUG();
1535 }
1536 }
1537 EXPORT_SYMBOL(xen_poll_irq_timeout);
1538 /* Poll waiting for an irq to become pending. In the usual case, the
1539 * irq will be disabled so it won't deliver an interrupt. */
1540 void xen_poll_irq(int irq)
1541 {
1542 xen_poll_irq_timeout(irq, 0 /* no timeout */);
1543 }
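
/*
 * Typical poll pattern (a sketch along the lines of the pv spinlock
 * slow path): clear the pending state, re-check the wakeup condition,
 * then block in the hypervisor until the event fires.
 *
 *	xen_clear_irq_pending(irq);
 *	barrier();
 *	if (!condition)
 *		xen_poll_irq(irq);
 */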
1544
1545 /* Check whether the IRQ line is shared with other guests. */
1546 int xen_test_irq_shared(int irq)
1547 {
1548 struct irq_info *info = info_for_irq(irq);
1549 struct physdev_irq_status_query irq_status;
1550
1551 if (WARN_ON(!info))
1552 return -ENOENT;
1553
1554 irq_status.irq = info->u.pirq.pirq;
1555
1556 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1557 return 0;
1558 return !(irq_status.flags & XENIRQSTAT_shared);
1559 }
1560 EXPORT_SYMBOL_GPL(xen_test_irq_shared);
1561
1562 void xen_irq_resume(void)
1563 {
1564 unsigned int cpu;
1565 struct irq_info *info;
1566
1567 /* New event-channel space is not 'live' yet. */
1568 xen_evtchn_resume();
1569
1570 /* No IRQ <-> event-channel mappings. */
1571 list_for_each_entry(info, &xen_irq_list_head, list)
1572 info->evtchn = 0; /* zap event-channel binding */
1573
1574 clear_evtchn_to_irq_all();
1575
1576 for_each_possible_cpu(cpu) {
1577 restore_cpu_virqs(cpu);
1578 restore_cpu_ipis(cpu);
1579 }
1580
1581 restore_pirqs();
1582 }
1583
1584 static struct irq_chip xen_dynamic_chip __read_mostly = {
1585 .name = "xen-dyn",
1586
1587 .irq_disable = disable_dynirq,
1588 .irq_mask = disable_dynirq,
1589 .irq_unmask = enable_dynirq,
1590
1591 .irq_ack = ack_dynirq,
1592 .irq_mask_ack = mask_ack_dynirq,
1593
1594 .irq_set_affinity = set_affinity_irq,
1595 .irq_retrigger = retrigger_dynirq,
1596 };
1597
1598 static struct irq_chip xen_pirq_chip __read_mostly = {
1599 .name = "xen-pirq",
1600
1601 .irq_startup = startup_pirq,
1602 .irq_shutdown = shutdown_pirq,
1603 .irq_enable = enable_pirq,
1604 .irq_disable = disable_pirq,
1605
1606 .irq_mask = disable_dynirq,
1607 .irq_unmask = enable_dynirq,
1608
1609 .irq_ack = eoi_pirq,
1610 .irq_eoi = eoi_pirq,
1611 .irq_mask_ack = mask_ack_pirq,
1612
1613 .irq_set_affinity = set_affinity_irq,
1614
1615 .irq_retrigger = retrigger_dynirq,
1616 };
1617
1618 static struct irq_chip xen_percpu_chip __read_mostly = {
1619 .name = "xen-percpu",
1620
1621 .irq_disable = disable_dynirq,
1622 .irq_mask = disable_dynirq,
1623 .irq_unmask = enable_dynirq,
1624
1625 .irq_ack = ack_dynirq,
1626 };
1627
1628 int xen_set_callback_via(uint64_t via)
1629 {
1630 struct xen_hvm_param a;
1631 a.domid = DOMID_SELF;
1632 a.index = HVM_PARAM_CALLBACK_IRQ;
1633 a.value = via;
1634 return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
1635 }
1636 EXPORT_SYMBOL_GPL(xen_set_callback_via);
1637
1638 #ifdef CONFIG_XEN_PVHVM
1639 /* Vector callbacks are better than PCI interrupts to receive event
1640 * channel notifications because we can receive vector callbacks on any
1641 * vcpu and we don't need PCI support or APIC interactions. */
1642 void xen_callback_vector(void)
1643 {
1644 int rc;
1645 uint64_t callback_via;
1646
1647 if (xen_have_vector_callback) {
1648 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
1649 rc = xen_set_callback_via(callback_via);
1650 if (rc) {
1651 pr_err("Request for Xen HVM callback vector failed\n");
1652 xen_have_vector_callback = 0;
1653 return;
1654 }
1655 pr_info("Xen HVM callback vector for event delivery is enabled\n");
1656 /* in the restore case the vector has already been allocated */
1657 if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
1658 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1659 xen_hvm_callback_vector);
1660 }
1661 }
1662 #else
1663 void xen_callback_vector(void) {}
1664 #endif
1665
1666 #undef MODULE_PARAM_PREFIX
1667 #define MODULE_PARAM_PREFIX "xen."
1668
1669 static bool fifo_events = true;
1670 module_param(fifo_events, bool, 0);
1671
1672 void __init xen_init_IRQ(void)
1673 {
1674 int ret = -EINVAL;
1675 unsigned int evtchn;
1676
1677 if (fifo_events)
1678 ret = xen_evtchn_fifo_init();
1679 if (ret < 0)
1680 xen_evtchn_2l_init();
1681
1682 evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
1683 sizeof(*evtchn_to_irq), GFP_KERNEL);
1684 BUG_ON(!evtchn_to_irq);
1685
1686 /* No event channels are 'live' right now. */
1687 for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
1688 mask_evtchn(evtchn);
1689
1690 pirq_needs_eoi = pirq_needs_eoi_flag;
1691
1692 #ifdef CONFIG_X86
1693 if (xen_pv_domain()) {
1694 irq_ctx_init(smp_processor_id());
1695 if (xen_initial_domain())
1696 pci_xen_initial_domain();
1697 }
1698 if (xen_feature(XENFEAT_hvm_callback_vector))
1699 xen_callback_vector();
1700
1701 if (xen_hvm_domain()) {
1702 native_init_IRQ();
1703 /* pci_xen_hvm_init must be called after native_init_IRQ so that
1704 * __acpi_register_gsi can point at the right function */
1705 pci_xen_hvm_init();
1706 } else {
1707 int rc;
1708 struct physdev_pirq_eoi_gmfn eoi_gmfn;
1709
1710 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
1711 eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
1712 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
1713 if (rc != 0) {
1714 free_page((unsigned long) pirq_eoi_map);
1715 pirq_eoi_map = NULL;
1716 } else
1717 pirq_needs_eoi = pirq_check_eoi_map;
1718 }
1719 #endif
1720 }