/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
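
/*
 * Editorial sketch (not part of the original comment): the event kinds
 * above map onto the binders defined later in this file -- kind 1 uses
 * bind_evtchn_to_irqhandler(), kind 2 uses bind_virq_to_irqhandler(),
 * kind 3 uses bind_ipi_to_irqhandler(). For example, a per-cpu timer
 * might be wired up roughly like this ('xen_timer_interrupt' here is a
 * hypothetical handler name):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_DISABLED, "timer", NULL);
 */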

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};

static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.u.pirq = { .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

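/*
 * Usage sketch (editorial, not from the original file): after queuing
 * requests on a shared ring, a front-end kicks the back-end through the
 * irq it bound earlier. 'info' and its fields are hypothetical names.
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(info->irq);
 */
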
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

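/*
 * Worked example for the resend above (editorial note, assuming
 * BITS_PER_LONG == 64): unmasking port 130 while it is still pending
 * sets bit 130 / 64 == 2 in evtchn_pending_sel, so the next upcall
 * rescans word 2 of evtchn_pending instead of losing the edge.
 */
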
static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	/* Only allocate from dynirq range */
	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_cpu(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	dynamic_irq_init(irq);

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];
	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = mk_unbound_info();

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

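/*
 * Usage sketch (editorial, not from the original file): a typical
 * front-end driver binds the event channel it negotiated over xenbus,
 * and tears it down with unbind_from_irqhandler() on the error path or
 * at disconnect. 'info', its fields and 'my_interrupt' are hypothetical.
 *
 *	err = bind_evtchn_to_irqhandler(info->evtchn, my_interrupt,
 *					0, "my-frontend", info);
 *	if (err < 0)
 *		goto fail;
 *	info->irq = err;
 */
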
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}


static void xen_do_irq(unsigned irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (WARN_ON(irq == -1))
		return;

	exit_idle();
	irq_enter();

	handle_irq(irq, regs);

	irq_exit();

	set_irq_regs(old_regs);
}

/*
 * Search the CPU's pending events bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				xen_do_irq(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	put_cpu();
}

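/*
 * Worked example of the two-level search (editorial note, assuming
 * BITS_PER_LONG == 64): if only port 200 is pending, the selector word
 * has bit 200 / 64 == 3 set, so word_idx == 3; active_evtchns() then
 * yields bit 200 % 64 == 8 of that word, giving port == 3 * 64 + 8 ==
 * 200, which is looked up in evtchn_to_irq[] and handled as a normal
 * irq by xen_do_irq() above.
 */
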
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so the bindcount should be non-0 */
	BUG_ON(irq_bindcount[irq] == 0);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}


static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);

	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending. In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}

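/*
 * Usage sketch (editorial, not from the original file): the
 * clear/test/poll helpers above support a spin-then-block pattern, in
 * the spirit of the Xen spinlock code. 'irq' and 'woken_up' are
 * hypothetical names.
 *
 *	xen_clear_irq_pending(irq);
 *	while (!woken_up())
 *		xen_poll_irq(irq);	// blocks in Xen until irq pends
 */
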
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
	int i;
	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);

	cpu_evtchn_mask_p = alloc_bootmem(size);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < nr_irqs; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}
956 | } |