Commit | Line | Data |
---|---|---|
74afab7a JL |
1 | /* |
2 | * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc. | |
3 | * | |
4 | * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo | |
5 | * Moved from arch/x86/kernel/apic/io_apic.c. | |
b5dc8e6c JL |
6 | * Jiang Liu <jiang.liu@linux.intel.com> |
7 | * Enable support of hierarchical irqdomains | |
74afab7a JL |
8 | * |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | */ | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/init.h> | |
15 | #include <linux/compiler.h> | |
16 | #include <linux/irqdomain.h> | |
17 | #include <linux/slab.h> | |
18 | #include <asm/hw_irq.h> | |
19 | #include <asm/apic.h> | |
20 | #include <asm/i8259.h> | |
21 | #include <asm/desc.h> | |
22 | #include <asm/irq_remapping.h> | |
23 | ||
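/*
 * Root irqdomain of the x86 interrupt hierarchy; the IOAPIC, MSI and
 * HT irqdomains are stacked on top of it.
 */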
b5dc8e6c | 24 | struct irq_domain *x86_vector_domain; |
74afab7a | 25 | static DEFINE_RAW_SPINLOCK(vector_lock); |
b5dc8e6c | 26 | static struct irq_chip lapic_controller; |
13315320 JL |
27 | #ifdef CONFIG_X86_IO_APIC |
28 | static struct irq_cfg *legacy_irq_cfgs[NR_IRQS_LEGACY]; | |
29 | #endif | |
74afab7a JL |
30 | |
31 | void lock_vector_lock(void) | |
32 | { | |
33 | /* Used to ensure that the online set of cpus does not change | 
34 | * during assign_irq_vector. | |
35 | */ | |
36 | raw_spin_lock(&vector_lock); | |
37 | } | |
38 | ||
39 | void unlock_vector_lock(void) | |
40 | { | |
41 | raw_spin_unlock(&vector_lock); | |
42 | } | |
43 | ||
44 | struct irq_cfg *irq_cfg(unsigned int irq) | |
45 | { | |
b5dc8e6c | 46 | return irqd_cfg(irq_get_irq_data(irq)); |
74afab7a JL |
47 | } |
48 | ||
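/*
 * With hierarchical irqdomains the vector domain is the root, so walking
 * ->parent_data up to the topmost irq_data yields the irq_cfg that this
 * file stores in chip_data.
 */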
49 | struct irq_cfg *irqd_cfg(struct irq_data *irq_data) | |
50 | { | |
b5dc8e6c JL |
51 | if (!irq_data) |
52 | return NULL; | |
53 | ||
54 | while (irq_data->parent_data) | |
55 | irq_data = irq_data->parent_data; | |
56 | ||
74afab7a JL |
57 | return irq_data->chip_data; |
58 | } | |
59 | ||
b5dc8e6c | 60 | static struct irq_cfg *alloc_irq_cfg(int node) |
74afab7a JL |
61 | { |
62 | struct irq_cfg *cfg; | |
63 | ||
64 | cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); | |
65 | if (!cfg) | |
66 | return NULL; | |
67 | if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) | |
68 | goto out_cfg; | |
69 | if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) | |
70 | goto out_domain; | |
74afab7a JL |
71 | return cfg; |
72 | out_domain: | |
73 | free_cpumask_var(cfg->domain); | |
74 | out_cfg: | |
75 | kfree(cfg); | |
76 | return NULL; | |
77 | } | |
78 | ||
b5dc8e6c | 79 | static void free_irq_cfg(struct irq_cfg *cfg) |
74afab7a | 80 | { |
b5dc8e6c JL |
81 | if (cfg) { |
82 | free_cpumask_var(cfg->domain); | |
83 | free_cpumask_var(cfg->old_domain); | |
84 | kfree(cfg); | |
85 | } | |
74afab7a JL |
86 | } |
87 | ||
88 | static int | |
89 | __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | |
90 | { | |
91 | /* | |
92 | * NOTE! The local APIC isn't very good at handling | |
93 | * multiple interrupts at the same interrupt level. | |
94 | * As the interrupt level is determined by taking the | |
95 | * vector number and shifting that right by 4, we | |
96 | * want to spread these out a bit so that they don't | |
97 | * all fall in the same interrupt level. | |
98 | * | |
99 | * Also, we've got to be careful not to trash gate | |
100 | * 0x80, because int 0x80 is the legacy system call entry point. | 
101 | */ | |
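/*
 * Example: vector 0x31 maps to priority level 0x31 >> 4 == 3; stepping
 * candidate vectors by 16 below therefore places successive allocations
 * in different priority levels.
 */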
102 | static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; | |
103 | static int current_offset = VECTOR_OFFSET_START % 16; | |
104 | int cpu, err; | |
105 | cpumask_var_t tmp_mask; | |
106 | ||
107 | if (cfg->move_in_progress) | |
108 | return -EBUSY; | |
109 | ||
110 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) | |
111 | return -ENOMEM; | |
112 | ||
113 | /* Only try to allocate irqs on cpus that are online */ | 
114 | err = -ENOSPC; | |
115 | cpumask_clear(cfg->old_domain); | |
116 | cpu = cpumask_first_and(mask, cpu_online_mask); | |
117 | while (cpu < nr_cpu_ids) { | |
118 | int new_cpu, vector, offset; | |
119 | ||
120 | apic->vector_allocation_domain(cpu, tmp_mask, mask); | |
121 | ||
122 | if (cpumask_subset(tmp_mask, cfg->domain)) { | |
123 | err = 0; | |
124 | if (cpumask_equal(tmp_mask, cfg->domain)) | |
125 | break; | |
126 | /* | |
127 | * The new cpumask using the vector is a proper subset of | 
128 | * the currently in-use mask, so clean up the vector | 
129 | * allocation for the members that are no longer used. | 
130 | */ | |
131 | cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); | |
132 | cfg->move_in_progress = | |
133 | cpumask_intersects(cfg->old_domain, cpu_online_mask); | |
134 | cpumask_and(cfg->domain, cfg->domain, tmp_mask); | |
135 | break; | |
136 | } | |
137 | ||
138 | vector = current_vector; | |
139 | offset = current_offset; | |
140 | next: | |
141 | vector += 16; | |
142 | if (vector >= first_system_vector) { | |
143 | offset = (offset + 1) % 16; | |
144 | vector = FIRST_EXTERNAL_VECTOR + offset; | |
145 | } | |
146 | ||
147 | if (unlikely(current_vector == vector)) { | |
148 | cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask); | |
149 | cpumask_andnot(tmp_mask, mask, cfg->old_domain); | |
150 | cpu = cpumask_first_and(tmp_mask, cpu_online_mask); | |
151 | continue; | |
152 | } | |
153 | ||
154 | if (test_bit(vector, used_vectors)) | |
155 | goto next; | |
156 | ||
157 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { | |
158 | if (per_cpu(vector_irq, new_cpu)[vector] > | |
159 | VECTOR_UNDEFINED) | |
160 | goto next; | |
161 | } | |
162 | /* Found one! */ | |
163 | current_vector = vector; | |
164 | current_offset = offset; | |
165 | if (cfg->vector) { | |
166 | cpumask_copy(cfg->old_domain, cfg->domain); | |
167 | cfg->move_in_progress = | |
168 | cpumask_intersects(cfg->old_domain, cpu_online_mask); | |
169 | } | |
170 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) | |
171 | per_cpu(vector_irq, new_cpu)[vector] = irq; | |
172 | cfg->vector = vector; | |
173 | cpumask_copy(cfg->domain, tmp_mask); | |
174 | err = 0; | |
175 | break; | |
176 | } | |
177 | free_cpumask_var(tmp_mask); | |
178 | ||
5f0052f9 JL |
179 | if (!err) { |
180 | /* cache destination APIC IDs into cfg->dest_apicid */ | |
181 | err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, | |
182 | &cfg->dest_apicid); | |
183 | } | |
184 | ||
74afab7a JL |
185 | return err; |
186 | } | |
187 | ||
f970510c JL |
188 | static int assign_irq_vector(int irq, struct irq_cfg *cfg, |
189 | const struct cpumask *mask) | |
74afab7a JL |
190 | { |
191 | int err; | |
192 | unsigned long flags; | |
193 | ||
194 | raw_spin_lock_irqsave(&vector_lock, flags); | |
195 | err = __assign_irq_vector(irq, cfg, mask); | |
196 | raw_spin_unlock_irqrestore(&vector_lock, flags); | |
197 | return err; | |
198 | } | |
199 | ||
f970510c | 200 | static void clear_irq_vector(int irq, struct irq_cfg *cfg) |
74afab7a JL |
201 | { |
202 | int cpu, vector; | |
203 | unsigned long flags; | |
204 | ||
205 | raw_spin_lock_irqsave(&vector_lock, flags); | |
206 | BUG_ON(!cfg->vector); | |
207 | ||
208 | vector = cfg->vector; | |
209 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) | |
210 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | |
211 | ||
212 | cfg->vector = 0; | |
213 | cpumask_clear(cfg->domain); | |
214 | ||
215 | if (likely(!cfg->move_in_progress)) { | |
216 | raw_spin_unlock_irqrestore(&vector_lock, flags); | |
217 | return; | |
218 | } | |
219 | ||
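/*
 * A vector move was still in progress: also scrub the stale vector that
 * remains installed on the cpus of the old domain.
 */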
220 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { | |
221 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | |
222 | vector++) { | |
223 | if (per_cpu(vector_irq, cpu)[vector] != irq) | |
224 | continue; | |
225 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | |
226 | break; | |
227 | } | |
228 | } | |
229 | cfg->move_in_progress = 0; | |
230 | raw_spin_unlock_irqrestore(&vector_lock, flags); | |
231 | } | |
232 | ||
b5dc8e6c JL |
233 | void init_irq_alloc_info(struct irq_alloc_info *info, |
234 | const struct cpumask *mask) | |
235 | { | |
236 | memset(info, 0, sizeof(*info)); | |
237 | info->mask = mask; | |
238 | } | |
239 | ||
240 | void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src) | |
241 | { | |
242 | if (src) | |
243 | *dst = *src; | |
244 | else | |
245 | memset(dst, 0, sizeof(*dst)); | |
246 | } | |
247 | ||
248 | static inline const struct cpumask * | |
249 | irq_alloc_info_get_mask(struct irq_alloc_info *info) | |
250 | { | |
251 | return (!info || !info->mask) ? apic->target_cpus() : info->mask; | |
252 | } | |
253 | ||
254 | static void x86_vector_free_irqs(struct irq_domain *domain, | |
255 | unsigned int virq, unsigned int nr_irqs) | |
256 | { | |
257 | struct irq_data *irq_data; | |
258 | int i; | |
259 | ||
260 | for (i = 0; i < nr_irqs; i++) { | |
261 | irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i); | |
262 | if (irq_data && irq_data->chip_data) { | |
b5dc8e6c JL |
263 | clear_irq_vector(virq + i, irq_data->chip_data); |
264 | free_irq_cfg(irq_data->chip_data); | |
13315320 JL |
265 | #ifdef CONFIG_X86_IO_APIC |
266 | if (virq + i < nr_legacy_irqs()) | |
267 | legacy_irq_cfgs[virq + i] = NULL; | |
268 | #endif | |
b5dc8e6c JL |
269 | irq_domain_reset_irq_data(irq_data); |
270 | } | |
271 | } | |
272 | } | |
273 | ||
274 | static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, | |
275 | unsigned int nr_irqs, void *arg) | |
276 | { | |
277 | struct irq_alloc_info *info = arg; | |
278 | const struct cpumask *mask; | |
279 | struct irq_data *irq_data; | |
280 | struct irq_cfg *cfg; | |
281 | int i, err; | |
282 | ||
283 | if (disable_apic) | |
284 | return -ENXIO; | |
285 | ||
286 | /* Currently the vector allocator can't guarantee contiguous allocations */ | 
287 | if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1) | |
288 | return -ENOSYS; | |
289 | ||
290 | mask = irq_alloc_info_get_mask(info); | |
291 | for (i = 0; i < nr_irqs; i++) { | |
292 | irq_data = irq_domain_get_irq_data(domain, virq + i); | |
293 | BUG_ON(!irq_data); | |
13315320 JL |
294 | #ifdef CONFIG_X86_IO_APIC |
295 | if (virq + i < nr_legacy_irqs() && legacy_irq_cfgs[virq + i]) | |
296 | cfg = legacy_irq_cfgs[virq + i]; | |
297 | else | |
298 | #endif | |
299 | cfg = alloc_irq_cfg(irq_data->node); | |
b5dc8e6c JL |
300 | if (!cfg) { |
301 | err = -ENOMEM; | |
302 | goto error; | |
303 | } | |
304 | ||
305 | irq_data->chip = &lapic_controller; | |
306 | irq_data->chip_data = cfg; | |
307 | irq_data->hwirq = virq + i; | |
308 | err = assign_irq_vector(virq + i, cfg, mask); | 
309 | if (err) | |
310 | goto error; | |
311 | } | |
312 | ||
313 | return 0; | |
314 | ||
315 | error: | |
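/*
 * i + 1 also covers the entry that failed part way through; descriptors
 * without chip_data are simply skipped by x86_vector_free_irqs().
 */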
316 | x86_vector_free_irqs(domain, virq, i + 1); | |
317 | return err; | |
318 | } | |
319 | ||
320 | static struct irq_domain_ops x86_vector_domain_ops = { | |
321 | .alloc = x86_vector_alloc_irqs, | |
322 | .free = x86_vector_free_irqs, | |
323 | }; | |
324 | ||
11d686e9 JL |
325 | int __init arch_probe_nr_irqs(void) |
326 | { | |
327 | int nr; | |
328 | ||
329 | if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) | |
330 | nr_irqs = NR_VECTORS * nr_cpu_ids; | |
331 | ||
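/*
 * Baseline estimate: one irq per GSI plus the legacy irqs, with headroom
 * of eight dynamically allocated irqs per cpu.
 */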
332 | nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; | |
333 | #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) | |
334 | /* | |
335 | * Reserve additional vectors for dynamically allocated MSI and HT irqs. | 
336 | */ | |
337 | if (gsi_top <= NR_IRQS_LEGACY) | |
338 | nr += 8 * nr_cpu_ids; | |
339 | else | |
340 | nr += gsi_top * 16; | |
341 | #endif | |
342 | if (nr < nr_irqs) | |
343 | nr_irqs = nr; | |
344 | ||
345 | return nr_legacy_irqs(); | |
346 | } | |
347 | ||
13315320 JL |
348 | #ifdef CONFIG_X86_IO_APIC |
349 | static void init_legacy_irqs(void) | |
350 | { | |
351 | int i, node = cpu_to_node(0); | |
352 | struct irq_cfg *cfg; | |
353 | ||
354 | /* | |
355 | * For legacy IRQs, start with assigning irq0 to irq15 to | 
356 | * IRQ0_VECTOR to IRQ15_VECTOR for all cpus. | 
357 | */ | |
358 | for (i = 0; i < nr_legacy_irqs(); i++) { | |
359 | cfg = legacy_irq_cfgs[i] = alloc_irq_cfg(node); | |
360 | BUG_ON(!cfg); | |
365 | cfg->vector = IRQ0_VECTOR + i; | |
366 | cpumask_setall(cfg->domain); | |
367 | irq_set_chip_data(i, cfg); | |
368 | } | |
369 | } | |
370 | #else | |
371 | static void init_legacy_irqs(void) { } | |
372 | #endif | |
373 | ||
11d686e9 JL |
374 | int __init arch_early_irq_init(void) |
375 | { | |
13315320 JL |
376 | init_legacy_irqs(); |
377 | ||
b5dc8e6c JL |
378 | x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops, |
379 | NULL); | |
380 | BUG_ON(x86_vector_domain == NULL); | |
381 | irq_set_default_host(x86_vector_domain); | |
382 | ||
52f518a3 | 383 | arch_init_msi_domain(x86_vector_domain); |
49e07d8f | 384 | arch_init_htirq_domain(x86_vector_domain); |
52f518a3 | 385 | |
11d686e9 JL |
386 | return arch_early_ioapic_init(); |
387 | } | |
388 | ||
74afab7a JL |
389 | static void __setup_vector_irq(int cpu) |
390 | { | |
391 | /* Initialize vector_irq on a new cpu */ | |
392 | int irq, vector; | |
393 | struct irq_cfg *cfg; | |
394 | ||
395 | /* | |
396 | * vector_lock will make sure that we don't run into irq vector | |
397 | * assignments that might be happening on another cpu in parallel, | |
398 | * while we set up our initial vector-to-irq mappings. | 
399 | */ | |
400 | raw_spin_lock(&vector_lock); | |
401 | /* Mark the inuse vectors */ | |
402 | for_each_active_irq(irq) { | |
403 | cfg = irq_cfg(irq); | |
404 | if (!cfg) | |
405 | continue; | |
406 | ||
407 | if (!cpumask_test_cpu(cpu, cfg->domain)) | |
408 | continue; | |
409 | vector = cfg->vector; | |
410 | per_cpu(vector_irq, cpu)[vector] = irq; | |
411 | } | |
412 | /* Release the vectors whose irq is not handled by this cpu */ | 
413 | for (vector = 0; vector < NR_VECTORS; ++vector) { | |
414 | irq = per_cpu(vector_irq, cpu)[vector]; | |
415 | if (irq <= VECTOR_UNDEFINED) | |
416 | continue; | |
417 | ||
418 | cfg = irq_cfg(irq); | |
419 | if (!cpumask_test_cpu(cpu, cfg->domain)) | |
420 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; | |
421 | } | |
422 | raw_spin_unlock(&vector_lock); | |
423 | } | |
424 | ||
425 | /* | |
426 | * Setup the vector to irq mappings. | |
427 | */ | |
428 | void setup_vector_irq(int cpu) | |
429 | { | |
430 | int irq; | |
431 | ||
432 | /* | |
433 | * On most of the platforms, legacy PIC delivers the interrupts on the | |
434 | * boot cpu. But there are certain platforms where PIC interrupts are | |
435 | * delivered to multiple cpus. If the legacy IRQ is handled by the | 
436 | * legacy PIC, then for the new cpu that is coming online, set up | 
437 | * the static legacy vector-to-irq mapping: | 
438 | */ | |
439 | for (irq = 0; irq < nr_legacy_irqs(); irq++) | |
440 | per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq; | |
441 | ||
442 | __setup_vector_irq(cpu); | |
443 | } | |
444 | ||
f970510c | 445 | static int apic_retrigger_irq(struct irq_data *data) |
74afab7a | 446 | { |
a9786091 | 447 | struct irq_cfg *cfg = irqd_cfg(data); |
74afab7a JL |
448 | unsigned long flags; |
449 | int cpu; | |
450 | ||
451 | raw_spin_lock_irqsave(&vector_lock, flags); | |
452 | cpu = cpumask_first_and(cfg->domain, cpu_online_mask); | |
453 | apic->send_IPI_mask(cpumask_of(cpu), cfg->vector); | |
454 | raw_spin_unlock_irqrestore(&vector_lock, flags); | |
455 | ||
456 | return 1; | |
457 | } | |
458 | ||
459 | void apic_ack_edge(struct irq_data *data) | |
460 | { | |
a9786091 | 461 | irq_complete_move(irqd_cfg(data)); |
74afab7a JL |
462 | irq_move_irq(data); |
463 | ack_APIC_irq(); | |
464 | } | |
465 | ||
466 | /* | |
467 | * Either sets data->affinity to a valid value, and returns | |
468 | * ->cpu_mask_to_apicid of that in dest_id, or returns an error and | 
469 | * leaves data->affinity untouched. | |
470 | */ | |
471 | int apic_set_affinity(struct irq_data *data, const struct cpumask *mask, | |
472 | unsigned int *dest_id) | |
473 | { | |
a9786091 | 474 | struct irq_cfg *cfg = irqd_cfg(data); |
74afab7a JL |
475 | unsigned int irq = data->irq; |
476 | int err; | |
477 | ||
478 | if (!config_enabled(CONFIG_SMP)) | |
479 | return -EPERM; | |
480 | ||
481 | if (!cpumask_intersects(mask, cpu_online_mask)) | |
482 | return -EINVAL; | |
483 | ||
484 | err = assign_irq_vector(irq, cfg, mask); | |
485 | if (err) | |
486 | return err; | |
487 | ||
488 | err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); | |
489 | if (err) { | |
490 | if (assign_irq_vector(irq, cfg, data->affinity)) | |
491 | pr_err("Failed to recover vector for irq %d\n", irq); | |
492 | return err; | |
493 | } | |
494 | ||
495 | cpumask_copy(data->affinity, mask); | |
496 | ||
497 | return 0; | |
498 | } | |
499 | ||
b5dc8e6c JL |
500 | static int vector_set_affinity(struct irq_data *irq_data, |
501 | const struct cpumask *dest, bool force) | |
502 | { | |
503 | struct irq_cfg *cfg = irq_data->chip_data; | |
504 | int err, irq = irq_data->irq; | |
505 | ||
506 | if (!config_enabled(CONFIG_SMP)) | |
507 | return -EPERM; | |
508 | ||
509 | if (!cpumask_intersects(dest, cpu_online_mask)) | |
510 | return -EINVAL; | |
511 | ||
512 | err = assign_irq_vector(irq, cfg, dest); | |
513 | if (err) { | |
514 | struct irq_data *top = irq_get_irq_data(irq); | |
515 | ||
516 | if (assign_irq_vector(irq, cfg, top->affinity)) | |
517 | pr_err("Failed to recover vector for irq %d\n", irq); | |
518 | return err; | |
519 | } | |
520 | ||
521 | return IRQ_SET_MASK_OK; | |
522 | } | |
523 | ||
524 | static struct irq_chip lapic_controller = { | |
525 | .irq_ack = apic_ack_edge, | |
526 | .irq_set_affinity = vector_set_affinity, | |
527 | .irq_retrigger = apic_retrigger_irq, | |
528 | }; | |
529 | ||
74afab7a JL |
530 | #ifdef CONFIG_SMP |
531 | void send_cleanup_vector(struct irq_cfg *cfg) | |
532 | { | |
533 | cpumask_var_t cleanup_mask; | |
534 | ||
535 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | |
536 | unsigned int i; | |
537 | ||
538 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | |
539 | apic->send_IPI_mask(cpumask_of(i), | |
540 | IRQ_MOVE_CLEANUP_VECTOR); | |
541 | } else { | |
542 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | |
543 | apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | |
544 | free_cpumask_var(cleanup_mask); | |
545 | } | |
546 | cfg->move_in_progress = 0; | |
547 | } | |
548 | ||
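/*
 * Runs on the cpus of the old vector domain once an irq has been moved:
 * release the per-cpu vector that the irq no longer uses.
 */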
549 | asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) | |
550 | { | |
551 | unsigned vector, me; | |
552 | ||
553 | ack_APIC_irq(); | |
554 | irq_enter(); | |
555 | exit_idle(); | |
556 | ||
557 | me = smp_processor_id(); | |
558 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | |
559 | int irq; | |
560 | unsigned int irr; | |
561 | struct irq_desc *desc; | |
562 | struct irq_cfg *cfg; | |
563 | ||
564 | irq = __this_cpu_read(vector_irq[vector]); | |
565 | ||
566 | if (irq <= VECTOR_UNDEFINED) | |
567 | continue; | |
568 | ||
569 | desc = irq_to_desc(irq); | |
570 | if (!desc) | |
571 | continue; | |
572 | ||
573 | cfg = irq_cfg(irq); | |
574 | if (!cfg) | |
575 | continue; | |
576 | ||
577 | raw_spin_lock(&desc->lock); | |
578 | ||
579 | /* | |
580 | * Check if the irq migration is in progress. If so, we | |
581 | * haven't received the cleanup request yet for this irq. | |
582 | */ | |
583 | if (cfg->move_in_progress) | |
584 | goto unlock; | |
585 | ||
586 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | |
587 | goto unlock; | |
588 | ||
589 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | |
590 | /* | |
591 | * Check if the vector that needs to be cleaned up is still | 
592 | * set in this cpu's IRR. If so, then this is not the best | 
593 | * time to clean it up; let's clean it up on the next attempt | 
594 | * by sending another IRQ_MOVE_CLEANUP_VECTOR | 
595 | * to ourselves. | 
596 | */ | |
597 | if (irr & (1 << (vector % 32))) { | |
598 | apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); | |
599 | goto unlock; | |
600 | } | |
601 | __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); | |
602 | unlock: | |
603 | raw_spin_unlock(&desc->lock); | |
604 | } | |
605 | ||
606 | irq_exit(); | |
607 | } | |
608 | ||
609 | static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) | |
610 | { | |
611 | unsigned me; | |
612 | ||
613 | if (likely(!cfg->move_in_progress)) | |
614 | return; | |
615 | ||
616 | me = smp_processor_id(); | |
617 | ||
618 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | |
619 | send_cleanup_vector(cfg); | |
620 | } | |
621 | ||
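/*
 * The interrupt entry code stores the complemented vector number in
 * orig_ax, so ~orig_ax recovers the vector currently being serviced.
 */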
622 | void irq_complete_move(struct irq_cfg *cfg) | |
623 | { | |
624 | __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); | |
625 | } | |
626 | ||
627 | void irq_force_complete_move(int irq) | |
628 | { | |
629 | struct irq_cfg *cfg = irq_cfg(irq); | |
630 | ||
631 | if (!cfg) | |
632 | return; | |
633 | ||
634 | __irq_complete_move(cfg, cfg->vector); | |
635 | } | |
74afab7a JL |
636 | #endif |
637 | ||
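/*
 * ISR, TMR and IRR are 256-bit bitmaps (one bit per vector), exposed as
 * eight 32-bit registers spaced 0x10 apart.
 */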
74afab7a JL |
638 | static void __init print_APIC_field(int base) |
639 | { | |
640 | int i; | |
641 | ||
642 | printk(KERN_DEBUG); | |
643 | ||
644 | for (i = 0; i < 8; i++) | |
645 | pr_cont("%08x", apic_read(base + i*0x10)); | |
646 | ||
647 | pr_cont("\n"); | |
648 | } | |
649 | ||
650 | static void __init print_local_APIC(void *dummy) | |
651 | { | |
652 | unsigned int i, v, ver, maxlvt; | |
653 | u64 icr; | |
654 | ||
849d3569 JL |
655 | pr_debug("printing local APIC contents on CPU#%d/%d:\n", |
656 | smp_processor_id(), hard_smp_processor_id()); | |
74afab7a | 657 | v = apic_read(APIC_ID); |
849d3569 | 658 | pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id()); |
74afab7a | 659 | v = apic_read(APIC_LVR); |
849d3569 | 660 | pr_info("... APIC VERSION: %08x\n", v); |
74afab7a JL |
661 | ver = GET_APIC_VERSION(v); |
662 | maxlvt = lapic_get_maxlvt(); | |
663 | ||
664 | v = apic_read(APIC_TASKPRI); | |
849d3569 | 665 | pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); |
74afab7a JL |
666 | |
667 | /* !82489DX */ | |
668 | if (APIC_INTEGRATED(ver)) { | |
669 | if (!APIC_XAPIC(ver)) { | |
670 | v = apic_read(APIC_ARBPRI); | |
849d3569 JL |
671 | pr_debug("... APIC ARBPRI: %08x (%02x)\n", |
672 | v, v & APIC_ARBPRI_MASK); | |
74afab7a JL |
673 | } |
674 | v = apic_read(APIC_PROCPRI); | |
849d3569 | 675 | pr_debug("... APIC PROCPRI: %08x\n", v); |
74afab7a JL |
676 | } |
677 | ||
678 | /* | |
679 | * Remote read supported only in the 82489DX and local APIC for | |
680 | * Pentium processors. | |
681 | */ | |
682 | if (!APIC_INTEGRATED(ver) || maxlvt == 3) { | |
683 | v = apic_read(APIC_RRR); | |
849d3569 | 684 | pr_debug("... APIC RRR: %08x\n", v); |
74afab7a JL |
685 | } |
686 | ||
687 | v = apic_read(APIC_LDR); | |
849d3569 | 688 | pr_debug("... APIC LDR: %08x\n", v); |
74afab7a JL |
689 | if (!x2apic_enabled()) { |
690 | v = apic_read(APIC_DFR); | |
849d3569 | 691 | pr_debug("... APIC DFR: %08x\n", v); |
74afab7a JL |
692 | } |
693 | v = apic_read(APIC_SPIV); | |
849d3569 | 694 | pr_debug("... APIC SPIV: %08x\n", v); |
74afab7a | 695 | |
849d3569 | 696 | pr_debug("... APIC ISR field:\n"); |
74afab7a | 697 | print_APIC_field(APIC_ISR); |
849d3569 | 698 | pr_debug("... APIC TMR field:\n"); |
74afab7a | 699 | print_APIC_field(APIC_TMR); |
849d3569 | 700 | pr_debug("... APIC IRR field:\n"); |
74afab7a JL |
701 | print_APIC_field(APIC_IRR); |
702 | ||
703 | /* !82489DX */ | |
704 | if (APIC_INTEGRATED(ver)) { | |
705 | /* Due to the Pentium erratum 3AP. */ | |
706 | if (maxlvt > 3) | |
707 | apic_write(APIC_ESR, 0); | |
708 | ||
709 | v = apic_read(APIC_ESR); | |
849d3569 | 710 | pr_debug("... APIC ESR: %08x\n", v); |
74afab7a JL |
711 | } |
712 | ||
713 | icr = apic_icr_read(); | |
849d3569 JL |
714 | pr_debug("... APIC ICR: %08x\n", (u32)icr); |
715 | pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32)); | |
74afab7a JL |
716 | |
717 | v = apic_read(APIC_LVTT); | |
849d3569 | 718 | pr_debug("... APIC LVTT: %08x\n", v); |
74afab7a JL |
719 | |
720 | if (maxlvt > 3) { | |
721 | /* PC is LVT#4. */ | |
722 | v = apic_read(APIC_LVTPC); | |
849d3569 | 723 | pr_debug("... APIC LVTPC: %08x\n", v); |
74afab7a JL |
724 | } |
725 | v = apic_read(APIC_LVT0); | |
849d3569 | 726 | pr_debug("... APIC LVT0: %08x\n", v); |
74afab7a | 727 | v = apic_read(APIC_LVT1); |
849d3569 | 728 | pr_debug("... APIC LVT1: %08x\n", v); |
74afab7a JL |
729 | |
730 | if (maxlvt > 2) { | |
731 | /* ERR is LVT#3. */ | |
732 | v = apic_read(APIC_LVTERR); | |
849d3569 | 733 | pr_debug("... APIC LVTERR: %08x\n", v); |
74afab7a JL |
734 | } |
735 | ||
736 | v = apic_read(APIC_TMICT); | |
849d3569 | 737 | pr_debug("... APIC TMICT: %08x\n", v); |
74afab7a | 738 | v = apic_read(APIC_TMCCT); |
849d3569 | 739 | pr_debug("... APIC TMCCT: %08x\n", v); |
74afab7a | 740 | v = apic_read(APIC_TDCR); |
849d3569 | 741 | pr_debug("... APIC TDCR: %08x\n", v); |
74afab7a JL |
742 | |
743 | if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { | |
744 | v = apic_read(APIC_EFEAT); | |
745 | maxlvt = (v >> 16) & 0xff; | |
849d3569 | 746 | pr_debug("... APIC EFEAT: %08x\n", v); |
74afab7a | 747 | v = apic_read(APIC_ECTRL); |
849d3569 | 748 | pr_debug("... APIC ECTRL: %08x\n", v); |
74afab7a JL |
749 | for (i = 0; i < maxlvt; i++) { |
750 | v = apic_read(APIC_EILVTn(i)); | |
849d3569 | 751 | pr_debug("... APIC EILVT%d: %08x\n", i, v); |
74afab7a JL |
752 | } |
753 | } | |
754 | pr_cont("\n"); | |
755 | } | |
756 | ||
757 | static void __init print_local_APICs(int maxcpu) | |
758 | { | |
759 | int cpu; | |
760 | ||
761 | if (!maxcpu) | |
762 | return; | |
763 | ||
764 | preempt_disable(); | |
765 | for_each_online_cpu(cpu) { | |
766 | if (cpu >= maxcpu) | |
767 | break; | |
768 | smp_call_function_single(cpu, print_local_APIC, NULL, 1); | |
769 | } | |
770 | preempt_enable(); | |
771 | } | |
772 | ||
773 | static void __init print_PIC(void) | |
774 | { | |
775 | unsigned int v; | |
776 | unsigned long flags; | |
777 | ||
778 | if (!nr_legacy_irqs()) | |
779 | return; | |
780 | ||
849d3569 | 781 | pr_debug("\nprinting PIC contents\n"); |
74afab7a JL |
782 | |
783 | raw_spin_lock_irqsave(&i8259A_lock, flags); | |
784 | ||
785 | v = inb(0xa1) << 8 | inb(0x21); | |
849d3569 | 786 | pr_debug("... PIC IMR: %04x\n", v); |
74afab7a JL |
787 | |
788 | v = inb(0xa0) << 8 | inb(0x20); | |
849d3569 | 789 | pr_debug("... PIC IRR: %04x\n", v); |
74afab7a JL |
790 | |
791 | outb(0x0b, 0xa0); | |
792 | outb(0x0b, 0x20); | |
793 | v = inb(0xa0) << 8 | inb(0x20); | |
794 | outb(0x0a, 0xa0); | |
795 | outb(0x0a, 0x20); | |
796 | ||
797 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); | |
798 | ||
849d3569 | 799 | pr_debug("... PIC ISR: %04x\n", v); |
74afab7a JL |
800 | |
801 | v = inb(0x4d1) << 8 | inb(0x4d0); | |
849d3569 | 802 | pr_debug("... PIC ELCR: %04x\n", v); |
74afab7a JL |
803 | } |
804 | ||
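/*
 * "show_lapic=N" on the kernel command line dumps the local APIC state of
 * the first N cpus at boot; "show_lapic=all" dumps every online cpu.
 */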
805 | static int show_lapic __initdata = 1; | |
806 | static __init int setup_show_lapic(char *arg) | |
807 | { | |
808 | int num = -1; | |
809 | ||
810 | if (strcmp(arg, "all") == 0) { | |
811 | show_lapic = CONFIG_NR_CPUS; | |
812 | } else { | |
813 | get_option(&arg, &num); | |
814 | if (num >= 0) | |
815 | show_lapic = num; | |
816 | } | |
817 | ||
818 | return 1; | |
819 | } | |
820 | __setup("show_lapic=", setup_show_lapic); | |
821 | ||
822 | static int __init print_ICs(void) | |
823 | { | |
824 | if (apic_verbosity == APIC_QUIET) | |
825 | return 0; | |
826 | ||
827 | print_PIC(); | |
828 | ||
829 | /* don't print out if apic is not there */ | |
830 | if (!cpu_has_apic && !apic_from_smp_config()) | |
831 | return 0; | |
832 | ||
833 | print_local_APICs(show_lapic); | |
834 | print_IO_APICs(); | |
835 | ||
836 | return 0; | |
837 | } | |
838 | ||
839 | late_initcall(print_ICs); |