/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
	/*
	 * Used to ensure that the set of online CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

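/*
 * The vector domain is the root of the irqdomain hierarchy, so walk the
 * irq_data chain up to the root entry and return the chip data stored
 * there.
 */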
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

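/*
 * Commit a vector/CPU pair to the hardware facing configuration and
 * update the effective affinity to the target CPU.
 */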
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

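/*
 * Install a new vector/CPU pair in the bookkeeping and deal with the
 * previous one: either queue it for cleanup or release it right away.
 */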
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

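/*
 * Put the interrupt into reservation mode: account a global reservation
 * in the matrix and park the interrupt on the shutdown vector. A real
 * vector is assigned at activation time.
 */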
static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

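/*
 * Allocate a vector for @irqd targeting a CPU in @dest, unless the
 * current target is still usable. Returns the new vector number on
 * success, 0 if nothing had to be done, or a negative error code.
 */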
static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	if (vector > 0)
		apic_update_vector(irqd, vector, cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	return vector;
}

static int assign_vector_locked(struct irq_data *irqd,
				const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector = allocate_vector(irqd, dest);

	if (vector < 0)
		return vector;

	apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node == NUMA_NO_NODE)
		goto all;
	/* Try the intersection of @affmsk and node mask */
	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the node mask */
	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
		return 0;
all:
	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

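/*
 * Pick the allocation strategy for a freshly allocated interrupt:
 * managed affinity gets a managed reservation, an explicit target mask
 * gets a vector right away, everything else gets a plain reservation.
 */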
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
	cpu = cpumask_first(vector_searchmask);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	/* set_affinity might be called for an already suitable target */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

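/*
 * Release the current vector of @irqd in the matrix and in the per CPU
 * vector array, including a still pending previous vector of an
 * unfinished move.
 */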
static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}
	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered. Handle
	 * it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

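/*
 * Legacy PIC interrupts come in with their ISA vector preassigned. Keep
 * the vector if the interrupt is already activated, otherwise release it
 * and tell the caller to reallocate through the regular policy.
 */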
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently the vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	unsigned int cpu, vector, prev_cpu, prev_vector;
	struct apic_chip_data *apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	apicd = irqd->chip_data;
	if (!apicd) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = apicd->cpu;
	vector = apicd->vector;
	prev_cpu = apicd->prev_cpu;
	prev_vector = apicd->prev_vector;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
	if (prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
	}
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/* for MSI and HT dyn irq */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign_system here so the vector won't get accounted as
	 * allocated and movable in the cpu hotplug check and it prevents
	 * managed irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, now online the vector matrix */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

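/*
 * Compute the initial vector_irq[] entry for @vector on a CPU which is
 * about to come online: active legacy PIC interrupts are installed,
 * everything else starts out as VECTOR_UNUSED.
 */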
static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non-legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

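/*
 * Acknowledge an edge interrupt at the local APIC and kick off any
 * pending vector move cleanup for it.
 */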
void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	irq_move_irq(irqd);
	ack_APIC_irq();
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * This should never happen. Managed interrupts are not
	 * migrated except on CPU down, which does not involve the
	 * cleanup vector. But try to keep the accounting correct
	 * nevertheless.
	 */
	WARN_ON_ONCE(managed);

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

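/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: release the previous vector of
 * every interrupt queued on this CPU's cleanup list.
 */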
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}

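/*
 * Queue the interrupt for cleanup on its previous target CPU and send
 * IRQ_MOVE_CLEANUP_VECTOR to it. If that CPU is offline, the regular
 * cleanup cannot run; just drop the previous vector.
 */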
static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

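/*
 * Called while handling an external interrupt. The entry stubs store
 * ~vector in pt_regs::orig_ax, so complement it again to recover the
 * vector this interrupt arrived on.
 */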
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *                            is effective, i.e. it's raised on
		 *                            the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);