/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif
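/*
 * Overview of the data above: vector_matrix is the bitmap based
 * allocator tracking which vector is in use on which CPU, vector_lock
 * serializes all updates to it, vector_searchmask is a scratch cpumask
 * for affinity calculations, and cleanup_list queues the
 * apic_chip_data of moved interrupts whose old vector still has to be
 * released on the previous target CPU.
 */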
void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}
void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}
struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}
static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/* Setup the vector move, if required */
	if (apicd->vector && cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		apicd->prev_vector = 0;
	}

	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}
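/*
 * Note: When the interrupt already had a vector on an online CPU, the
 * old vector is not released here. It stays installed on the previous
 * CPU until the move cleanup has run (see free_moved_vector() and the
 * cleanup IPI handler below), which is why prev_vector and prev_cpu
 * are recorded and move_in_progress is set.
 */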
static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}
static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}
static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}
static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}
static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	if (vector > 0)
		apic_update_vector(irqd, vector, cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	return vector;
}
static int assign_vector_locked(struct irq_data *irqd,
				const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector = allocate_vector(irqd, dest);

	if (vector < 0)
		return vector;

	apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	return 0;
}
static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node == NUMA_NO_NODE)
		goto all;
	/* Try the intersection of @affmsk and node mask */
	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the node mask */
	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
		return 0;
all:
	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}
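/*
 * The fallback order above widens the search step by step: first the
 * intersection of the affinity mask and the local NUMA node, then the
 * whole node, then the affinity mask restricted to online CPUs, and
 * finally any online CPU. The first assign_vector_locked() call which
 * succeeds (returns 0) wins.
 */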
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}
static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
	cpu = cpumask_first(vector_searchmask);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}
static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}
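/*
 * clear_irq_vector() releases both the currently assigned vector and,
 * if a move was still pending, the previous vector. Hence it must run
 * with vector_lock held, and it also removes the entry from the
 * cleanup list so the cleanup IPI handler cannot trip over it.
 */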
static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}
static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret)
		apicd->has_reserved = false;
	return ret;
}
static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered. Handle
	 * it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool early)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, early);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (early || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
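/*
 * Activation distinguishes three cases: an early activation or a
 * managed interrupt which is shut down gets the shutdown placeholder
 * vector, a managed interrupt gets a vector from its reserved region,
 * and an interrupt with a global reservation gets a real vector via
 * activate_reserved(). Fixed assigned vectors need no action at all.
 */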
static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
			   struct irq_data *irqd, int ind)
{
	unsigned int cpu, vector, prev_cpu, prev_vector;
	struct apic_chip_data *apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	apicd = irqd->chip_data;
	if (!apicd) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = apicd->cpu;
	vector = apicd->vector;
	prev_cpu = apicd->prev_cpu;
	prev_vector = apicd->prev_vector;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
	if (prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
	}
}
#endif
static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if the PIC is present at this point, so we need to
	 * do probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}
void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}
void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* The system vectors are reserved; bring the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}
int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}
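/*
 * The matrix covers NR_VECTORS entries per CPU, but only the range
 * FIRST_EXTERNAL_VECTOR ... FIRST_SYSTEM_VECTOR - 1 is available for
 * device interrupt allocation; everything outside that window belongs
 * to the system vectors which lapic_assign_system_vectors() above
 * marks as such.
 */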
#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}
/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * distributed out.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}
void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}
static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}
#else
# define apic_set_affinity	NULL
#endif
static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	irq_move_irq(irqd);
	ack_APIC_irq();
}
static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};
#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * This should never happen. Managed interrupts are not
	 * migrated except on CPU down, which does not involve the
	 * cleanup vector. But try to keep the accounting correct
	 * anyway.
	 */
	WARN_ON_ONCE(managed);

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APICs IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}
static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}
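/*
 * The cleanup handshake: __send_cleanup_vector() queues the entry on
 * the previous target CPU's cleanup_list and sends an
 * IRQ_MOVE_CLEANUP_VECTOR IPI to that CPU. The IPI handler,
 * smp_irq_move_cleanup_interrupt() above, then releases the old vector
 * once it is no longer pending in the local APIC's IRR. If the
 * previous CPU went offline in the meantime, there is nothing to clean
 * up on it and only prev_vector is reset.
 */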
void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
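/*
 * Note on the ~get_irq_regs()->orig_ax trick: the interrupt entry code
 * stores the vector number one's-complemented in pt_regs->orig_ax, so
 * inverting it again recovers the vector on which this interrupt
 * actually arrived. That lets __irq_complete_move() check whether the
 * interrupt already fired on the new vector/CPU before sending the
 * cleanup IPI.
 */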
/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* Select ISR via OCW3, read it, then switch back to IRR */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);