// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *	      migration without losing interrupts for iosapic
 *	      architecture.
 */

#include <asm/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/mca.h>

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}

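/*
 * irq <-> vector translation for CONFIG_IA64_GENERIC kernels: these
 * __ia64_* helpers presumably serve as the default implementations
 * behind the machine-vector (machvec) indirection, which individual
 * platforms may override.
 */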
#ifdef CONFIG_IA64_GENERIC
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}

unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return __this_cpu_read(vector_irq[vec]);
}
#endif

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}
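
/*
 * Note: arch_show_interrupts() above is the hook the generic
 * show_interrupts() (kernel/irq/proc.c) calls while rendering
 * /proc/interrupts; ia64 contributes only the spurious-interrupt
 * ERR count.
 */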

#ifdef CONFIG_SMP
static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };

void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_get_affinity_mask(irq),
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}

bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}
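
/*
 * Note: is_affinity_mask_valid() above is consulted when user space
 * writes /proc/irq/<n>/smp_affinity; the single-CPU restriction on sn2
 * presumably reflects SN2 interrupt hardware that can only target one
 * CPU at a time.
 */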

#endif /* CONFIG_SMP */

int __init arch_early_irq_init(void)
{
	ia64_mca_irq_init();
	return 0;
}

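/*
 * Note: the generic early_irq_init() invokes arch_early_irq_init() from
 * start_kernel(), so the ia64 MCA interrupt setup above runs before any
 * of those interrupts can be taken.
 */
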
#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_mask has already been updated, we just need to look
 * for IRQs whose affinity mask no longer contains any online CPU.
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the CPU
		 * not to respond to these local intr sources, such as
		 * ITV, CPEI and MCA.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_any_and(irq_data_get_affinity_mask(data),
				    cpu_online_mask) >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential, currently WARN_ON.. maybe panic?
			 */
			if (chip && chip->irq_disable &&
			    chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					 !chip->irq_enable ||
					 !chip->irq_set_affinity));
			}
		}
	}
}
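
/*
 * Note: fixup_irqs() below appears to run on the CPU going offline (via
 * the ia64 __cpu_disable() path) and hands its IRQs to surviving CPUs in
 * three phases: migrate bindings, process vectors already pending in the
 * local APIC, then replay anything that fired while an RTE was being
 * re-programmed.
 */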

void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable the timer (bit 16 of cr.itv is the mask bit) */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * the local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in the local APIC.
	 * This accounts for cases where a device interrupted while its RTE
	 * was being disabled and re-programmed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq]=0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let the processor die. We do irq disable and max_xtp() to
	 * ensure that no more interrupts are routed to this processor.
	 * The local timer interrupt can still have one pending, which is
	 * taken care of in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif