/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
#include <asm/trampoline.h>

/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };

/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
	{[0 ... NR_CPUS-1]  = ~0UL };

/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;

/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;

/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;

/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;

/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;

/* Bitmask of currently online CPUs - used by setup.c for
 * /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

/* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used
 * by the scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);

/* Inline functions */
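/* A note on the QIC CPI mailbox used below: the sender writes a word
 * carrying its own CPU number in the high 16 bits and the CPI in the
 * low bits; reading the word back (see ack_QIC_CPI()) is what clears
 * it in the QIC hardware */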
static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
	voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
	    (smp_processor_id() << 16) + cpi;
}

static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpuset & (1 << cpu)) {
#ifdef VOYAGER_DEBUG
			if (!cpu_online(cpu))
				VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
					"cpu_online_map\n",
					hard_smp_processor_id(), cpi, cpu));
#endif
			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
		}
	}
}

static inline void wrapper_smp_local_timer_interrupt(void)
{
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
}

static inline void send_one_CPI(__u8 cpu, __u8 cpi)
{
	if (voyager_quad_processors & (1 << cpu))
		send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
	else
		send_CPI(1 << cpu, cpi);
}

static inline void send_CPI_allbutself(__u8 cpi)
{
	__u8 cpu = smp_processor_id();
	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
	send_CPI(mask, cpi);
}

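/* Voyager distinguishes three overlapping classes of processor, probed
 * by the three helpers below: Quad CPUs sit behind a QIC, extended VIC
 * processors take part in interrupt distribution, and only processors
 * that are both extended and in the allowed-boot mask may be started
 * via the VIC boot CPI */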
static inline int is_cpu_quad(void)
{
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}

static inline int is_cpu_extended(void)
{
	__u8 cpu = hard_smp_processor_id();

	return (voyager_extended_vic_processors & (1 << cpu));
}

static inline int is_cpu_vic_boot(void)
{
	__u8 cpu = hard_smp_processor_id();

	return (voyager_extended_vic_processors
		& voyager_allowed_boot_processors & (1 << cpu));
}

static inline void ack_CPI(__u8 cpi)
{
	switch (cpi) {
	case VIC_CPU_BOOT_CPI:
		if (is_cpu_quad() && !is_cpu_vic_boot())
			ack_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	case VIC_SYS_INT:
	case VIC_CMN_INT:
		/* These are slightly strange.  Even on the Quad card,
		 * they are vectored as VIC CPIs */
		if (is_cpu_quad())
			ack_special_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	default:
		printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
		break;
	}
}

/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
	.name = "VIC",
	.startup = startup_vic_irq,
	.mask = mask_vic_irq,
	.unmask = unmask_vic_irq,
	.set_affinity = set_vic_irq_affinity,
};

/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;

/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;

/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };

/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;

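/* 0x0b written to an 8259 command port is the OCW3 "read ISR" command;
 * the slave answers on port 0xa0 and the master on 0x20, giving the
 * 16 bit in-service mask assembled below */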
/* debugging routine to read the isr of the cpu's pic */
static inline __u16 vic_read_isr(void)
{
	__u16 isr;

	outb(0x0b, 0xa0);
	isr = inb(0xa0) << 8;
	outb(0x0b, 0x20);
	isr |= inb(0x20);

	return isr;
}

static __init void qic_setup(void)
{
	if (!is_cpu_quad()) {
		/* not a quad, no setup */
		return;
	}
	outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
	outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

	if (is_cpu_extended()) {
		/* the QIC duplicate of the VIC base register */
		outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
		outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

		/* FIXME: should set up the QIC timer and memory parity
		 * error vectors here */
	}
}

static __init void vic_setup_pic(void)
{
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* clear the claim registers for dynamic routing */
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	outb(0, VIC_PRIORITY_REGISTER);
	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
	/* Now initialise the master PIC belonging to this CPU by
	 * sending the four ICWs */

	/* ICW1: level triggered, ICW4 needed */
	outb(0x19, 0x20);

	/* ICW2: vector base */
	outb(FIRST_EXTERNAL_VECTOR, 0x21);

	/* ICW3: slave at line 2 */
	outb(0x04, 0x21);

	/* ICW4: 8086 mode */
	outb(0x01, 0x21);

	/* now the same for the slave PIC */

	/* ICW1: level trigger, ICW4 needed */
	outb(0x19, 0xA0);

	/* ICW2: slave vector base */
	outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

	/* ICW3: slave ID */
	outb(0x02, 0xA1);

	/* ICW4: 8086 mode */
	outb(0x01, 0xA1);
}

static void do_quad_bootstrap(void)
{
	if (is_cpu_quad() && is_cpu_vic_boot()) {
		int i;
		unsigned long flags;
		__u8 cpuid = hard_smp_processor_id();

		local_irq_save(flags);

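		/* (cpuid >> 2) & 0x03 below picks out this CPU's own slot
		 * among the four walked by the loop (e.g. cpuid 5 gives
		 * slot 1), and that slot is skipped so we never lower our
		 * own mask */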
		for (i = 0; i < 4; i++) {
			/* FIXME: this would be >>3 &0x7 on the 32 way */
			if (((cpuid >> 2) & 0x03) == i)
				/* don't lower our own mask! */
				continue;

			/* masquerade as local Quad CPU */
			outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
			/* enable the startup CPI */
			outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
			/* restore cpu id */
			outb(0, QIC_PROCESSOR_ID);
		}
		local_irq_restore(flags);
	}
}

/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init find_smp_config(void)
{
	int i;

	boot_cpu_id = hard_smp_processor_id();

	printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

	/* initialize the CPU structures (moved from smp_boot_cpus) */
	for (i = 0; i < NR_CPUS; i++) {
		cpu_irq_affinity[i] = ~0;
	}
	cpu_online_map = cpumask_of_cpu(boot_cpu_id);

	/* The boot CPU must be extended */
	voyager_extended_vic_processors = 1 << boot_cpu_id;
	/* initially, all of the first 8 CPUs can boot */
	voyager_allowed_boot_processors = 0xff;
	/* set up everything for just this CPU, we can alter
	 * this as we start the other CPUs later */
	/* now get the CPU disposition from the extended CMOS */
	cpus_addr(phys_cpu_present_map)[0] =
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
				       2) << 16;
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
				       3) << 24;
	cpu_possible_map = phys_cpu_present_map;
	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
	       cpus_addr(phys_cpu_present_map)[0]);
	/* Here we set up the VIC to enable SMP */
	/* enable the CPIs by writing the base vector to their register */
	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* set the claim registers for static routing --- Boot CPU gets
	 * all interrupts until all other CPUs are started */
	outb(0xff, VIC_CLAIM_REGISTER_0);
	outb(0xff, VIC_CLAIM_REGISTER_1);
	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

	/* Finally tell the firmware that we're driving */
	outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
	     VOYAGER_SUS_IN_CONTROL_PORT);

	current_thread_info()->cpu = boot_cpu_id;
	x86_write_percpu(cpu_number, boot_cpu_id);
}

/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;

	identify_secondary_cpu(c);
}

/* Routine initially called when a non-boot CPU is brought online */
static void __init start_secondary(void *unused)
{
	__u8 cpuid = hard_smp_processor_id();

	cpu_init();

	/* OK, we're in the routine */
	ack_CPI(VIC_CPU_BOOT_CPI);

	/* setup the 8259 master slave pair belonging to this CPU ---
	 * we won't actually receive any until the boot CPU
	 * relinquishes its static routing mask */
	vic_setup_pic();

	qic_setup();

	if (is_cpu_quad() && !is_cpu_vic_boot()) {
		/* clear the boot CPI */
		__u8 dummy;

		dummy =
		    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
		printk("read dummy %d\n", dummy);
	}

	/* lower the mask to receive CPIs */
	vic_enable_cpi();

	VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

	/* enable interrupts */
	local_irq_enable();

	/* get our bogomips */
	calibrate_delay();

	/* save our processor parameters */
	smp_store_cpu_info(cpuid);

	/* if we're a quad, we may need to bootstrap other CPUs */
	do_quad_bootstrap();

	/* FIXME: this is rather a poor hack to prevent the CPU
	 * activating softirqs while it's supposed to be waiting for
	 * permission to proceed.  Without this, the new per CPU stuff
	 * in the softirqs will fail */
	local_irq_disable();
	cpu_set(cpuid, cpu_callin_map);

	/* signal that we're done */
	cpu_booted_map = 1;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rep_nop();
	local_irq_enable();

	local_flush_tlb();

	cpu_set(cpuid, cpu_online_map);
	wmb();
	cpu_idle();
}

/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */

static void __init do_boot_cpu(__u8 cpu)
{
	struct task_struct *idle;
	int timeout;
	unsigned long flags;
	int quad_boot = (1 << cpu) & voyager_quad_processors
	    & ~(voyager_extended_vic_processors
		& voyager_allowed_boot_processors);

	/* This is the format of the CPI IDT gate (in real mode) which
	 * we're hijacking to boot the CPU */
	union IDTFormat {
		struct seg {
			__u16 Offset;
			__u16 Segment;
		} idt;
		__u32 val;
	} hijack_source;

	__u32 *hijack_vector;
	__u32 start_phys_address = setup_trampoline();

	/* There's a clever trick to this: The linux trampoline is
	 * compiled to begin at absolute location zero, so make the
	 * address zero but have the data segment selector compensate
	 * for the actual address */
	hijack_source.idt.Offset = start_phys_address & 0x000F;
	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
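	/* Worked example: a trampoline at physical 0x9F000 yields
	 * Offset 0x0000 and Segment 0x9F00; real mode resolves
	 * segment * 16 + offset straight back to 0x9F000 */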

	cpucount++;
	alternatives_smp_switch(1);

	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU%d", cpu);
	idle->thread.ip = (unsigned long)start_secondary;
	/* init_tasks (in sched.c) is indexed logically */
	stack_start.sp = (void *)idle->thread.sp;

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	irq_ctx_init(cpu);

	/* Note: Don't modify initial ss override */
	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
		hijack_source.idt.Offset, stack_start.sp));

	/* init lowmem identity mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
	flush_tlb_all();

	if (quad_boot) {
		printk("CPU %d: non extended Quad boot\n", cpu);
		hijack_vector =
		    (__u32 *)
		    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
	} else {
		printk("CPU%d: extended VIC boot\n", cpu);
		hijack_vector =
		    (__u32 *)
		    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
		/* VIC errata, may also receive interrupt at this address */
		hijack_vector =
		    (__u32 *)
		    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
				  VIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
	}
	/* All non-boot CPUs start with interrupts fully masked.  Need
	 * to lower the mask of the CPI we're about to send.  We do
	 * this in the VIC by masquerading as the processor we're
	 * about to boot and lowering its interrupt mask */
	local_irq_save(flags);
	if (quad_boot) {
		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
	} else {
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		/* here we're altering registers belonging to `cpu' */

		outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
		/* now go back to our original identity */
		outb(boot_cpu_id, VIC_PROCESSOR_ID);

		/* and boot the CPU */

		send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
	}
	cpu_booted_map = 0;
	local_irq_restore(flags);

	/* now wait for it to become ready (or timeout) */
	for (timeout = 0; timeout < 50000; timeout++) {
		if (cpu_booted_map)
			break;
		udelay(100);
	}
	/* reset the page table */
	zap_low_mappings();

	if (cpu_booted_map) {
		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
			cpu, smp_processor_id()));

		printk("CPU%d: ", cpu);
		print_cpu_info(&cpu_data(cpu));
		wmb();
		cpu_set(cpu, cpu_callout_map);
		cpu_set(cpu, cpu_present_map);
	} else {
		printk("CPU%d FAILED TO BOOT: ", cpu);
		if (*
		    ((volatile unsigned char *)phys_to_virt(start_phys_address))
		    == 0xA5)
			printk("Stuck.\n");
		else
			printk("Not responding.\n");

		cpucount--;
	}
}

void __init smp_boot_cpus(void)
{
	int i;

	/* CAT BUS initialisation must be done after the memory */
	/* FIXME: The L4 has a catbus too, it just needs to be
	 * accessed in a totally different way */
	if (voyager_level == 5) {
		voyager_cat_init();

		/* now that the cat has probed the Voyager System Bus, sanity
		 * check the cpu map */
		if (((voyager_quad_processors | voyager_extended_vic_processors)
		     & cpus_addr(phys_cpu_present_map)[0]) !=
		    cpus_addr(phys_cpu_present_map)[0]) {
			/* should panic */
			printk("\n\n***WARNING*** "
			       "Sanity check of CPU present map FAILED\n");
		}
	} else if (voyager_level == 4)
		voyager_extended_vic_processors =
		    cpus_addr(phys_cpu_present_map)[0];

	/* this sets up the idle task to run on the current cpu */
	voyager_extended_cpus = 1;
	/* Remove the global_irq_holder setting, it triggers a BUG() on
	 * schedule at the moment */
	//global_irq_holder = boot_cpu_id;

	/* FIXME: Need to do something about this but currently only works
	 * on CPUs with a tsc which none of mine have.
	smp_tune_scheduling();
	 */
	smp_store_cpu_info(boot_cpu_id);
	printk("CPU%d: ", boot_cpu_id);
	print_cpu_info(&cpu_data(boot_cpu_id));

	if (is_cpu_quad()) {
		/* booting on a Quad CPU */
		printk("VOYAGER SMP: Boot CPU is Quad\n");
		qic_setup();
		do_quad_bootstrap();
	}

	/* enable our own CPIs */
	vic_enable_cpi();

	cpu_set(boot_cpu_id, cpu_online_map);
	cpu_set(boot_cpu_id, cpu_callout_map);

	/* loop over all the extended VIC CPUs and boot them.  The
	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
	for (i = 0; i < NR_CPUS; i++) {
		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
			continue;
		do_boot_cpu(i);
		/* This udelay seems to be needed for the Quad boots
		 * don't remove unless you know what you're doing */
		udelay(1000);
	}
	/* we could compute the total bogomips here, but why bother?
	 * Code added from smpboot.c */
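	/* (loops_per_jiffy * HZ) / 500000 is whole BogoMIPS, so
	 * bogosum / (500000 / HZ) below prints the integer part and
	 * (bogosum / (5000 / HZ)) % 100 the two decimal places */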
	{
		unsigned long bogosum = 0;

		for_each_online_cpu(i)
			bogosum += cpu_data(i).loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated "
		       "(%lu.%02lu BogoMIPS).\n",
		       cpucount + 1, bogosum / (500000 / HZ),
		       (bogosum / (5000 / HZ)) % 100);
	}
	voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
	printk("VOYAGER: Extended (interrupt handling CPUs): "
	       "%d, non-extended: %d\n", voyager_extended_cpus,
	       num_booting_cpus() - voyager_extended_cpus);
	/* that's it, switch to symmetric mode */
	outb(0, VIC_PRIORITY_REGISTER);
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}

/* Reload the secondary CPUs task structure (this function does not
 * return) */
void __init initialize_secondary(void)
{
#if 0
	// AC kernels only
	set_current(hard_get_current());
#endif

	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */

	asm volatile ("movl %0,%%esp\n\t"
		      "jmp *%1"::"r" (current->thread.sp),
		      "r"(current->thread.ip));
}

/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what went wrong you have to probe all
 * the hardware via the CAT bus.  FIXME: At the moment we do nothing. */
void smp_vic_sys_interrupt(struct pt_regs *regs)
{
	ack_CPI(VIC_SYS_INT);
	printk("Voyager SYSTEM INTERRUPT\n");
}

/* Handle a voyager CMN_INT; these interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
void smp_vic_cmn_interrupt(struct pt_regs *regs)
{
	static __u8 in_cmn_int = 0;
	static DEFINE_SPINLOCK(cmn_int_lock);

	/* common ints are broadcast, so make sure we only do this once */
	_raw_spin_lock(&cmn_int_lock);
	if (in_cmn_int)
		goto unlock_end;

	in_cmn_int++;
	_raw_spin_unlock(&cmn_int_lock);

	VDEBUG(("Voyager COMMON INTERRUPT\n"));

	if (voyager_level == 5)
		voyager_cat_do_common_interrupt();

	_raw_spin_lock(&cmn_int_lock);
	in_cmn_int = 0;
      unlock_end:
	_raw_spin_unlock(&cmn_int_lock);
	ack_CPI(VIC_CMN_INT);
}

759 | ||
760 | /* | |
761 | * Reschedule call back. Nothing to do, all the work is done | |
762 | * automatically when we return from the interrupt. */ | |
a4ec1eff | 763 | static void smp_reschedule_interrupt(void) |
1da177e4 LT |
764 | { |
765 | /* do nothing */ | |
766 | } | |
767 | ||
a4ec1eff | 768 | static struct mm_struct *flush_mm; |
1da177e4 LT |
769 | static unsigned long flush_va; |
770 | static DEFINE_SPINLOCK(tlbstate_lock); | |
1da177e4 LT |
771 | |
772 | /* | |
a4ec1eff | 773 | * We cannot call mmdrop() because we are in interrupt context, |
1da177e4 LT |
774 | * instead update mm->cpu_vm_mask. |
775 | * | |
776 | * We need to reload %cr3 since the page tables may be going | |
777 | * away from under us.. | |
778 | */ | |
925596a0 | 779 | static inline void voyager_leave_mm(unsigned long cpu) |
1da177e4 LT |
780 | { |
781 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | |
782 | BUG(); | |
783 | cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask); | |
784 | load_cr3(swapper_pg_dir); | |
785 | } | |
786 | ||
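/* The cross-CPU flush handshake: voyager_flush_tlb_others() below sets
 * the target CPUs' bits in smp_invalidate_needed and sends
 * VIC_INVALIDATE_CPI; each target flushes and clears its own bit in
 * smp_invalidate_interrupt(), while the sender spins (with a timeout)
 * until the mask drains to zero */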
/*
 * Invalidate call-back
 */
static void smp_invalidate_interrupt(void)
{
	__u8 cpu = smp_processor_id();

	if (!test_bit(cpu, &smp_invalidate_needed))
		return;
	/* This will flood messages.  Don't uncomment unless you see
	 * problems with cross cpu invalidation
	VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
		smp_processor_id()));
	*/

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			voyager_leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	clear_bit(cpu, &smp_invalidate_needed);
	smp_mb__after_clear_bit();
}

/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
			 unsigned long va)
{
	int stuck = 50000;

	if (!cpumask)
		BUG();
	if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &smp_invalidate_needed);
	/*
	 * We have to send the CPI only to
	 * CPUs affected.
	 */
	send_CPI(cpumask, VIC_INVALIDATE_CPI);

	while (smp_invalidate_needed) {
		mb();
		if (--stuck == 0) {
			printk("***WARNING*** Stuck doing invalidate CPI "
			       "(CPU%d)\n", smp_processor_id());
			break;
		}
	}

	/* Uncomment only to debug invalidation problems
	VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
	*/

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	local_flush_tlb();
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			voyager_leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			voyager_leave_mm(smp_processor_id());
	}

	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

EXPORT_SYMBOL(flush_tlb_page);
920 | |
921 | /* enable the requested IRQs */ | |
a4ec1eff | 922 | static void smp_enable_irq_interrupt(void) |
1da177e4 LT |
923 | { |
924 | __u8 irq; | |
925 | __u8 cpu = get_cpu(); | |
926 | ||
927 | VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, | |
a4ec1eff | 928 | vic_irq_enable_mask[cpu])); |
1da177e4 LT |
929 | |
930 | spin_lock(&vic_irq_lock); | |
a4ec1eff IM |
931 | for (irq = 0; irq < 16; irq++) { |
932 | if (vic_irq_enable_mask[cpu] & (1 << irq)) | |
1da177e4 LT |
933 | enable_local_vic_irq(irq); |
934 | } | |
935 | vic_irq_enable_mask[cpu] = 0; | |
936 | spin_unlock(&vic_irq_lock); | |
937 | ||
938 | put_cpu_no_resched(); | |
939 | } | |

/*
 * CPU halt call-back
 */
static void smp_stop_cpu_function(void *dummy)
{
	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for (;;)
		halt();
}

static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	volatile unsigned long started;
	volatile unsigned long finished;
	int wait;
};

static struct call_data_struct *call_data;

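/* Protocol for call_data: the initiator fills in the structure and
 * sets 'started' (and, when waiting, 'finished') to the mask of target
 * CPUs; each target clears its bit in 'started' before running func
 * and in 'finished' afterwards, so the initiator just spins until the
 * masks drain to zero */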
/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	/* must take copy of wait because call_data may be replaced
	 * unless the function is waiting for us to finish */
	int wait = call_data->wait;
	__u8 cpu = smp_processor_id();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	if (!test_and_clear_bit(cpu, &call_data->started)) {
		/* If the bit wasn't set, this could be a replay */
		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function"
		       " with no call pending\n", cpu);
		return;
	}
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func) (info);
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();
	if (wait) {
		mb();
		clear_bit(cpu, &call_data->finished);
	}
}

static int
voyager_smp_call_function_mask(cpumask_t cpumask,
			       void (*func) (void *info), void *info, int wait)
{
	struct call_data_struct data;
	u32 mask = cpus_addr(cpumask)[0];

	mask &= ~(1 << smp_processor_id());

	if (!mask)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.started = mask;
	data.wait = wait;
	if (wait)
		data.finished = mask;

	spin_lock(&call_lock);
	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_CPI(mask, VIC_CALL_FUNCTION_CPI);

	/* Wait for response */
	while (data.started)
		barrier();

	if (wait)
		while (data.finished)
			barrier();

	spin_unlock(&call_lock);

	return 0;
}

/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which, in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't actually use this for counting, so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

/* All of the QUAD interrupt GATES */
void smp_qic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	ack_QIC_CPI(QIC_TIMER_CPI);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

void smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_INVALIDATE_CPI);
	smp_invalidate_interrupt();
}

void smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
	smp_reschedule_interrupt();
}

void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
	smp_enable_irq_interrupt();
}

void smp_qic_call_function_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
	smp_call_function_interrupt();
}

void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	__u8 cpu = smp_processor_id();

	if (is_cpu_quad())
		ack_QIC_CPI(VIC_CPI_LEVEL0);
	else
		ack_VIC_CPI(VIC_CPI_LEVEL0);

	if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
		wrapper_smp_local_timer_interrupt();
	if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
		smp_invalidate_interrupt();
	if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
		smp_reschedule_interrupt();
	if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
		smp_enable_irq_interrupt();
	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
		smp_call_function_interrupt();
	set_irq_regs(old_regs);
}

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		voyager_leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}

/* send a reschedule CPI to one CPU by physical CPU number */
static void voyager_smp_send_reschedule(int cpu)
{
	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}

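/* VIC_PROC_WHO_AM_I reports the identity two different ways: Quad
 * CPUs answer with QUAD_IDENTIFIER set plus a 5 bit CPU number, while
 * VIC CPUs answer with a single bit set in the low 8 bits */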
int hard_smp_processor_id(void)
{
	__u8 i;
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
		return cpumask & 0x1F;

	for (i = 0; i < 8; i++) {
		if (cpumask & (1 << i))
			return i;
	}
	printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
	return 0;
}

int safe_smp_processor_id(void)
{
	return hard_smp_processor_id();
}

/* broadcast a halt to all other CPUs */
static void voyager_smp_send_stop(void)
{
	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}

/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void smp_vic_timer_interrupt(void)
{
	send_CPI_allbutself(VIC_TIMER_CPI);
	smp_local_timer_interrupt();
}

/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void smp_local_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	long weight;

	profile_tick(CPU_PROFILING);
	if (--per_cpu(prof_counter, cpu) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile.  In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
		if (per_cpu(prof_counter, cpu) !=
		    per_cpu(prof_old_multiplier, cpu)) {
			/* FIXME: need to update the vic timer tick here */
			per_cpu(prof_old_multiplier, cpu) =
			    per_cpu(prof_counter, cpu);
		}

		update_process_times(user_mode_vm(get_irq_regs()));
	}

	if (((1 << cpu) & voyager_extended_vic_processors) == 0)
		/* only extended VIC processors participate in
		 * interrupt distribution */
		return;

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * we might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */

	if ((++vic_tick[cpu] & 0x7) != 0)
		return;
	/* get here every 16 ticks (about every 1/6 of a second) */

	/* Change our priority to give someone else a chance at getting
	 * the IRQ.  The algorithm goes like this:
	 *
	 * In the VIC, the dynamically routed interrupt is always
	 * handled by the lowest priority eligible (i.e. receiving
	 * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
	 * lowest processor number gets it.
	 *
	 * The priority of a CPU is controlled by a special per-CPU
	 * VIC priority register which is 3 bits wide, 0 being lowest
	 * and 7 the highest priority.
	 *
	 * Therefore we subtract the average number of interrupts from
	 * the number we've fielded.  If this number is negative, we
	 * lower the activity count and if it is positive, we raise
	 * it.
	 *
	 * I'm afraid this still leads to odd looking interrupt counts:
	 * the totals are all roughly equal, but the individual ones
	 * look rather skewed.
	 *
	 * FIXME: This algorithm is total crap when mixed with SMP
	 * affinity code since we now try to even up the interrupt
	 * counts when an affinity binding is keeping them on a
	 * particular CPU */
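	/* Worked example: with 2 extended CPUs, a CPU that has fielded
	 * 600 of 1000 total interrupts computes
	 * ((600 * 2 - 1000) >> 4) + 4 = 16, clamped to 7: highest
	 * priority, so least likely to be handed the next IRQ */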
	weight = (vic_intr_count[cpu] * voyager_extended_cpus
		  - vic_intr_total) >> 4;
	weight += 4;
	if (weight > 7)
		weight = 7;
	if (weight < 0)
		weight = 0;

	outb((__u8) weight, VIC_PRIORITY_REGISTER);

#ifdef VOYAGER_DEBUG
	if ((vic_tick[cpu] & 0xFFF) == 0) {
		/* print this message roughly every 25 secs */
		printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
		       cpu, vic_tick[cpu], weight);
	}
#endif
}

/* setup the profiling timer */
int setup_profiling_timer(unsigned int multiplier)
{
	int i;

	if (!multiplier)
		return -EINVAL;

	/*
	 * Set the new multiplier for each CPU.  CPUs don't start using the
	 * new values until the next timer interrupt in which they do process
	 * accounting.
	 */
	for (i = 0; i < NR_CPUS; ++i)
		per_cpu(prof_multiplier, i) = multiplier;

	return 0;
}

/* This is a bit of a mess, but forced on us by the genirq changes:
 * there's no genirq handler that really does what voyager wants, so
 * hack it up with the simple IRQ handler */
static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
	before_handle_vic_irq(irq);
	handle_simple_irq(irq, desc);
	after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * on in smp_init */
#define VIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))

void __init smp_intr_init(void)
{
	int i;

	/* initialize the per cpu irq mask to all disabled */
	for (i = 0; i < NR_CPUS; i++)
		vic_irq_mask[i] = 0xFFFF;

	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

	VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
	VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

	QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
	QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
	QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
	QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
	QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

	/* now put the VIC descriptor into the first 48 IRQs
	 *
	 * This is for later: first 16 correspond to PC IRQs; next 16
	 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
	for (i = 0; i < 48; i++)
		set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}

/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI) */
static void send_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;
	__u32 quad_cpuset = (cpuset & voyager_quad_processors);

	if (cpi < VIC_START_FAKE_CPI) {
		/* fake CPI are only used for booting, so send to the
		 * extended quads as well---Quads must be VIC booted */
		outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
		return;
	}
	if (quad_cpuset)
		send_QIC_CPI(quad_cpuset, cpi);
	cpuset &= ~quad_cpuset;
	cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
	if (cpuset == 0)
		return;
	for_each_online_cpu(cpu) {
		if (cpuset & (1 << cpu))
			set_bit(cpi, &vic_cpi_mailbox[cpu]);
	}
	if (cpuset)
		outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}

/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 */
static int ack_QIC_CPI(__u8 cpi)
{
	__u8 cpu = hard_smp_processor_id();

	cpi &= 7;

	outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void ack_special_QIC_CPI(__u8 cpi)
{
	switch (cpi) {
	case VIC_CMN_INT:
		outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
		break;
	case VIC_SYS_INT:
		outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
		break;
	}
	/* also clear at the VIC, just in case (nop for non-extended proc) */
	ack_VIC_CPI(cpi);
}

/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
	unsigned long flags;
	__u16 isr;
	__u8 cpu = smp_processor_id();

	local_irq_save(flags);
	isr = vic_read_isr();
	if ((isr & (1 << (cpi & 7))) == 0) {
		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
	}
#endif
	/* send specific EOI; the two system interrupts have
	 * bit 4 set for a separate vector but behave as the
	 * corresponding 3 bit intr */
	outb_p(0x60 | (cpi & 7), 0x20);

#ifdef VOYAGER_DEBUG
	if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
	}
	local_irq_restore(flags);
#endif
}

/* cribbed with thanks from irq.c */
#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define cached_21(cpu)	(__byte(0,vic_irq_mask[cpu]))
#define cached_A1(cpu)	(__byte(1,vic_irq_mask[cpu]))
1426 | ||
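A quick worked example of these accessors (illustrative only): on little-endian x86, byte 0 of the 16-bit vic_irq_mask is the master 8259 mask written to port 0x21, and byte 1 is the slave mask written to port 0xA1.

	__u16 m = 0xA55A;		/* stand-in for vic_irq_mask[cpu] */
	__u8 master = __byte(0, m);	/* 0x5A, i.e. cached_21(cpu) */
	__u8 slave = __byte(1, m);	/* 0xA5, i.e. cached_A1(cpu) */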
a4ec1eff | 1427 | static unsigned int startup_vic_irq(unsigned int irq) |
1da177e4 | 1428 | { |
c771746e | 1429 | unmask_vic_irq(irq); |
1da177e4 LT |
1430 | |
1431 | return 0; | |
1432 | } | |
1433 | ||
1434 | /* The enable and disable routines. This is where we run into | |
1435 | * conflicting architectural philosophy. Fundamentally, the voyager | |
1436 | * architecture does not expect to have to disable interrupts globally | |
1437 | * (the IRQ controllers belong to each CPU). The processor masquerade | |
1438 | * which is used to start the system shouldn't be used in a running OS | |
1439 | * since it will cause great confusion if two separate CPUs drive to | |
1440 | * the same IRQ controller (I know, I've tried it). | |
1441 | * | |
1442 | * The solution is a variant on the NCR lazy SPL design: | |
1443 | * | |
1444 | * 1) To disable an interrupt, do nothing (other than set the | |
1445 | * IRQ_DISABLED flag). This dares the interrupt actually to arrive. | |
1446 | * | |
1447 | * 2) If the interrupt dares to come in, raise the local mask against | |
1448 | * it (this will result in all the CPU masks being raised | |
1449 | * eventually). | |
1450 | * | |
1451 | * 3) To enable the interrupt, lower the mask on the local CPU and | |
1452 | * broadcast an Interrupt enable CPI which causes all other CPUs to | |
1453 | * adjust their masks accordingly. */ | |
1454 | ||
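To make the three numbered steps concrete, here is a minimal self-contained userspace model of the scheme (an editor's sketch only: lazy_disable(), irq_arrives() and lazy_enable() are invented names, and the real versions below add locking, affinity checks and the actual enable CPI):

	#include <stdio.h>

	#define NCPU 4

	static unsigned int hw_mask[NCPU];	/* per-CPU controller mask */
	static int irq_disabled[32];		/* models the IRQ_DISABLED flag */

	static void lazy_disable(int irq)
	{
		irq_disabled[irq] = 1;		/* step 1: set the flag, touch no hardware */
	}

	static void irq_arrives(int cpu, int irq)
	{
		if (irq_disabled[irq])
			hw_mask[cpu] |= 1u << irq;	/* step 2: raise only the local mask */
	}

	static void lazy_enable(int irq)
	{
		int cpu;

		irq_disabled[irq] = 0;
		for (cpu = 0; cpu < NCPU; cpu++)	/* step 3: stands in for the
							 * broadcast enable CPI */
			hw_mask[cpu] &= ~(1u << irq);
	}

	int main(void)
	{
		lazy_disable(5);
		irq_arrives(2, 5);		/* only CPU2's mask goes up */
		printf("cpu2 mask 0x%x\n", hw_mask[2]);
		lazy_enable(5);
		printf("cpu2 mask 0x%x\n", hw_mask[2]);
		return 0;
	}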
a4ec1eff | 1455 | static void unmask_vic_irq(unsigned int irq) |
1da177e4 LT |
1456 | { |
1457 | /* linux doesn't do processor-irq affinity, so enable on | |
1458 | * all CPUs we know about */ | |
1459 | int cpu = smp_processor_id(), real_cpu; | |
a4ec1eff | 1460 | __u16 mask = (1 << irq); |
1da177e4 LT |
1461 | __u32 processorList = 0; |
1462 | unsigned long flags; | |
1463 | ||
c771746e | 1464 | VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n", |
1da177e4 LT |
1465 | irq, cpu, cpu_irq_affinity[cpu])); |
1466 | spin_lock_irqsave(&vic_irq_lock, flags); | |
1467 | for_each_online_cpu(real_cpu) { | |
a4ec1eff | 1468 | if (!(voyager_extended_vic_processors & (1 << real_cpu))) |
1da177e4 | 1469 | continue; |
a4ec1eff | 1470 | if (!(cpu_irq_affinity[real_cpu] & mask)) { |
1da177e4 LT |
1471 | /* irq has no affinity for this CPU, ignore */ |
1472 | continue; | |
1473 | } | |
a4ec1eff | 1474 | if (real_cpu == cpu) { |
1da177e4 | 1475 | enable_local_vic_irq(irq); |
a4ec1eff | 1476 | } else if (vic_irq_mask[real_cpu] & mask) { |
1da177e4 | 1477 | vic_irq_enable_mask[real_cpu] |= mask; |
a4ec1eff | 1478 | processorList |= (1 << real_cpu); |
1da177e4 LT |
1479 | } |
1480 | } | |
1481 | spin_unlock_irqrestore(&vic_irq_lock, flags); | |
a4ec1eff | 1482 | if (processorList) |
1da177e4 LT |
1483 | send_CPI(processorList, VIC_ENABLE_IRQ_CPI); |
1484 | } | |
1485 | ||
a4ec1eff | 1486 | static void mask_vic_irq(unsigned int irq) |
1da177e4 LT |
1487 | { |
1488 | /* lazy disable, do nothing */ | |
1489 | } | |
1490 | ||
a4ec1eff | 1491 | static void enable_local_vic_irq(unsigned int irq) |
1da177e4 LT |
1492 | { |
1493 | __u8 cpu = smp_processor_id(); | |
1494 | __u16 mask = ~(1 << irq); | |
1495 | __u16 old_mask = vic_irq_mask[cpu]; | |
1496 | ||
1497 | vic_irq_mask[cpu] &= mask; | |
a4ec1eff | 1498 | if (vic_irq_mask[cpu] == old_mask) |
1da177e4 LT |
1499 | return; |
1500 | ||
1501 | VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", | |
1502 | irq, cpu)); | |
1503 | ||
1504 | if (irq & 8) { | |
a4ec1eff | 1505 | outb_p(cached_A1(cpu), 0xA1); |
1da177e4 | 1506 | (void)inb_p(0xA1); |
a4ec1eff IM |
1507 | } else { |
1508 | outb_p(cached_21(cpu), 0x21); | |
1da177e4 LT |
1509 | (void)inb_p(0x21); |
1510 | } | |
1511 | } | |
1512 | ||
a4ec1eff | 1513 | static void disable_local_vic_irq(unsigned int irq) |
1da177e4 LT |
1514 | { |
1515 | __u8 cpu = smp_processor_id(); | |
1516 | __u16 mask = (1 << irq); | |
1517 | __u16 old_mask = vic_irq_mask[cpu]; | |
1518 | ||
a4ec1eff | 1519 | if (irq == 7)	/* never mask IRQ7: it carries sys int/cmn int */ | |
1da177e4 LT |
1520 | return; |
1521 | ||
1522 | vic_irq_mask[cpu] |= mask; | |
a4ec1eff | 1523 | if (old_mask == vic_irq_mask[cpu]) |
1da177e4 LT |
1524 | return; |
1525 | ||
1526 | VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", | |
1527 | irq, cpu)); | |
1528 | ||
1529 | if (irq & 8) { | |
a4ec1eff | 1530 | outb_p(cached_A1(cpu), 0xA1); |
1da177e4 | 1531 | (void)inb_p(0xA1); |
a4ec1eff IM |
1532 | } else { |
1533 | outb_p(cached_21(cpu), 0x21); | |
1da177e4 LT |
1534 | (void)inb_p(0x21); |
1535 | } | |
1536 | } | |
1537 | ||
1538 | /* The VIC is level triggered, so the ack can only be issued after the | |
1539 | * interrupt completes. However, we do Voyager lazy interrupt | |
1540 | * handling here: It is an extremely expensive operation to mask an | |
1541 | * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If | |
1542 | * this interrupt actually comes in, then we mask and ack here to push | |
1543 | * the interrupt off to another CPU */ | |
a4ec1eff | 1544 | static void before_handle_vic_irq(unsigned int irq) |
1da177e4 LT |
1545 | { |
1546 | irq_desc_t *desc = irq_desc + irq; | |
1547 | __u8 cpu = smp_processor_id(); | |
1548 | ||
1549 | _raw_spin_lock(&vic_irq_lock); | |
1550 | vic_intr_total++; | |
1551 | vic_intr_count[cpu]++; | |
1552 | ||
a4ec1eff | 1553 | if (!(cpu_irq_affinity[cpu] & (1 << irq))) { |
1da177e4 LT |
1554 | /* The irq is not in our affinity mask, push it off |
1555 | * onto another CPU */ | |
a4ec1eff IM |
1556 | VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d " |
1557 | "on cpu %d\n", irq, cpu)); | |
1da177e4 LT |
1558 | disable_local_vic_irq(irq); |
1559 | /* set IRQ_INPROGRESS to prevent the handler in irq.c from | |
1560 | * actually calling the interrupt routine */ | |
1561 | desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; | |
a4ec1eff | 1562 | } else if (desc->status & IRQ_DISABLED) { |
1da177e4 LT |
1563 | /* Damn, the interrupt actually arrived, do the lazy |
1564 | * disable thing. The interrupt routine in irq.c will | |
1565 | * not handle an IRQ_DISABLED interrupt, so nothing more | |
1566 | * need be done here */ | |
1567 | VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n", | |
1568 | irq, cpu)); | |
1569 | disable_local_vic_irq(irq); | |
1570 | desc->status |= IRQ_REPLAY; | |
1571 | } else { | |
1572 | desc->status &= ~IRQ_REPLAY; | |
1573 | } | |
1574 | ||
1575 | _raw_spin_unlock(&vic_irq_lock); | |
1576 | } | |
1577 | ||
1578 | /* Finish the VIC interrupt: basically mask */ | |
a4ec1eff | 1579 | static void after_handle_vic_irq(unsigned int irq) |
1da177e4 LT |
1580 | { |
1581 | irq_desc_t *desc = irq_desc + irq; | |
1582 | ||
1583 | _raw_spin_lock(&vic_irq_lock); | |
1584 | { | |
1585 | unsigned int status = desc->status & ~IRQ_INPROGRESS; | |
1586 | #ifdef VOYAGER_DEBUG | |
1587 | __u16 isr; | |
1588 | #endif | |
1589 | ||
1590 | desc->status = status; | |
1591 | if ((status & IRQ_DISABLED)) | |
1592 | disable_local_vic_irq(irq); | |
1593 | #ifdef VOYAGER_DEBUG | |
1594 | /* DEBUG: before we ack, check what's in progress */ | |
1595 | isr = vic_read_isr(); | |
a4ec1eff | 1596 | if (!(isr & (1 << irq)) && !(status & IRQ_REPLAY)) { | |
1da177e4 LT |
1597 | __u8 cpu = smp_processor_id(); | |
1598 | __u8 real_cpu; | |
1da177e4 LT |
1601 | |
1602 | printk("VOYAGER SMP: CPU%d lost interrupt %d\n", | |
1603 | cpu, irq); | |
c8912599 | 1604 | for_each_possible_cpu(real_cpu) { | |
1da177e4 LT |
1605 | |
1606 | outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, | |
1607 | VIC_PROCESSOR_ID); | |
1608 | isr = vic_read_isr(); | |
a4ec1eff IM |
1609 | if (isr & (1 << irq)) { |
1610 | printk | |
1611 | ("VOYAGER SMP: CPU%d ack irq %d\n", | |
1612 | real_cpu, irq); | |
1da177e4 LT |
1613 | ack_vic_irq(irq); |
1614 | } | |
1615 | outb(cpu, VIC_PROCESSOR_ID); | |
1616 | } | |
1617 | } | |
1618 | #endif /* VOYAGER_DEBUG */ | |
1619 | /* as soon as we ack, the interrupt is eligible for | |
1620 | * receipt by another CPU so everything must be in | |
1621 | * order here */ | |
1622 | ack_vic_irq(irq); | |
a4ec1eff | 1623 | if (status & IRQ_REPLAY) { |
1da177e4 LT |
1624 | /* replay is set if we disable the interrupt |
1625 | * in the before_handle_vic_irq() routine, so | |
1626 | * clear the in progress bit here to allow the | |
1627 | * next CPU to handle this correctly */ | |
1628 | desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS); | |
1629 | } | |
1630 | #ifdef VOYAGER_DEBUG | |
1631 | isr = vic_read_isr(); | |
a4ec1eff IM |
1632 | if ((isr & (1 << irq)) != 0) |
1633 | printk("VOYAGER SMP: after_handle_vic_irq() after " | |
1634 | "ack irq=%d, isr=0x%x\n", irq, isr); | |
1da177e4 LT |
1635 | #endif /* VOYAGER_DEBUG */ |
1636 | } | |
1637 | _raw_spin_unlock(&vic_irq_lock); | |
1638 | ||
1639 | /* All code after this point is out of the main path - the IRQ | |
1640 | * may be intercepted by another CPU if reasserted */ | |
1641 | } | |
1642 | ||
1da177e4 LT |
1643 | /* Linux processor-interrupt affinity manipulations. | |
1644 | * | |
1645 | * For each processor, we maintain a 32 bit irq affinity mask. | |
1646 | * Initially it is set to all 1's so every processor accepts every | |
1647 | * interrupt. In this call, we change the processor's affinity mask: | |
1648 | * | |
1649 | * Change from enable to disable: | |
1650 | * | |
1651 | * If the interrupt ever comes in to the processor, we will disable it | |
1652 | * and ack it to push it off to another CPU, so just accept the mask here. | |
1653 | * | |
1654 | * Change from disable to enable: | |
1655 | * | |
1656 | * change the mask and then do an interrupt enable CPI to re-enable on | |
1657 | * the selected processors */ | |
1658 | ||
a4ec1eff | 1659 | void set_vic_irq_affinity(unsigned int irq, cpumask_t mask) |
1da177e4 LT |
1660 | { |
1661 | /* Only extended processors handle interrupts */ | |
1662 | unsigned long real_mask; | |
1663 | unsigned long irq_mask = 1 << irq; | |
1664 | int cpu; | |
1665 | ||
1666 | real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; | |
a4ec1eff IM |
1667 | |
1668 | if (cpus_addr(mask)[0] == 0) | |
27b46d76 | 1669 | /* can't have no CPUs to accept the interrupt -- extremely |
1da177e4 LT |
1670 | * bad things will happen */ |
1671 | return; | |
1672 | ||
a4ec1eff | 1673 | if (irq == 0) |
1da177e4 LT |
1674 | /* can't change the affinity of the timer IRQ. This |
1675 | * is due to the constraint in the voyager | |
1676 | * architecture that the CPI also comes in on and IRQ | |
1677 | * line and we have chosen IRQ0 for this. If you | |
1678 | * raise the mask on this interrupt, the processor | |
1679 | * will no-longer be able to accept VIC CPIs */ | |
1680 | return; | |
1681 | ||
a4ec1eff | 1682 | if (irq >= 32) |
1da177e4 LT |
1683 | /* You can only have 32 interrupts in a voyager system |
1684 | * (and 32 only if you have a secondary microchannel | |
1685 | * bus) */ | |
1686 | return; | |
1687 | ||
1688 | for_each_online_cpu(cpu) { | |
1689 | unsigned long cpu_mask = 1 << cpu; | |
a4ec1eff IM |
1690 | |
1691 | if (cpu_mask & real_mask) { | |
1da177e4 LT |
1692 | /* enable the interrupt for this cpu */ |
1693 | cpu_irq_affinity[cpu] |= irq_mask; | |
1694 | } else { | |
1695 | /* disable the interrupt for this cpu */ | |
1696 | cpu_irq_affinity[cpu] &= ~irq_mask; | |
1697 | } | |
1698 | } | |
1699 | /* this is magic, we now have the correct affinity maps, so | |
1700 | * enable the interrupt. This will send an enable CPI to | |
27b46d76 | 1701 | * those CPUs who need to enable it in their local masks, |
1da177e4 LT |
1702 | * causing them to correct for the new affinity. If the | |
1703 | * interrupt is currently globally disabled, it will simply be | |
1704 | * disabled again as it comes in (voyager lazy disable). If | |
1705 | * the affinity map is tightened to disable the interrupt on a | |
1706 | * cpu, it will be pushed off when it comes in */ | |
c771746e | 1707 | unmask_vic_irq(irq); |
1da177e4 LT |
1708 | } |
1709 | ||
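A usage sketch (editor's illustration; cpumask_of_cpu() is assumed to be the usual cpumask helper of this kernel generation, and the routine is presumably wired up as the .set_affinity method of vic_chip):

	set_vic_irq_affinity(9, cpumask_of_cpu(2));	/* IRQ 9 -> CPU 2 only */

After the call, only CPU 2 carries the bit for IRQ 9 in cpu_irq_affinity[]; any other CPU that receives IRQ 9 pushes it away via the before_handle_vic_irq() path above.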
a4ec1eff | 1710 | static void ack_vic_irq(unsigned int irq) |
1da177e4 LT |
1711 | { |
1712 | if (irq & 8) { | |
a4ec1eff IM |
1713 | outb(0x62, 0x20); /* Specific EOI to cascade */ |
1714 | outb(0x60 | (irq & 7), 0xA0); /* then specific EOI to the slave */ | |
1da177e4 | 1715 | } else { |
a4ec1eff | 1716 | outb(0x60 | (irq & 7), 0x20); /* specific EOI to the master */ | |
1da177e4 LT |
1717 | } |
1718 | } | |
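To make the specific-EOI encoding concrete: the 8259A OCW2 command 0x60 | level acknowledges exactly one interrupt level. A worked sketch (editor's illustration):

	int irq = 12;					/* slave interrupt: irq & 8 is set */
	unsigned char master_eoi = 0x62;		/* EOI the cascade, level 2, on the master */
	unsigned char slave_eoi = 0x60 | (irq & 7);	/* 0x64: EOI level 4 on the slave */

For irq 3 the function instead issues a single outb(0x63, 0x20) to the master.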
1719 | ||
1720 | /* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 | |
1721 | * but are not vectored by it. This means that the 8259 mask must be | |
1722 | * lowered to receive them */ | |
a4ec1eff | 1723 | static __init void vic_enable_cpi(void) |
1da177e4 LT |
1724 | { |
1725 | __u8 cpu = smp_processor_id(); | |
a4ec1eff | 1726 | |
1da177e4 LT |
1727 | /* just take a copy of the current mask (nop for boot cpu) */ |
1728 | vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; | |
1729 | ||
1730 | enable_local_vic_irq(VIC_CPI_LEVEL0); | |
1731 | enable_local_vic_irq(VIC_CPI_LEVEL1); | |
1732 | /* for sys int and cmn int */ | |
1733 | enable_local_vic_irq(7); | |
1734 | ||
a4ec1eff | 1735 | if (is_cpu_quad()) { |
1da177e4 LT |
1736 | outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); |
1737 | outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); | |
1738 | VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", | |
1739 | cpu, QIC_CPI_ENABLE)); | |
1740 | } | |
1741 | ||
1742 | VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n", | |
1743 | cpu, vic_irq_mask[cpu])); | |
1744 | } | |
1745 | ||
a4ec1eff | 1746 | void voyager_smp_dump(void) | |
1da177e4 LT |
1747 | { |
1748 | int old_cpu = smp_processor_id(), cpu; | |
1749 | ||
1750 | /* dump the interrupt masks of each processor */ | |
1751 | for_each_online_cpu(cpu) { | |
1752 | __u16 imr, isr, irr; | |
1753 | unsigned long flags; | |
1754 | ||
1755 | local_irq_save(flags); | |
1756 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); | |
1757 | imr = (inb(0xa1) << 8) | inb(0x21); | |
1758 | outb(0x0a, 0xa0); /* OCW3: next read returns IRR */ | |
1759 | irr = inb(0xa0) << 8; | |
1760 | outb(0x0a, 0x20); | |
1761 | irr |= inb(0x20); | |
1762 | outb(0x0b, 0xa0); /* OCW3: next read returns ISR */ | |
1763 | isr = inb(0xa0) << 8; | |
1764 | outb(0x0b, 0x20); | |
1765 | isr |= inb(0x20); | |
1766 | outb(old_cpu, VIC_PROCESSOR_ID); | |
1767 | local_irq_restore(flags); | |
1768 | printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n", | |
1769 | cpu, vic_irq_mask[cpu], imr, irr, isr); | |
1770 | #if 0 | |
1771 | /* These lines are put in to try to unstick an un-ack'd irq */ | |
a4ec1eff | 1772 | if (isr != 0) { |
1da177e4 | 1773 | int irq; |
a4ec1eff IM |
1774 | for (irq = 0; irq < 16; irq++) { |
1775 | if (isr & (1 << irq)) { | |
1da177e4 LT |
1776 | printk("\tCPU%d: ack irq %d\n", |
1777 | cpu, irq); | |
1778 | local_irq_save(flags); | |
1779 | outb(VIC_CPU_MASQUERADE_ENABLE | cpu, | |
1780 | VIC_PROCESSOR_ID); | |
1781 | ack_vic_irq(irq); | |
1782 | outb(old_cpu, VIC_PROCESSOR_ID); | |
1783 | local_irq_restore(flags); | |
1784 | } | |
1785 | } | |
1786 | } | |
1787 | #endif | |
1788 | } | |
1789 | } | |
1790 | ||
a4ec1eff | 1791 | void smp_voyager_power_off(void *dummy) |
1da177e4 | 1792 | { |
a4ec1eff | 1793 | if (smp_processor_id() == boot_cpu_id) |
1da177e4 LT |
1794 | voyager_power_off(); |
1795 | else | |
1796 | smp_stop_cpu_function(NULL); | |
1797 | } | |
1798 | ||
a4ec1eff | 1799 | static void __init voyager_smp_prepare_cpus(unsigned int max_cpus) |
1da177e4 LT |
1800 | { |
1801 | /* FIXME: ignore max_cpus for now */ | |
1802 | smp_boot_cpus(); | |
1803 | } | |
1804 | ||
8f818210 | 1805 | static void __cpuinit voyager_smp_prepare_boot_cpu(void) |
1da177e4 | 1806 | { |
6a3ee3d5 JF |
1807 | init_gdt(smp_processor_id()); |
1808 | switch_to_new_gdt(); | |
1809 | ||
1da177e4 LT |
1810 | cpu_set(smp_processor_id(), cpu_online_map); |
1811 | cpu_set(smp_processor_id(), cpu_callout_map); | |
4ad8d383 | 1812 | cpu_set(smp_processor_id(), cpu_possible_map); |
3c101cf0 | 1813 | cpu_set(smp_processor_id(), cpu_present_map); |
1da177e4 LT |
1814 | } |
1815 | ||
a4ec1eff | 1816 | static int __cpuinit voyager_cpu_up(unsigned int cpu) |
1da177e4 LT |
1817 | { |
1818 | /* This only works at boot for x86. See "rewrite" above. */ | |
1819 | if (cpu_isset(cpu, smp_commenced_mask)) | |
1820 | return -ENOSYS; | |
1821 | ||
1822 | /* In case one didn't come up */ | |
1823 | if (!cpu_isset(cpu, cpu_callin_map)) | |
1824 | return -EIO; | |
1825 | /* Unleash the CPU! */ | |
1826 | cpu_set(cpu, smp_commenced_mask); | |
7c04e64a | 1827 | while (!cpu_online(cpu)) |
1da177e4 LT |
1828 | mb(); |
1829 | return 0; | |
1830 | } | |
1831 | ||
a4ec1eff | 1832 | static void __init voyager_smp_cpus_done(unsigned int max_cpus) |
1da177e4 LT |
1833 | { |
1834 | zap_low_mappings(); | |
1835 | } | |
033ab7f8 | 1836 | |
a4ec1eff | 1837 | void __init smp_setup_processor_id(void) |
033ab7f8 AM |
1838 | { |
1839 | current_thread_info()->cpu = hard_smp_processor_id(); | |
6a3ee3d5 | 1840 | x86_write_percpu(cpu_number, hard_smp_processor_id()); |
033ab7f8 | 1841 | } |
6a3ee3d5 JF |
1842 | |
1843 | struct smp_ops smp_ops = { | |
1844 | .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, | |
1845 | .smp_prepare_cpus = voyager_smp_prepare_cpus, | |
1846 | .cpu_up = voyager_cpu_up, | |
1847 | .smp_cpus_done = voyager_smp_cpus_done, | |
1848 | ||
1849 | .smp_send_stop = voyager_smp_send_stop, | |
1850 | .smp_send_reschedule = voyager_smp_send_reschedule, | |
1851 | .smp_call_function_mask = voyager_smp_call_function_mask, | |
1852 | }; |