/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector the target CPUs look into
 * the right per cpu variable for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us to this right now.
 * In future when interrupts are split into per CPU domains this could be
 * fixed, at the cost of triggering multiple IPIs in some cases.
 */
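
/*
 * Illustrative sketch only, tying the description above to the code below:
 * the sending CPU picks its slot and IPI vector roughly as
 *
 *      sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 *      send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
 *
 * which is how native_flush_tlb_others() selects both the per cpu
 * flush_state entry and the vector to send.
 */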

union smp_flush_state {
        struct {
                cpumask_t flush_cpumask;
                struct mm_struct *flush_mm;
                unsigned long flush_va;
                spinlock_t tlbstate_lock;
        };
        char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        if (read_pda(mmu_state) == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
        int cpu;
        int sender;
        union smp_flush_state *f;

        cpu = smp_processor_id();
        /*
         * orig_rax contains the negated interrupt vector.
         * Use that to determine where the sender put the data.
         */
        sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
        f = &per_cpu(flush_state, sender);

        if (!cpu_isset(cpu, f->flush_cpumask))
                goto out;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */

        if (f->flush_mm == read_pda(active_mm)) {
                if (read_pda(mmu_state) == TLBSTATE_OK) {
                        if (f->flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(f->flush_va);
                } else
                        leave_mm(cpu);
        }
out:
        ack_APIC_irq();
        cpu_clear(cpu, f->flush_cpumask);
        add_pda(irq_tlb_count, 1);
}

void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
                             unsigned long va)
{
        int sender;
        union smp_flush_state *f;
        cpumask_t cpumask = *cpumaskp;

        /* Caller has disabled preemption */
        sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
        f = &per_cpu(flush_state, sender);

        /*
         * Could avoid this lock when
         * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
         * probably not worth checking this for a cache-hot lock.
         */
        spin_lock(&f->tlbstate_lock);

        f->flush_mm = mm;
        f->flush_va = va;
        cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

        while (!cpus_empty(f->flush_cpumask))
                cpu_relax();

        f->flush_mm = NULL;
        f->flush_va = 0;
        spin_unlock(&f->tlbstate_lock);
}

int __cpuinit init_smp_flush(void)
{
        int i;

        for_each_cpu_mask(i, cpu_possible_map) {
                spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
        }
        return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (read_pda(mmu_state) == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
        WARN_ON(cpu_is_offline(cpu));
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct *call_data;

void lock_ipi_call_lock(void)
{
        spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
        spin_unlock_irq(&call_lock);
}

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * of the system defined in the mask.
 */
static int __smp_call_function_mask(cpumask_t mask,
                                    void (*func)(void *), void *info,
                                    int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        wmb();

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (!wait)
                return 0;

        while (atomic_read(&data.finished) != cpus)
                cpu_relax();

        return 0;
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int native_smp_call_function_mask(cpumask_t mask,
                                  void (*func)(void *), void *info,
                                  int wait)
{
        int ret;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        spin_lock(&call_lock);
        ret = __smp_call_function_mask(mask, func, info, wait);
        spin_unlock(&call_lock);
        return ret;
}
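
/*
 * Illustrative usage sketch only; "poke_cpu" is a hypothetical callback and
 * not part of this file. A caller builds a cpumask of target CPUs and passes
 * it in, e.g.
 *
 *      static void poke_cpu(void *info)
 *      {
 *              printk(KERN_DEBUG "poked cpu %d\n", smp_processor_id());
 *      }
 *
 *      cpumask_t mask = cpu_online_map;
 *
 *      cpu_clear(smp_processor_id(), mask);
 *      smp_call_function_mask(mask, poke_cpu, NULL, 1);
 *
 * The callback runs in interrupt context on each target CPU, so it must be
 * fast and must not sleep, per the restrictions documented above.
 */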

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or has already executed it.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                             int nonatomic, int wait)
{
        /* prevent preemption and reschedule on another processor */
        int ret, me = get_cpu();

        if (cpu == me) {
                local_irq_disable();
                func(info);
                local_irq_enable();
                put_cpu();
                return 0;
        }

        ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);

        put_cpu();
        return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
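
/*
 * Illustrative usage sketch only; "read_remote_count", "count" and
 * "target_cpu" are hypothetical and not part of this file:
 *
 *      static void read_remote_count(void *result)
 *      {
 *              *(unsigned long *)result = read_pda(irq_call_count);
 *      }
 *
 *      unsigned long count;
 *
 *      smp_call_function_single(target_cpu, read_remote_count, &count, 0, 1);
 *
 * With wait == 1 the call does not return until target_cpu has run the
 * callback, so "count" is valid afterwards. As with the other call_function
 * helpers, this can deadlock if called with interrupts disabled.
 */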

/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                      int wait)
{
        return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_local_APIC();
        for (;;)
                halt();
}

void smp_send_stop(void)
{
        int nolock;
        unsigned long flags;

        if (reboot_force)
                return;

        /* Don't deadlock on the call lock in panic */
        nolock = !spin_trylock(&call_lock);
        local_irq_save(flags);
        __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
        if (!nolock)
                spin_unlock(&call_lock);
        disable_local_APIC();
        local_irq_restore(flags);
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
        ack_APIC_irq();
        add_pda(irq_resched_count, 1);
}

asmlinkage void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        ack_APIC_irq();
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        exit_idle();
        irq_enter();
        (*func)(info);
        add_pda(irq_call_count, 1);
        irq_exit();
        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}

struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
        .smp_prepare_cpus = native_smp_prepare_cpus,
        .smp_cpus_done = native_smp_cpus_done,

        .smp_send_reschedule = native_smp_send_reschedule,
        .smp_call_function_mask = native_smp_call_function_mask,
        .cpu_up = native_cpu_up,
};
EXPORT_SYMBOL_GPL(smp_ops);