/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);

	return IRQ_HANDLED;
}

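/*
 * First code run on a newly started vcpu: it finishes per-CPU
 * initialisation, marks the CPU online and then (via
 * cpu_bringup_and_idle) drops into the idle loop.
 */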
static __cpuinit void cpu_bringup(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	set_cpu_online(cpu, true);
	percpu_write(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb(); /* make sure everything is out */
}

static __cpuinit void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
}

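/*
 * Bind this CPU's IPI and debug event channels (reschedule,
 * call-function, call-function-single and VIRQ_DEBUG) to their
 * handlers.  On failure, any channels already bound here are
 * unbound again before the error is returned.
 */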
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);

	return rc;
}

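/*
 * Probe which vcpus the hypervisor knows about: VCPUOP_is_up is
 * expected to return >= 0 for any existing vcpu, running or not, so
 * each such vcpu is counted and marked possible.
 */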
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_setup_vcpu_info_placement();
}

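/*
 * Boot-CPU side of SMP bring-up: initialise CPU 0's data and event
 * channels, clamp the possible map to max_cpus and fork an idle task
 * for every other possible CPU so it can be started later.
 */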
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}

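/*
 * Build the initial vcpu_guest_context for a secondary CPU: segments,
 * entry point (cpu_bringup_and_idle), a read-only GDT frame and the
 * kernel pagetable, then load it into the hypervisor with
 * VCPUOP_initialise.  Done at most once per CPU, tracked via
 * xen_cpu_initialized_map.
 */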
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs = __KERNEL_CS;
	ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

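/*
 * Bring a secondary CPU up: prepare its per-CPU state, timer and
 * spinlocks, load its initial context, kick it with VCPUOP_up and then
 * spin until cpu_bringup() on the new CPU reports CPU_ONLINE.
 */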
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

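/*
 * CPU hotplug: with CONFIG_HOTPLUG_CPU a dying vcpu is taken down via
 * VCPUOP_down and its per-CPU resources are released; without it the
 * operations fail (-ENOSYS) or BUG().
 */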
#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
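/*
 * Run on each CPU by xen_smp_send_stop(): switch to the kernel
 * pagetable and ask Xen to take this vcpu down.
 */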
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

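/*
 * IPIs are delivered as Xen event-channel notifications; send the
 * given vector to every online CPU in the mask, one event at a time.
 */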
static void xen_send_IPI_mask(const struct cpumask *mask,
			      enum ipi_vector vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

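/*
 * Handlers for the call-function IPIs; the actual work is done by the
 * generic smp_call_function machinery.
 */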
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

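/*
 * The smp_ops table installed by xen_smp_init(), replacing the native
 * implementations with the Xen-specific ones above.
 */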
static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}