/* drivers/hv/hv.c - mirror_ubuntu-zesty-kernel.git (Ubuntu-4.10.0-37.41) */
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context = {
        .synic_initialized = false,
};

#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
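/*
 * STIMER0 counts in 100ns units, i.e. it runs at 10 MHz.  The constants
 * above are handed to clockevents_config_and_register() in hv_synic_init()
 * so the clockevents core can convert nanosecond deltas into timer ticks
 * (for example, a 1 ms delta corresponds to 10,000 ticks).
 */
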
/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routine in this file is
 * called.
 */
int hv_init(void)
{
        memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
        memset(hv_context.synic_message_page, 0,
               sizeof(void *) * NR_CPUS);
        memset(hv_context.post_msg_page, 0,
               sizeof(void *) * NR_CPUS);
        memset(hv_context.vp_index, 0,
               sizeof(int) * NR_CPUS);
        memset(hv_context.event_dpc, 0,
               sizeof(void *) * NR_CPUS);
        memset(hv_context.msg_dpc, 0,
               sizeof(void *) * NR_CPUS);
        memset(hv_context.clk_evt, 0,
               sizeof(void *) * NR_CPUS);

        if (!hv_is_hypercall_page_setup())
                return -ENOTSUPP;

        return 0;
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
                    enum hv_message_type message_type,
                    void *payload, size_t payload_size)
{
        struct hv_input_post_message *aligned_msg;
        u64 status;

        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -EMSGSIZE;

        aligned_msg = (struct hv_input_post_message *)
                        hv_context.post_msg_page[get_cpu()];

        aligned_msg->connectionid = connection_id;
        aligned_msg->reserved = 0;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);

        status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

        put_cpu();
        return status & 0xFFFF;
}

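/*
 * hv_ce_set_next_event - clockevents 'set_next_event' callback: arm STIMER0
 * by writing the current reference time plus 'delta' ticks into the STIMER0
 * count MSR.
 */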
static int hv_ce_set_next_event(unsigned long delta,
                                struct clock_event_device *evt)
{
        u64 current_tick;

        WARN_ON(!clockevent_state_oneshot(evt));

        hv_get_current_tick(current_tick);
        current_tick += delta;
        hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);

        return 0;
}

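/*
 * hv_ce_shutdown - clockevents 'set_state_shutdown' callback: stop STIMER0
 * by clearing its count and config MSRs.
 */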
static int hv_ce_shutdown(struct clock_event_device *evt)
{
        hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
        hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);

        return 0;
}

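/*
 * hv_ce_set_oneshot - clockevents 'set_state_oneshot' callback: enable
 * STIMER0 with auto-enable set and route its expiration messages to the
 * VMBus SINT.
 */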
static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
        union hv_timer_config timer_cfg;

        timer_cfg.enable = 1;
        timer_cfg.auto_enable = 1;
        timer_cfg.sintx = VMBUS_MESSAGE_SINT;
        hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);

        return 0;
}

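/*
 * hv_init_clockevent_device - Fill in the per-cpu clock_event_device that
 * hv_synic_init() registers with the clockevents framework.
 */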
static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
{
        dev->name = "Hyper-V clockevent";
        dev->features = CLOCK_EVT_FEAT_ONESHOT;
        dev->cpumask = cpumask_of(cpu);
        dev->rating = 1000;
        /*
         * We deliberately avoid setting dev->owner = THIS_MODULE because
         * doing so would result in clockevents_config_and_register() taking
         * additional references to the hv_vmbus module, making it impossible
         * to unload.
         */

        dev->set_state_shutdown = hv_ce_shutdown;
        dev->set_state_oneshot = hv_ce_set_oneshot;
        dev->set_next_event = hv_ce_set_next_event;
}

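/*
 * hv_synic_alloc - Allocate the per-cpu pieces of hv_context: the event and
 * message tasklets, a clock_event_device, the SynIC message and event pages
 * and the post-message page, for every present CPU.
 */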
int hv_synic_alloc(void)
{
        size_t size = sizeof(struct tasklet_struct);
        size_t ced_size = sizeof(struct clock_event_device);
        int cpu;

        hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
                                         GFP_ATOMIC);
        if (hv_context.hv_numa_map == NULL) {
                pr_err("Unable to allocate NUMA map\n");
                goto err;
        }

        for_each_present_cpu(cpu) {
                hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
                if (hv_context.event_dpc[cpu] == NULL) {
                        pr_err("Unable to allocate event dpc\n");
                        goto err;
                }
                tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);

                hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
                if (hv_context.msg_dpc[cpu] == NULL) {
                        pr_err("Unable to allocate msg dpc\n");
                        goto err;
                }
                tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);

                hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
                if (hv_context.clk_evt[cpu] == NULL) {
                        pr_err("Unable to allocate clock event device\n");
                        goto err;
                }

                hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);

                hv_context.synic_message_page[cpu] =
                        (void *)get_zeroed_page(GFP_ATOMIC);

                if (hv_context.synic_message_page[cpu] == NULL) {
                        pr_err("Unable to allocate SYNIC message page\n");
                        goto err;
                }

                hv_context.synic_event_page[cpu] =
                        (void *)get_zeroed_page(GFP_ATOMIC);

                if (hv_context.synic_event_page[cpu] == NULL) {
                        pr_err("Unable to allocate SYNIC event page\n");
                        goto err;
                }

                hv_context.post_msg_page[cpu] =
                        (void *)get_zeroed_page(GFP_ATOMIC);

                if (hv_context.post_msg_page[cpu] == NULL) {
                        pr_err("Unable to allocate post msg page\n");
                        goto err;
                }

                INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
        }

        return 0;
err:
        return -ENOMEM;
}

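/*
 * hv_synic_free_cpu - Free the per-cpu allocations made by hv_synic_alloc().
 */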
static void hv_synic_free_cpu(int cpu)
{
        kfree(hv_context.event_dpc[cpu]);
        kfree(hv_context.msg_dpc[cpu]);
        kfree(hv_context.clk_evt[cpu]);
        if (hv_context.synic_event_page[cpu])
                free_page((unsigned long)hv_context.synic_event_page[cpu]);
        if (hv_context.synic_message_page[cpu])
                free_page((unsigned long)hv_context.synic_message_page[cpu]);
        if (hv_context.post_msg_page[cpu])
                free_page((unsigned long)hv_context.post_msg_page[cpu]);
}

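/*
 * hv_synic_free - Free the NUMA map and every present CPU's per-cpu state.
 */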
void hv_synic_free(void)
{
        int cpu;

        kfree(hv_context.hv_numa_map);
        for_each_present_cpu(cpu)
                hv_synic_free_cpu(cpu);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e. the x2v shim), we
 * need to retrieve the initialized message and event pages.  Otherwise, we
 * create and initialize the message and event pages.
 */
int hv_synic_init(unsigned int cpu)
{
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;
        u64 vp_index;

        /* Setup the Synic's message page */
        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 1;
        simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
                >> PAGE_SHIFT;

        hv_set_simp(simp.as_uint64);

        /* Setup the Synic's event page */
        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 1;
        siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
                >> PAGE_SHIFT;

        hv_set_siefp(siefp.as_uint64);

        /* Setup the shared SINT. */
        hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        shared_sint.as_uint64 = 0;
        shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
        shared_sint.masked = false;
        shared_sint.auto_eoi = true;

        hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        /* Enable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 1;

        hv_set_synic_state(sctrl.as_uint64);

        hv_context.synic_initialized = true;

        /*
         * Setup the mapping between Hyper-V's notion of cpuid and Linux's
         * notion of cpuid.  This array will be indexed using the Linux cpuid.
         */
        hv_get_vp_index(vp_index);
        hv_context.vp_index[cpu] = (u32)vp_index;

        /*
         * Register the per-cpu clockevent source.
         */
        if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
                clockevents_config_and_register(hv_context.clk_evt[cpu],
                                                HV_TIMER_FREQUENCY,
                                                HV_MIN_DELTA_TICKS,
                                                HV_MAX_MAX_DELTA_TICKS);
        return 0;
}

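/*
 * Note: hv_synic_init() and hv_synic_cleanup() program per-cpu SynIC MSRs
 * through the hv_get_simp()/hv_set_simp() family of accessors, so they are
 * expected to run on the CPU they are initializing or tearing down; the cpu
 * argument lets them serve as CPU hotplug online/offline callbacks.
 */
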
/*
 * hv_synic_clockevents_cleanup - Cleanup clockevent devices
 */
void hv_synic_clockevents_cleanup(void)
{
        int cpu;

        if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
                return;

        for_each_present_cpu(cpu)
                clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
int hv_synic_cleanup(unsigned int cpu)
{
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_scontrol sctrl;
        struct vmbus_channel *channel, *sc;
        bool channel_found = false;
        unsigned long flags;

        if (!hv_context.synic_initialized)
                return -EFAULT;

        /*
         * Search for channels which are bound to the CPU we're about to
         * cleanup.  In case we find one and vmbus is still connected, we
         * need to fail; this effectively prevents CPU offlining.  There is
         * no way we can re-bind channels to different CPUs for now.
         */
        mutex_lock(&vmbus_connection.channel_mutex);
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (channel->target_cpu == cpu) {
                        channel_found = true;
                        break;
                }
                spin_lock_irqsave(&channel->lock, flags);
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        if (sc->target_cpu == cpu) {
                                channel_found = true;
                                break;
                        }
                }
                spin_unlock_irqrestore(&channel->lock, flags);
                if (channel_found)
                        break;
        }
        mutex_unlock(&vmbus_connection.channel_mutex);

        if (channel_found && vmbus_connection.conn_state == CONNECTED)
                return -EBUSY;

        /* Turn off clockevent device */
        if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
                clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
                hv_ce_shutdown(hv_context.clk_evt[cpu]);
        }

        hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        shared_sint.masked = 1;

        /* Need to correctly cleanup in the case of SMP!!! */
        /* Disable the interrupt */
        hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;

        hv_set_simp(simp.as_uint64);

        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;

        hv_set_siefp(siefp.as_uint64);

        /* Disable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 0;
        hv_set_synic_state(sctrl.as_uint64);

        return 0;
}
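
/*
 * Illustrative use only (not part of this file): the VMBus driver is
 * expected to wire hv_synic_init()/hv_synic_cleanup() up as CPU hotplug
 * callbacks, roughly along the lines of the sketch below; the state name
 * shown is a placeholder.
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
 *				hv_synic_init, hv_synic_cleanup);
 *	if (ret < 0)
 *		return ret;
 */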