]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/staging/unisys/visorbus/visorchipset.c
staging: unisys: visorbus: move textid store and show functions
[mirror_ubuntu-jammy-kernel.git] / drivers / staging / unisys / visorbus / visorchipset.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
6f14cc18 3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6f14cc18
BR
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
12e364b9
KC
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 */
16
55c67dca 17#include <linux/acpi.h>
c0a14641 18#include <linux/cdev.h>
46168810 19#include <linux/ctype.h>
e3420ed6
EA
20#include <linux/fs.h>
21#include <linux/mm.h>
12e364b9
KC
22#include <linux/nls.h>
23#include <linux/netdevice.h>
24#include <linux/platform_device.h>
90addb02 25#include <linux/uuid.h>
1ba00980 26#include <linux/crash_dump.h>
12e364b9 27
55c67dca
PB
28#include "version.h"
29#include "visorbus.h"
30#include "visorbus_private.h"
5f3a7e36 31#include "vmcallinterface.h"
55c67dca 32
12e364b9 33#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
12e364b9 34
12e364b9
KC
35#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
36#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
37
2c7e1d4e 38#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
2ee0deec
PB
39
40#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
41
d5b3f1dc
EA
42#define UNISYS_SPAR_LEAF_ID 0x40000000
43
44/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
45#define UNISYS_SPAR_ID_EBX 0x73696e55
46#define UNISYS_SPAR_ID_ECX 0x70537379
47#define UNISYS_SPAR_ID_EDX 0x34367261
48
b615d628
JS
49/*
50 * Module parameters
51 */
b615d628 52static int visorchipset_major;
b615d628 53
e3420ed6
EA
54static int
55visorchipset_open(struct inode *inode, struct file *file)
56{
e4feb2f2 57 unsigned int minor_number = iminor(inode);
e3420ed6
EA
58
59 if (minor_number)
60 return -ENODEV;
61 file->private_data = NULL;
62 return 0;
63}
64
/* file_operations .release; open() kept no per-file state to tear down */
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
70
ec17f452
DB
71/*
72 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
73 * we switch to slow polling mode. As soon as we get a controlvm
74 * message, we switch back to fast polling mode.
75 */
12e364b9 76#define MIN_IDLE_SECONDS 10
52063eca 77static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2ee0d052
EA
78/* when we got our last controlvm message */
79static unsigned long most_recent_message_jiffies;
12e364b9 80
46168810
EA
81struct parser_context {
82 unsigned long allocbytes;
83 unsigned long param_bytes;
84 u8 *curr;
85 unsigned long bytes_remaining;
86 bool byte_stream;
87 char data[0];
88};
89
9232d2d6 90static struct delayed_work periodic_controlvm_work;
12e364b9 91
e3420ed6
EA
92static struct cdev file_cdev;
93static struct visorchannel **file_controlvm_channel;
12e364b9 94
c3d9a224 95static struct visorchannel *controlvm_channel;
12e364b9 96
84982fbf 97/* Manages the request payload in the controlvm channel */
c1f834eb 98struct visor_controlvm_payload_info {
3103dc03 99 u8 *ptr; /* pointer to base address of payload pool */
ec17f452
DB
100 u64 offset; /*
101 * offset from beginning of controlvm
2ee0d052
EA
102 * channel to beginning of payload * pool
103 */
b3c55b13 104 u32 bytes; /* number of bytes in payload pool */
c1f834eb
JS
105};
106
107static struct visor_controlvm_payload_info controlvm_payload_info;
c071b6f5 108static unsigned long controlvm_payload_bytes_buffered;
12e364b9 109
ec17f452
DB
110/*
111 * The following globals are used to handle the scenario where we are unable to
112 * offload the payload from a controlvm message due to memory requirements. In
12e364b9
KC
113 * this scenario, we simply stash the controlvm message, then attempt to
114 * process it again the next time controlvm_periodic_work() runs.
115 */
7166ed19 116static struct controlvm_message controlvm_pending_msg;
c79b28f7 117static bool controlvm_pending_msg_valid;
12e364b9 118
ec17f452
DB
119/*
120 * This describes a buffer and its current state of transfer (e.g., how many
12e364b9
KC
121 * bytes have already been supplied as putfile data, and how many bytes are
122 * remaining) for a putfile_request.
123 */
124struct putfile_active_buffer {
125 /* a payload from a controlvm message, containing a file data buffer */
317d9614 126 struct parser_context *parser_ctx;
12e364b9 127 /* points within data area of parser_ctx to next byte of data */
12e364b9
KC
128 size_t bytes_remaining;
129};
130
131#define PUTFILE_REQUEST_SIG 0x0906101302281211
ec17f452
DB
132/*
133 * This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
134 * conversation. Structs of this type are dynamically linked into
12e364b9
KC
135 * <Putfile_request_list>.
136 */
137struct putfile_request {
138 u64 sig; /* PUTFILE_REQUEST_SIG */
139
140 /* header from original TransmitFile request */
98d7b594 141 struct controlvm_message_header controlvm_header;
12e364b9
KC
142
143 /* link to next struct putfile_request */
144 struct list_head next_putfile_request;
145
ec17f452
DB
146 /*
147 * head of putfile_buffer_entry list, which describes the data to be
12e364b9
KC
148 * supplied as putfile data;
149 * - this list is added to when controlvm messages come in that supply
150 * file data
151 * - this list is removed from via the hotplug program that is actually
2ee0d052
EA
152 * consuming these buffers to write as file data
153 */
12e364b9
KC
154 struct list_head input_buffer_list;
155 spinlock_t req_list_lock; /* lock for input_buffer_list */
156
157 /* waiters for input_buffer_list to go non-empty */
158 wait_queue_head_t input_buffer_wq;
159
160 /* data not yet read within current putfile_buffer_entry */
161 struct putfile_active_buffer active_buf;
162
ec17f452
DB
163 /*
164 * <0 = failed, 0 = in-progress, >0 = successful;
165 * note that this must be set with req_list_lock, and if you set <0,
166 * it is your responsibility to also free up all of the other objects
167 * in this struct (like input_buffer_list, active_buf.parser_ctx)
168 * before releasing the lock
169 */
12e364b9
KC
170 int completion_status;
171};
172
12e364b9
KC
173struct parahotplug_request {
174 struct list_head list;
175 int id;
176 unsigned long expiration;
3ab47701 177 struct controlvm_message msg;
12e364b9
KC
178};
179
ddf5de53
BR
180static LIST_HEAD(parahotplug_request_list);
181static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9 182
12e364b9 183/* info for /dev/visorchipset */
ec17f452 184static dev_t major_dev = -1; /*< indicates major num for device */
12e364b9 185
19f6634f
BR
186/* prototypes for attributes */
187static ssize_t toolaction_show(struct device *dev,
84efd207
DK
188 struct device_attribute *attr,
189 char *buf)
190{
191 u8 tool_action = 0;
192
193 visorchannel_read(controlvm_channel,
194 offsetof(struct spar_controlvm_channel_protocol,
195 tool_action), &tool_action, sizeof(u8));
196 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
197}
198
19f6634f 199static ssize_t toolaction_store(struct device *dev,
8e76e695 200 struct device_attribute *attr,
84efd207
DK
201 const char *buf, size_t count)
202{
203 u8 tool_action;
204 int ret;
205
206 if (kstrtou8(buf, 10, &tool_action))
207 return -EINVAL;
208
209 ret = visorchannel_write
210 (controlvm_channel,
211 offsetof(struct spar_controlvm_channel_protocol,
212 tool_action),
213 &tool_action, sizeof(u8));
214
215 if (ret)
216 return ret;
217 return count;
218}
19f6634f
BR
219static DEVICE_ATTR_RW(toolaction);
220
54b31229 221static ssize_t boottotool_show(struct device *dev,
1b1d463d
DK
222 struct device_attribute *attr,
223 char *buf)
224{
225 struct efi_spar_indication efi_spar_indication;
226
227 visorchannel_read(controlvm_channel,
228 offsetof(struct spar_controlvm_channel_protocol,
229 efi_spar_ind), &efi_spar_indication,
230 sizeof(struct efi_spar_indication));
231 return scnprintf(buf, PAGE_SIZE, "%u\n",
232 efi_spar_indication.boot_to_tool);
233}
234
54b31229 235static ssize_t boottotool_store(struct device *dev,
1b1d463d
DK
236 struct device_attribute *attr,
237 const char *buf, size_t count)
238{
239 int val, ret;
240 struct efi_spar_indication efi_spar_indication;
241
242 if (kstrtoint(buf, 10, &val))
243 return -EINVAL;
244
245 efi_spar_indication.boot_to_tool = val;
246 ret = visorchannel_write
247 (controlvm_channel,
248 offsetof(struct spar_controlvm_channel_protocol,
249 efi_spar_ind), &(efi_spar_indication),
250 sizeof(struct efi_spar_indication));
251
252 if (ret)
253 return ret;
254 return count;
255}
54b31229
BR
256static DEVICE_ATTR_RW(boottotool);
257
422af17c 258static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8a4a8a03
DK
259 char *buf)
260{
261 u32 error = 0;
262
263 visorchannel_read(controlvm_channel,
264 offsetof(struct spar_controlvm_channel_protocol,
265 installation_error),
266 &error, sizeof(u32));
267 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
268}
269
422af17c 270static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8a4a8a03
DK
271 const char *buf, size_t count)
272{
273 u32 error;
274 int ret;
275
276 if (kstrtou32(buf, 10, &error))
277 return -EINVAL;
278
279 ret = visorchannel_write
280 (controlvm_channel,
281 offsetof(struct spar_controlvm_channel_protocol,
282 installation_error),
283 &error, sizeof(u32));
284 if (ret)
285 return ret;
286 return count;
287}
422af17c
BR
288static DEVICE_ATTR_RW(error);
289
290static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
79730c7c
DK
291 char *buf)
292{
293 u32 text_id = 0;
294
295 visorchannel_read
296 (controlvm_channel,
297 offsetof(struct spar_controlvm_channel_protocol,
298 installation_text_id),
299 &text_id, sizeof(u32));
300 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
301}
302
422af17c 303static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
79730c7c
DK
304 const char *buf, size_t count)
305{
306 u32 text_id;
307 int ret;
308
309 if (kstrtou32(buf, 10, &text_id))
310 return -EINVAL;
311
312 ret = visorchannel_write
313 (controlvm_channel,
314 offsetof(struct spar_controlvm_channel_protocol,
315 installation_text_id),
316 &text_id, sizeof(u32));
317 if (ret)
318 return ret;
319 return count;
320}
422af17c
BR
321static DEVICE_ATTR_RW(textid);
322
323static ssize_t remaining_steps_show(struct device *dev,
8e76e695 324 struct device_attribute *attr, char *buf);
422af17c 325static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
326 struct device_attribute *attr,
327 const char *buf, size_t count);
422af17c
BR
328static DEVICE_ATTR_RW(remaining_steps);
329
e56fa7cd 330static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
331 struct device_attribute *attr,
332 const char *buf, size_t count);
e56fa7cd
BR
333static DEVICE_ATTR_WO(devicedisabled);
334
335static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
336 struct device_attribute *attr,
337 const char *buf, size_t count);
e56fa7cd
BR
338static DEVICE_ATTR_WO(deviceenabled);
339
19f6634f
BR
340static struct attribute *visorchipset_install_attrs[] = {
341 &dev_attr_toolaction.attr,
54b31229 342 &dev_attr_boottotool.attr,
422af17c
BR
343 &dev_attr_error.attr,
344 &dev_attr_textid.attr,
345 &dev_attr_remaining_steps.attr,
19f6634f
BR
346 NULL
347};
348
349static struct attribute_group visorchipset_install_group = {
350 .name = "install",
351 .attrs = visorchipset_install_attrs
352};
353
e56fa7cd
BR
354static struct attribute *visorchipset_parahotplug_attrs[] = {
355 &dev_attr_devicedisabled.attr,
356 &dev_attr_deviceenabled.attr,
357 NULL
358};
359
360static struct attribute_group visorchipset_parahotplug_group = {
361 .name = "parahotplug",
362 .attrs = visorchipset_parahotplug_attrs
363};
364
19f6634f
BR
365static const struct attribute_group *visorchipset_dev_groups[] = {
366 &visorchipset_install_group,
e56fa7cd 367 &visorchipset_parahotplug_group,
19f6634f
BR
368 NULL
369};
370
04dacacc
DZ
/*
 * Empty .release callback: visorchipset_platform_device is statically
 * allocated, so there is nothing to free when the device goes away.
 */
static void visorchipset_dev_release(struct device *dev)
{
}
374
12e364b9 375/* /sys/devices/platform/visorchipset */
eb34e877 376static struct platform_device visorchipset_platform_device = {
12e364b9
KC
377 .name = "visorchipset",
378 .id = -1,
19f6634f 379 .dev.groups = visorchipset_dev_groups,
04dacacc 380 .dev.release = visorchipset_dev_release,
12e364b9
KC
381};
382
464129ed 383static uuid_le
46168810
EA
384parser_id_get(struct parser_context *ctx)
385{
386 struct spar_controlvm_parameters_header *phdr = NULL;
387
e4a3dd33 388 if (!ctx)
46168810
EA
389 return NULL_UUID_LE;
390 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
391 return phdr->id;
392}
393
ec17f452
DB
394/*
395 * Describes the state from the perspective of which controlvm messages have
396 * been received for a bus or device.
2ee0deec
PB
397 */
398
/* selects which parameter string parser_param_start() positions the cursor at */
enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};
405
464129ed 406static void
2ee0deec
PB
407parser_param_start(struct parser_context *ctx,
408 enum PARSER_WHICH_STRING which_string)
46168810
EA
409{
410 struct spar_controlvm_parameters_header *phdr = NULL;
411
e4a3dd33 412 if (!ctx)
b4d4dfbc
BR
413 return;
414
46168810
EA
415 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
416 switch (which_string) {
417 case PARSERSTRING_INITIATOR:
418 ctx->curr = ctx->data + phdr->initiator_offset;
419 ctx->bytes_remaining = phdr->initiator_length;
420 break;
421 case PARSERSTRING_TARGET:
422 ctx->curr = ctx->data + phdr->target_offset;
423 ctx->bytes_remaining = phdr->target_length;
424 break;
425 case PARSERSTRING_CONNECTION:
426 ctx->curr = ctx->data + phdr->connection_offset;
427 ctx->bytes_remaining = phdr->connection_length;
428 break;
429 case PARSERSTRING_NAME:
430 ctx->curr = ctx->data + phdr->name_offset;
431 ctx->bytes_remaining = phdr->name_length;
432 break;
433 default:
434 break;
435 }
46168810
EA
436}
437
464129ed 438static void parser_done(struct parser_context *ctx)
46168810
EA
439{
440 if (!ctx)
441 return;
442 controlvm_payload_bytes_buffered -= ctx->param_bytes;
443 kfree(ctx);
444}
445
464129ed 446static void *
46168810
EA
447parser_string_get(struct parser_context *ctx)
448{
449 u8 *pscan;
450 unsigned long nscan;
451 int value_length = -1;
452 void *value = NULL;
453 int i;
454
455 if (!ctx)
456 return NULL;
457 pscan = ctx->curr;
458 nscan = ctx->bytes_remaining;
459 if (nscan == 0)
460 return NULL;
461 if (!pscan)
462 return NULL;
463 for (i = 0, value_length = -1; i < nscan; i++)
464 if (pscan[i] == '\0') {
465 value_length = i;
466 break;
467 }
468 if (value_length < 0) /* '\0' was not included in the length */
469 value_length = nscan;
8c395e74 470 value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
e4a3dd33 471 if (!value)
46168810
EA
472 return NULL;
473 if (value_length > 0)
474 memcpy(value, pscan, value_length);
0e7bf2f4 475 ((u8 *)(value))[value_length] = '\0';
46168810
EA
476 return value;
477}
478
422af17c 479static ssize_t remaining_steps_show(struct device *dev,
8e76e695 480 struct device_attribute *attr, char *buf)
422af17c 481{
3a56d700 482 u16 remaining_steps = 0;
422af17c 483
c3d9a224 484 visorchannel_read(controlvm_channel,
8e76e695
BR
485 offsetof(struct spar_controlvm_channel_protocol,
486 installation_remaining_steps),
487 &remaining_steps, sizeof(u16));
ee8da290 488 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
489}
490
491static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
492 struct device_attribute *attr,
493 const char *buf, size_t count)
422af17c 494{
ee8da290 495 u16 remaining_steps;
66e24b76 496 int ret;
422af17c 497
ebec8967 498 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
499 return -EINVAL;
500
a07d7c38
TS
501 ret = visorchannel_write
502 (controlvm_channel,
503 offsetof(struct spar_controlvm_channel_protocol,
504 installation_remaining_steps),
505 &remaining_steps, sizeof(u16));
66e24b76
BR
506 if (ret)
507 return ret;
e22a4a0f 508 return count;
422af17c
BR
509}
510
ab0592b9
DZ
511struct visor_busdev {
512 u32 bus_no;
513 u32 dev_no;
514};
515
516static int match_visorbus_dev_by_id(struct device *dev, void *data)
517{
518 struct visor_device *vdev = to_visor_device(dev);
7f44582e 519 struct visor_busdev *id = data;
ab0592b9
DZ
520 u32 bus_no = id->bus_no;
521 u32 dev_no = id->dev_no;
522
65bd6e46
DZ
523 if ((vdev->chipset_bus_no == bus_no) &&
524 (vdev->chipset_dev_no == dev_no))
ab0592b9
DZ
525 return 1;
526
527 return 0;
528}
d1e08637 529
ab0592b9
DZ
530struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
531 struct visor_device *from)
532{
533 struct device *dev;
534 struct device *dev_start = NULL;
535 struct visor_device *vdev = NULL;
536 struct visor_busdev id = {
537 .bus_no = bus_no,
538 .dev_no = dev_no
539 };
540
541 if (from)
542 dev_start = &from->device;
543 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
544 match_visorbus_dev_by_id);
545 if (dev)
546 vdev = to_visor_device(dev);
547 return vdev;
548}
ab0592b9 549
5f251395
DK
550static void
551controlvm_init_response(struct controlvm_message *msg,
552 struct controlvm_message_header *msg_hdr, int response)
553{
554 memset(msg, 0, sizeof(struct controlvm_message));
555 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
556 msg->hdr.payload_bytes = 0;
557 msg->hdr.payload_vm_offset = 0;
558 msg->hdr.payload_max_bytes = 0;
559 if (response < 0) {
560 msg->hdr.flags.failed = 1;
561 msg->hdr.completion_status = (u32)(-response);
562 }
563}
564
565static void
566controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
567 int response,
568 enum ultra_chipset_feature features)
569{
570 struct controlvm_message outmsg;
571
572 controlvm_init_response(&outmsg, msg_hdr, response);
573 outmsg.cmd.init_chipset.features = features;
574 if (!visorchannel_signalinsert(controlvm_channel,
575 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
576 return;
577 }
578}
579
12e364b9 580static void
3ab47701 581chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
582{
583 static int chipset_inited;
b9b141e8 584 enum ultra_chipset_feature features = 0;
12e364b9
KC
585 int rc = CONTROLVM_RESP_SUCCESS;
586
587 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
588 if (chipset_inited) {
22ad57ba 589 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
5233d1eb 590 goto out_respond;
12e364b9
KC
591 }
592 chipset_inited = 1;
593 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
594
ec17f452
DB
595 /*
596 * Set features to indicate we support parahotplug (if Command
2ee0d052
EA
597 * also supports it).
598 */
12e364b9 599 features =
2ea5117b 600 inmsg->cmd.init_chipset.
12e364b9
KC
601 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
602
ec17f452
DB
603 /*
604 * Set the "reply" bit so Command knows this is a
2ee0d052
EA
605 * features-aware driver.
606 */
12e364b9
KC
607 features |= ULTRA_CHIPSET_FEATURE_REPLY;
608
5233d1eb 609out_respond:
98d7b594 610 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
611 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
612}
613
12e364b9 614static void
b3168c70 615controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 616{
3ab47701 617 struct controlvm_message outmsg;
26eb2c0c 618
b3168c70 619 controlvm_init_response(&outmsg, msg_hdr, response);
2098dbd1 620 if (outmsg.hdr.flags.test_message == 1)
12e364b9 621 return;
2098dbd1 622
c3d9a224 623 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 624 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
625 return;
626 }
627}
628
98d7b594 629static void controlvm_respond_physdev_changestate(
b3168c70 630 struct controlvm_message_header *msg_hdr, int response,
98d7b594 631 struct spar_segment_state state)
12e364b9 632{
3ab47701 633 struct controlvm_message outmsg;
26eb2c0c 634
b3168c70 635 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
636 outmsg.cmd.device_change_state.state = state;
637 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 638 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 639 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
640 return;
641 }
642}
643
2ee0deec
PB
644enum crash_obj_type {
645 CRASH_DEV,
646 CRASH_BUS,
647};
648
12c957dc
TS
649static void
650save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
651{
652 u32 local_crash_msg_offset;
653 u16 local_crash_msg_count;
654
655 if (visorchannel_read(controlvm_channel,
656 offsetof(struct spar_controlvm_channel_protocol,
657 saved_crash_message_count),
658 &local_crash_msg_count, sizeof(u16)) < 0) {
659 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
660 POSTCODE_SEVERITY_ERR);
661 return;
662 }
663
664 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
665 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
666 local_crash_msg_count,
667 POSTCODE_SEVERITY_ERR);
668 return;
669 }
670
671 if (visorchannel_read(controlvm_channel,
672 offsetof(struct spar_controlvm_channel_protocol,
673 saved_crash_message_offset),
674 &local_crash_msg_offset, sizeof(u32)) < 0) {
675 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
676 POSTCODE_SEVERITY_ERR);
677 return;
678 }
679
680 if (typ == CRASH_BUS) {
681 if (visorchannel_write(controlvm_channel,
682 local_crash_msg_offset,
683 msg,
684 sizeof(struct controlvm_message)) < 0) {
685 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
686 POSTCODE_SEVERITY_ERR);
687 return;
688 }
689 } else {
690 local_crash_msg_offset += sizeof(struct controlvm_message);
691 if (visorchannel_write(controlvm_channel,
692 local_crash_msg_offset,
693 msg,
694 sizeof(struct controlvm_message)) < 0) {
695 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
696 POSTCODE_SEVERITY_ERR);
697 return;
698 }
699 }
700}
701
12e364b9 702static void
0274b5ae
DZ
703bus_responder(enum controlvm_id cmd_id,
704 struct controlvm_message_header *pending_msg_hdr,
3032aedd 705 int response)
12e364b9 706{
e4a3dd33 707 if (!pending_msg_hdr)
0274b5ae 708 return; /* no controlvm response needed */
12e364b9 709
0274b5ae 710 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 711 return;
0aca7844 712
0274b5ae 713 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
714}
715
716static void
fbb31f48 717device_changestate_responder(enum controlvm_id cmd_id,
a298bc0b 718 struct visor_device *p, int response,
fbb31f48 719 struct spar_segment_state response_state)
12e364b9 720{
3ab47701 721 struct controlvm_message outmsg;
a298bc0b
DZ
722 u32 bus_no = p->chipset_bus_no;
723 u32 dev_no = p->chipset_dev_no;
12e364b9 724
e4a3dd33 725 if (!p->pending_msg_hdr)
12e364b9 726 return; /* no controlvm response needed */
0274b5ae 727 if (p->pending_msg_hdr->id != cmd_id)
12e364b9 728 return;
12e364b9 729
0274b5ae 730 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
12e364b9 731
fbb31f48
BR
732 outmsg.cmd.device_change_state.bus_no = bus_no;
733 outmsg.cmd.device_change_state.dev_no = dev_no;
734 outmsg.cmd.device_change_state.state = response_state;
12e364b9 735
c3d9a224 736 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 737 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 738 return;
12e364b9
KC
739}
740
741static void
0274b5ae
DZ
742device_responder(enum controlvm_id cmd_id,
743 struct controlvm_message_header *pending_msg_hdr,
b4b598fd 744 int response)
12e364b9 745{
e4a3dd33 746 if (!pending_msg_hdr)
12e364b9 747 return; /* no controlvm response needed */
0aca7844 748
0274b5ae 749 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 750 return;
0aca7844 751
0274b5ae 752 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
753}
754
755static void
d32517e3 756bus_epilog(struct visor_device *bus_info,
2836c6a8 757 u32 cmd, struct controlvm_message_header *msg_hdr,
f4c11551 758 int response, bool need_response)
12e364b9 759{
0274b5ae 760 struct controlvm_message_header *pmsg_hdr = NULL;
12e364b9 761
0274b5ae 762 if (!bus_info) {
ec17f452
DB
763 /*
764 * relying on a valid passed in response code
765 * be lazy and re-use msg_hdr for this failure, is this ok??
766 */
0274b5ae 767 pmsg_hdr = msg_hdr;
87241ab8 768 goto out_respond;
0274b5ae
DZ
769 }
770
771 if (bus_info->pending_msg_hdr) {
772 /* only non-NULL if dev is still waiting on a response */
773 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
774 pmsg_hdr = bus_info->pending_msg_hdr;
87241ab8 775 goto out_respond;
0274b5ae 776 }
0aca7844 777
2836c6a8 778 if (need_response) {
0274b5ae
DZ
779 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
780 if (!pmsg_hdr) {
368acb3f
DK
781 POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
782 bus_info->chipset_bus_no,
783 POSTCODE_SEVERITY_ERR);
87241ab8 784 return;
0274b5ae
DZ
785 }
786
787 memcpy(pmsg_hdr, msg_hdr,
98d7b594 788 sizeof(struct controlvm_message_header));
0274b5ae 789 bus_info->pending_msg_hdr = pmsg_hdr;
75c1f8b7 790 }
12e364b9 791
12e364b9
KC
792 if (response == CONTROLVM_RESP_SUCCESS) {
793 switch (cmd) {
794 case CONTROLVM_BUS_CREATE:
87241ab8 795 chipset_bus_create(bus_info);
12e364b9
KC
796 break;
797 case CONTROLVM_BUS_DESTROY:
87241ab8 798 chipset_bus_destroy(bus_info);
12e364b9
KC
799 break;
800 }
801 }
368acb3f 802
87241ab8 803out_respond:
4a185e54 804 bus_responder(cmd, pmsg_hdr, response);
12e364b9
KC
805}
806
807static void
a298bc0b 808device_epilog(struct visor_device *dev_info,
b4b598fd 809 struct spar_segment_state state, u32 cmd,
2836c6a8 810 struct controlvm_message_header *msg_hdr, int response,
f4c11551 811 bool need_response, bool for_visorbus)
12e364b9 812{
0274b5ae 813 struct controlvm_message_header *pmsg_hdr = NULL;
12e364b9 814
0274b5ae 815 if (!dev_info) {
ec17f452
DB
816 /*
817 * relying on a valid passed in response code
818 * be lazy and re-use msg_hdr for this failure, is this ok??
819 */
0274b5ae 820 pmsg_hdr = msg_hdr;
87241ab8 821 goto out_respond;
0274b5ae
DZ
822 }
823
824 if (dev_info->pending_msg_hdr) {
825 /* only non-NULL if dev is still waiting on a response */
826 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
827 pmsg_hdr = dev_info->pending_msg_hdr;
87241ab8 828 goto out_respond;
0274b5ae
DZ
829 }
830
2836c6a8 831 if (need_response) {
0274b5ae
DZ
832 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
833 if (!pmsg_hdr) {
834 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
87241ab8 835 goto out_respond;
0274b5ae
DZ
836 }
837
838 memcpy(pmsg_hdr, msg_hdr,
98d7b594 839 sizeof(struct controlvm_message_header));
0274b5ae 840 dev_info->pending_msg_hdr = pmsg_hdr;
75c1f8b7 841 }
12e364b9 842
12e364b9
KC
843 if (response >= 0) {
844 switch (cmd) {
845 case CONTROLVM_DEVICE_CREATE:
87241ab8 846 chipset_device_create(dev_info);
12e364b9
KC
847 break;
848 case CONTROLVM_DEVICE_CHANGESTATE:
849 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
850 if (state.alive == segment_state_running.alive &&
851 state.operating ==
852 segment_state_running.operating) {
87241ab8 853 chipset_device_resume(dev_info);
12e364b9
KC
854 }
855 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 856 else if (state.alive == segment_state_standby.alive &&
3f833b54 857 state.operating ==
bd0d2dcc 858 segment_state_standby.operating) {
ec17f452
DB
859 /*
860 * technically this is standby case
12e364b9
KC
861 * where server is lost
862 */
87241ab8 863 chipset_device_pause(dev_info);
12e364b9
KC
864 }
865 break;
866 case CONTROLVM_DEVICE_DESTROY:
87241ab8 867 chipset_device_destroy(dev_info);
12e364b9
KC
868 break;
869 }
870 }
3cb3fa3b 871
87241ab8 872out_respond:
3cb3fa3b 873 device_responder(cmd, pmsg_hdr, response);
12e364b9
KC
874}
875
876static void
3ab47701 877bus_create(struct controlvm_message *inmsg)
12e364b9 878{
2ea5117b 879 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 880 u32 bus_no = cmd->create_bus.bus_no;
12e364b9 881 int rc = CONTROLVM_RESP_SUCCESS;
d32517e3 882 struct visor_device *bus_info;
b32c4997 883 struct visorchannel *visorchannel;
12e364b9 884
d32517e3 885 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
6c5fed35
BR
886 if (bus_info && (bus_info->state.created == 1)) {
887 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 888 POSTCODE_SEVERITY_ERR);
22ad57ba 889 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
9fd04060 890 goto out_bus_epilog;
12e364b9 891 }
6c5fed35
BR
892 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
893 if (!bus_info) {
894 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 895 POSTCODE_SEVERITY_ERR);
22ad57ba 896 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
9fd04060 897 goto out_bus_epilog;
12e364b9
KC
898 }
899
4abce83d 900 INIT_LIST_HEAD(&bus_info->list_all);
d32517e3
DZ
901 bus_info->chipset_bus_no = bus_no;
902 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
12e364b9 903
6c5fed35 904 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 905
b32c4997
DZ
906 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
907 cmd->create_bus.channel_bytes,
908 GFP_KERNEL,
909 cmd->create_bus.bus_data_type_uuid);
910
911 if (!visorchannel) {
912 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
913 POSTCODE_SEVERITY_ERR);
914 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
915 kfree(bus_info);
916 bus_info = NULL;
9fd04060 917 goto out_bus_epilog;
b32c4997
DZ
918 }
919 bus_info->visorchannel = visorchannel;
820b11b6 920 if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
12c957dc 921 save_crash_message(inmsg, CRASH_BUS);
12e364b9 922
6c5fed35 923 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 924
9fd04060 925out_bus_epilog:
3032aedd 926 bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 927 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
928}
929
930static void
3ab47701 931bus_destroy(struct controlvm_message *inmsg)
12e364b9 932{
2ea5117b 933 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 934 u32 bus_no = cmd->destroy_bus.bus_no;
d32517e3 935 struct visor_device *bus_info;
12e364b9
KC
936 int rc = CONTROLVM_RESP_SUCCESS;
937
d32517e3 938 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
dff54cd6 939 if (!bus_info)
22ad57ba 940 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 941 else if (bus_info->state.created == 0)
22ad57ba 942 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 943
3032aedd 944 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 945 rc, inmsg->hdr.flags.response_expected == 1);
d32517e3
DZ
946
947 /* bus_info is freed as part of the busdevice_release function */
12e364b9
KC
948}
949
950static void
317d9614
BR
951bus_configure(struct controlvm_message *inmsg,
952 struct parser_context *parser_ctx)
12e364b9 953{
2ea5117b 954 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e 955 u32 bus_no;
d32517e3 956 struct visor_device *bus_info;
12e364b9 957 int rc = CONTROLVM_RESP_SUCCESS;
12e364b9 958
654bada0
BR
959 bus_no = cmd->configure_bus.bus_no;
960 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
961 POSTCODE_SEVERITY_INFO);
12e364b9 962
d32517e3 963 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
654bada0
BR
964 if (!bus_info) {
965 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 966 POSTCODE_SEVERITY_ERR);
22ad57ba 967 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
968 } else if (bus_info->state.created == 0) {
969 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 970 POSTCODE_SEVERITY_ERR);
22ad57ba 971 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
e4a3dd33 972 } else if (bus_info->pending_msg_hdr) {
654bada0 973 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 974 POSTCODE_SEVERITY_ERR);
22ad57ba 975 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0 976 } else {
a07d7c38
TS
977 visorchannel_set_clientpartition
978 (bus_info->visorchannel,
979 cmd->configure_bus.guest_handle);
654bada0
BR
980 bus_info->partition_uuid = parser_id_get(parser_ctx);
981 parser_param_start(parser_ctx, PARSERSTRING_NAME);
982 bus_info->name = parser_string_get(parser_ctx);
983
654bada0
BR
984 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
985 POSTCODE_SEVERITY_INFO);
12e364b9 986 }
3032aedd 987 bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 988 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
989}
990
991static void
3ab47701 992my_device_create(struct controlvm_message *inmsg)
12e364b9 993{
2ea5117b 994 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
995 u32 bus_no = cmd->create_device.bus_no;
996 u32 dev_no = cmd->create_device.dev_no;
a298bc0b 997 struct visor_device *dev_info = NULL;
d32517e3 998 struct visor_device *bus_info;
b32c4997 999 struct visorchannel *visorchannel;
12e364b9
KC
1000 int rc = CONTROLVM_RESP_SUCCESS;
1001
a298bc0b
DZ
1002 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
1003 if (!bus_info) {
c60c8e26 1004 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1005 POSTCODE_SEVERITY_ERR);
a298bc0b 1006 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c6af7a9c 1007 goto out_respond;
12e364b9 1008 }
a298bc0b
DZ
1009
1010 if (bus_info->state.created == 0) {
c60c8e26 1011 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1012 POSTCODE_SEVERITY_ERR);
22ad57ba 1013 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c6af7a9c 1014 goto out_respond;
12e364b9 1015 }
a298bc0b
DZ
1016
1017 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
1018 if (dev_info && (dev_info->state.created == 1)) {
c60c8e26 1019 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1020 POSTCODE_SEVERITY_ERR);
a298bc0b 1021 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
c6af7a9c 1022 goto out_respond;
12e364b9 1023 }
a298bc0b 1024
c60c8e26
BR
1025 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1026 if (!dev_info) {
1027 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1028 POSTCODE_SEVERITY_ERR);
22ad57ba 1029 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
c6af7a9c 1030 goto out_respond;
12e364b9 1031 }
97a84f12 1032
a298bc0b
DZ
1033 dev_info->chipset_bus_no = bus_no;
1034 dev_info->chipset_dev_no = dev_no;
1035 dev_info->inst = cmd->create_device.dev_inst_uuid;
1036
1037 /* not sure where the best place to set the 'parent' */
1038 dev_info->device.parent = &bus_info->device;
1039
c60c8e26 1040 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
12e364b9
KC
1041 POSTCODE_SEVERITY_INFO);
1042
a3ef1a8e
DK
1043 visorchannel =
1044 visorchannel_create_with_lock(cmd->create_device.channel_addr,
1045 cmd->create_device.channel_bytes,
1046 GFP_KERNEL,
1047 cmd->create_device.data_type_uuid);
b32c4997
DZ
1048
1049 if (!visorchannel) {
1050 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1051 POSTCODE_SEVERITY_ERR);
1052 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1053 kfree(dev_info);
1054 dev_info = NULL;
c6af7a9c 1055 goto out_respond;
b32c4997
DZ
1056 }
1057 dev_info->visorchannel = visorchannel;
1058 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
12c957dc
TS
1059 if (uuid_le_cmp(cmd->create_device.data_type_uuid,
1060 spar_vhba_channel_protocol_uuid) == 0)
1061 save_crash_message(inmsg, CRASH_DEV);
1062
c60c8e26 1063 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
12e364b9 1064 POSTCODE_SEVERITY_INFO);
c6af7a9c 1065out_respond:
b4b598fd 1066 device_epilog(dev_info, segment_state_running,
12e364b9 1067 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
4da3336c 1068 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1069}
1070
1071static void
3ab47701 1072my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1073{
2ea5117b 1074 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1075 u32 bus_no = cmd->device_change_state.bus_no;
1076 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1077 struct spar_segment_state state = cmd->device_change_state.state;
a298bc0b 1078 struct visor_device *dev_info;
12e364b9
KC
1079 int rc = CONTROLVM_RESP_SUCCESS;
1080
a298bc0b 1081 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
0278a905
BR
1082 if (!dev_info) {
1083 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1084 POSTCODE_SEVERITY_ERR);
22ad57ba 1085 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1086 } else if (dev_info->state.created == 0) {
1087 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1088 POSTCODE_SEVERITY_ERR);
22ad57ba 1089 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1090 }
0278a905 1091 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1092 device_epilog(dev_info, state,
0278a905 1093 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
4da3336c 1094 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1095}
1096
1097static void
3ab47701 1098my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1099{
2ea5117b 1100 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1101 u32 bus_no = cmd->destroy_device.bus_no;
1102 u32 dev_no = cmd->destroy_device.dev_no;
a298bc0b 1103 struct visor_device *dev_info;
12e364b9
KC
1104 int rc = CONTROLVM_RESP_SUCCESS;
1105
a298bc0b 1106 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
61715c8b 1107 if (!dev_info)
22ad57ba 1108 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1109 else if (dev_info->state.created == 0)
22ad57ba 1110 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1111
61715c8b 1112 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1113 device_epilog(dev_info, segment_state_running,
12e364b9 1114 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
4da3336c 1115 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1116}
1117
ec17f452
DB
1118/**
1119 * initialize_controlvm_payload_info() - init controlvm_payload_info struct
1120 * @phys_addr: the physical address of controlvm channel
1121 * @offset: the offset to payload
1122 * @bytes: the size of the payload in bytes
1123 * @info: the returning valid struct
1124 *
1125 * When provided with the physical address of the controlvm channel
12e364b9
KC
1126 * (phys_addr), the offset to the payload area we need to manage
1127 * (offset), and the size of this payload area (bytes), fills in the
ec17f452
DB
1128 * controlvm_payload_info struct.
1129 *
1130 * Return: CONTROLVM_RESP_SUCCESS for success or a negative for failure
12e364b9
KC
1131 */
1132static int
d5b3f1dc 1133initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
c1f834eb 1134 struct visor_controlvm_payload_info *info)
12e364b9 1135{
3103dc03 1136 u8 *payload = NULL;
12e364b9 1137
dde29996
DK
1138 if (!info)
1139 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1140
c1f834eb 1141 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
dde29996
DK
1142 if ((offset == 0) || (bytes == 0))
1143 return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1144
3103dc03 1145 payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
dde29996
DK
1146 if (!payload)
1147 return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
12e364b9
KC
1148
1149 info->offset = offset;
1150 info->bytes = bytes;
1151 info->ptr = payload;
12e364b9 1152
dde29996 1153 return CONTROLVM_RESP_SUCCESS;
12e364b9
KC
1154}
1155
1156static void
c1f834eb 1157destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1158{
597c338f 1159 if (info->ptr) {
3103dc03 1160 memunmap(info->ptr);
12e364b9
KC
1161 info->ptr = NULL;
1162 }
c1f834eb 1163 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1164}
1165
1166static void
1167initialize_controlvm_payload(void)
1168{
d5b3f1dc 1169 u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
cafefc0c
BR
1170 u64 payload_offset = 0;
1171 u32 payload_bytes = 0;
26eb2c0c 1172
c3d9a224 1173 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1174 offsetof(struct spar_controlvm_channel_protocol,
1175 request_payload_offset),
cafefc0c 1176 &payload_offset, sizeof(payload_offset)) < 0) {
12e364b9
KC
1177 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1178 POSTCODE_SEVERITY_ERR);
1179 return;
1180 }
c3d9a224 1181 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1182 offsetof(struct spar_controlvm_channel_protocol,
1183 request_payload_bytes),
cafefc0c 1184 &payload_bytes, sizeof(payload_bytes)) < 0) {
12e364b9
KC
1185 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1186 POSTCODE_SEVERITY_ERR);
1187 return;
1188 }
1189 initialize_controlvm_payload_info(phys_addr,
cafefc0c 1190 payload_offset, payload_bytes,
84982fbf 1191 &controlvm_payload_info);
12e364b9
KC
1192}
1193
ec17f452
DB
1194/**
1195 * visorchipset_chipset_ready() - sends chipset_ready action
1196 *
1197 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1198 *
1199 * Return: CONTROLVM_RESP_SUCCESS
12e364b9 1200 */
d3368a58 1201static int
12e364b9
KC
1202visorchipset_chipset_ready(void)
1203{
eb34e877 1204 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1205 return CONTROLVM_RESP_SUCCESS;
1206}
12e364b9 1207
d3368a58 1208static int
12e364b9
KC
1209visorchipset_chipset_selftest(void)
1210{
1211 char env_selftest[20];
1212 char *envp[] = { env_selftest, NULL };
26eb2c0c 1213
12e364b9 1214 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1215 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1216 envp);
1217 return CONTROLVM_RESP_SUCCESS;
1218}
12e364b9 1219
ec17f452
DB
1220/**
1221 * visorchipset_chipset_notready() - sends chipset_notready action
1222 *
1223 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1224 *
1225 * Return: CONTROLVM_RESP_SUCCESS
12e364b9 1226 */
d3368a58 1227static int
12e364b9
KC
1228visorchipset_chipset_notready(void)
1229{
eb34e877 1230 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1231 return CONTROLVM_RESP_SUCCESS;
1232}
12e364b9
KC
1233
1234static void
77a0449d 1235chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1236{
1237 int rc = visorchipset_chipset_ready();
26eb2c0c 1238
12e364b9
KC
1239 if (rc != CONTROLVM_RESP_SUCCESS)
1240 rc = -rc;
260d8992 1241 if (msg_hdr->flags.response_expected)
77a0449d 1242 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1243}
1244
1245static void
77a0449d 1246chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1247{
1248 int rc = visorchipset_chipset_selftest();
26eb2c0c 1249
12e364b9
KC
1250 if (rc != CONTROLVM_RESP_SUCCESS)
1251 rc = -rc;
77a0449d
BR
1252 if (msg_hdr->flags.response_expected)
1253 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1254}
1255
1256static void
77a0449d 1257chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1258{
1259 int rc = visorchipset_chipset_notready();
26eb2c0c 1260
12e364b9
KC
1261 if (rc != CONTROLVM_RESP_SUCCESS)
1262 rc = -rc;
77a0449d
BR
1263 if (msg_hdr->flags.response_expected)
1264 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1265}
1266
12e364b9 1267/*
ec17f452 1268 * The general parahotplug flow works as follows. The visorchipset
12e364b9 1269 * driver receives a DEVICE_CHANGESTATE message from Command
ec17f452 1270 * specifying a physical device to enable or disable. The CONTROLVM
12e364b9
KC
1271 * message handler calls parahotplug_process_message, which then adds
1272 * the message to a global list and kicks off a udev event which
1273 * causes a user level script to enable or disable the specified
ec17f452 1274 * device. The udev script then writes to
12e364b9
KC
1275 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1276 * to get called, at which point the appropriate CONTROLVM message is
1277 * retrieved from the list and responded to.
1278 */
1279
1280#define PARAHOTPLUG_TIMEOUT_MS 2000
1281
ec17f452
DB
1282/**
1283 * parahotplug_next_id() - generate unique int to match an outstanding CONTROLVM
1284 * message with a udev script /proc response
1285 *
1286 * Return: a unique integer value
12e364b9
KC
1287 */
1288static int
1289parahotplug_next_id(void)
1290{
1291 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1292
12e364b9
KC
1293 return atomic_inc_return(&id);
1294}
1295
ec17f452
DB
1296/**
1297 * parahotplug_next_expiration() - returns the time (in jiffies) when a
1298 * CONTROLVM message on the list should expire
1299 * -- PARAHOTPLUG_TIMEOUT_MS in the future
1300 *
1301 * Return: expected expiration time (in jiffies)
12e364b9
KC
1302 */
1303static unsigned long
1304parahotplug_next_expiration(void)
1305{
2cc1a1b3 1306 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1307}
1308
ec17f452
DB
1309/**
1310 * parahotplug_request_create() - create a parahotplug_request, which is
1311 * basically a wrapper for a CONTROLVM_MESSAGE
1312 * that we can stick on a list
1313 * @msg: the message to insert in the request
1314 *
1315 * Return: the request containing the provided message
12e364b9
KC
1316 */
1317static struct parahotplug_request *
3ab47701 1318parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1319{
ea0dcfcf
QL
1320 struct parahotplug_request *req;
1321
6a55e3c3 1322 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1323 if (!req)
12e364b9
KC
1324 return NULL;
1325
1326 req->id = parahotplug_next_id();
1327 req->expiration = parahotplug_next_expiration();
1328 req->msg = *msg;
1329
1330 return req;
1331}
1332
/**
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1342
ec17f452
DB
1343/**
1344 * parahotplug_request_kickoff() - initiate parahotplug request
1345 * @req: the request to initiate
1346 *
1347 * Cause uevent to run the user level script to do the disable/enable specified
1348 * in the parahotplug_request.
12e364b9
KC
1349 */
1350static void
1351parahotplug_request_kickoff(struct parahotplug_request *req)
1352{
2ea5117b 1353 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1354 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1355 env_func[40];
1356 char *envp[] = {
1357 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1358 };
1359
1360 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1361 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1362 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1363 cmd->device_change_state.state.active);
12e364b9 1364 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1365 cmd->device_change_state.bus_no);
12e364b9 1366 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1367 cmd->device_change_state.dev_no >> 3);
12e364b9 1368 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1369 cmd->device_change_state.dev_no & 0x7);
12e364b9 1370
eb34e877 1371 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1372 envp);
1373}
1374
ec17f452
DB
1375/**
1376 * parahotplug_request_complete() - mark request as complete
1377 * @id: the id of the request
1378 * @active: indicates whether the request is assigned to active partition
1379 *
12e364b9 1380 * Called from the /proc handler, which means the user script has
ec17f452 1381 * finished the enable/disable. Find the matching identifier, and
12e364b9 1382 * respond to the CONTROLVM message with success.
ec17f452
DB
1383 *
1384 * Return: 0 on success or -EINVAL on failure
12e364b9
KC
1385 */
1386static int
b06bdf7d 1387parahotplug_request_complete(int id, u16 active)
12e364b9 1388{
e82ba62e
JS
1389 struct list_head *pos;
1390 struct list_head *tmp;
12e364b9 1391
ddf5de53 1392 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1393
1394 /* Look for a request matching "id". */
ddf5de53 1395 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1396 struct parahotplug_request *req =
1397 list_entry(pos, struct parahotplug_request, list);
1398 if (req->id == id) {
ec17f452
DB
1399 /*
1400 * Found a match. Remove it from the list and
12e364b9
KC
1401 * respond.
1402 */
1403 list_del(pos);
ddf5de53 1404 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1405 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1406 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1407 controlvm_respond_physdev_changestate(
1408 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1409 req->msg.cmd.device_change_state.state);
12e364b9
KC
1410 parahotplug_request_destroy(req);
1411 return 0;
1412 }
1413 }
1414
ddf5de53 1415 spin_unlock(&parahotplug_request_list_lock);
119296ea 1416 return -EINVAL;
12e364b9
KC
1417}
1418
ec17f452
DB
1419/**
1420 * parahotplug_process_message() - enables or disables a PCI device by kicking
1421 * off a udev script
1422 * @inmsg: the message indicating whether to enable or disable
12e364b9 1423 */
bd5b9b32 1424static void
3ab47701 1425parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1426{
1427 struct parahotplug_request *req;
1428
1429 req = parahotplug_request_create(inmsg);
1430
38f736e9 1431 if (!req)
12e364b9 1432 return;
12e364b9 1433
2ea5117b 1434 if (inmsg->cmd.device_change_state.state.active) {
ec17f452
DB
1435 /*
1436 * For enable messages, just respond with success
1437 * right away. This is a bit of a hack, but there are
1438 * issues with the early enable messages we get (with
1439 * either the udev script not detecting that the device
1440 * is up, or not getting called at all). Fortunately
1441 * the messages that get lost don't matter anyway, as
1442 *
1443 * devices are automatically enabled at
1444 * initialization.
12e364b9
KC
1445 */
1446 parahotplug_request_kickoff(req);
a07d7c38
TS
1447 controlvm_respond_physdev_changestate
1448 (&inmsg->hdr,
1449 CONTROLVM_RESP_SUCCESS,
1450 inmsg->cmd.device_change_state.state);
12e364b9
KC
1451 parahotplug_request_destroy(req);
1452 } else {
ec17f452
DB
1453 /*
1454 * For disable messages, add the request to the
1455 * request list before kicking off the udev script. It
1456 * won't get responded to until the script has
1457 * indicated it's done.
1458 */
ddf5de53
BR
1459 spin_lock(&parahotplug_request_list_lock);
1460 list_add_tail(&req->list, &parahotplug_request_list);
1461 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1462
1463 parahotplug_request_kickoff(req);
1464 }
1465}
1466
5f3a7e36
DK
1467static inline unsigned int
1468issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1469{
1470 struct vmcall_io_controlvm_addr_params params;
1471 int result = VMCALL_SUCCESS;
1472 u64 physaddr;
1473
1474 physaddr = virt_to_phys(&params);
1475 ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1476 if (VMCALL_SUCCESSFUL(result)) {
1477 *control_addr = params.address;
1478 *control_bytes = params.channel_bytes;
1479 }
1480 return result;
1481}
1482
d5b3f1dc 1483static u64 controlvm_get_channel_address(void)
524b0b63 1484{
5fc0229a 1485 u64 addr = 0;
b3c55b13 1486 u32 size = 0;
524b0b63 1487
0aca7844 1488 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1489 return 0;
0aca7844 1490
524b0b63
BR
1491 return addr;
1492}
1493
12e364b9
KC
1494static void
1495setup_crash_devices_work_queue(struct work_struct *work)
1496{
e6bdb904
BR
1497 struct controlvm_message local_crash_bus_msg;
1498 struct controlvm_message local_crash_dev_msg;
3ab47701 1499 struct controlvm_message msg;
e6bdb904
BR
1500 u32 local_crash_msg_offset;
1501 u16 local_crash_msg_count;
12e364b9 1502
12e364b9
KC
1503 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1504
1505 /* send init chipset msg */
98d7b594 1506 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1507 msg.cmd.init_chipset.bus_count = 23;
1508 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1509
1510 chipset_init(&msg);
1511
12e364b9 1512 /* get saved message count */
c3d9a224 1513 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1514 offsetof(struct spar_controlvm_channel_protocol,
1515 saved_crash_message_count),
e6bdb904 1516 &local_crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
1517 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1518 POSTCODE_SEVERITY_ERR);
1519 return;
1520 }
1521
e6bdb904 1522 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 1523 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 1524 local_crash_msg_count,
12e364b9
KC
1525 POSTCODE_SEVERITY_ERR);
1526 return;
1527 }
1528
1529 /* get saved crash message offset */
c3d9a224 1530 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1531 offsetof(struct spar_controlvm_channel_protocol,
1532 saved_crash_message_offset),
e6bdb904 1533 &local_crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
1534 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1535 POSTCODE_SEVERITY_ERR);
1536 return;
1537 }
1538
1539 /* read create device message for storage bus offset */
c3d9a224 1540 if (visorchannel_read(controlvm_channel,
e6bdb904
BR
1541 local_crash_msg_offset,
1542 &local_crash_bus_msg,
3ab47701 1543 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1544 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1545 POSTCODE_SEVERITY_ERR);
1546 return;
1547 }
1548
1549 /* read create device message for storage device */
c3d9a224 1550 if (visorchannel_read(controlvm_channel,
e6bdb904 1551 local_crash_msg_offset +
3ab47701 1552 sizeof(struct controlvm_message),
e6bdb904 1553 &local_crash_dev_msg,
3ab47701 1554 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1555 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1556 POSTCODE_SEVERITY_ERR);
1557 return;
1558 }
1559
1560 /* reuse IOVM create bus message */
ebec8967 1561 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
e6bdb904 1562 bus_create(&local_crash_bus_msg);
75c1f8b7 1563 } else {
12e364b9
KC
1564 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1565 POSTCODE_SEVERITY_ERR);
1566 return;
1567 }
1568
1569 /* reuse create device message for storage device */
ebec8967 1570 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
e6bdb904 1571 my_device_create(&local_crash_dev_msg);
75c1f8b7 1572 } else {
12e364b9
KC
1573 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1574 POSTCODE_SEVERITY_ERR);
1575 return;
1576 }
12e364b9 1577 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
12e364b9
KC
1578}
1579
87241ab8 1580void
d32517e3 1581bus_create_response(struct visor_device *bus_info, int response)
12e364b9 1582{
4b4fd43a 1583 if (response >= 0)
0274b5ae 1584 bus_info->state.created = 1;
0274b5ae
DZ
1585
1586 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
1587 response);
1588
1589 kfree(bus_info->pending_msg_hdr);
1590 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
1591}
1592
87241ab8 1593void
d32517e3 1594bus_destroy_response(struct visor_device *bus_info, int response)
12e364b9 1595{
0274b5ae
DZ
1596 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
1597 response);
1598
1599 kfree(bus_info->pending_msg_hdr);
1600 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
1601}
1602
87241ab8 1603void
a298bc0b 1604device_create_response(struct visor_device *dev_info, int response)
12e364b9 1605{
0274b5ae
DZ
1606 if (response >= 0)
1607 dev_info->state.created = 1;
1608
1609 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
1610 response);
1611
1612 kfree(dev_info->pending_msg_hdr);
addce19f 1613 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
1614}
1615
87241ab8 1616void
a298bc0b 1617device_destroy_response(struct visor_device *dev_info, int response)
12e364b9 1618{
0274b5ae
DZ
1619 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
1620 response);
1621
1622 kfree(dev_info->pending_msg_hdr);
1623 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
1624}
1625
87241ab8 1626void
ea3a5aaf
DB
1627device_pause_response(struct visor_device *dev_info,
1628 int response)
12e364b9 1629{
12e364b9 1630 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 1631 dev_info, response,
bd0d2dcc 1632 segment_state_standby);
0274b5ae
DZ
1633
1634 kfree(dev_info->pending_msg_hdr);
1635 dev_info->pending_msg_hdr = NULL;
12e364b9 1636}
12e364b9 1637
87241ab8 1638void
a298bc0b 1639device_resume_response(struct visor_device *dev_info, int response)
12e364b9
KC
1640{
1641 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 1642 dev_info, response,
bd0d2dcc 1643 segment_state_running);
0274b5ae
DZ
1644
1645 kfree(dev_info->pending_msg_hdr);
1646 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
1647}
1648
ec17f452
DB
1649/**
1650 * devicedisabled_store() - disables the hotplug device
1651 * @dev: sysfs interface variable not utilized in this function
1652 * @attr: sysfs interface variable not utilized in this function
1653 * @buf: buffer containing the device id
1654 * @count: the size of the buffer
1655 *
1656 * The parahotplug/devicedisabled interface gets called by our support script
e56fa7cd
BR
1657 * when an SR-IOV device has been shut down. The ID is passed to the script
1658 * and then passed back when the device has been removed.
ec17f452
DB
1659 *
1660 * Return: the size of the buffer for success or negative for error
e56fa7cd
BR
1661 */
1662static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
1663 struct device_attribute *attr,
1664 const char *buf, size_t count)
e56fa7cd 1665{
94217363 1666 unsigned int id;
80224f06 1667 int err;
e56fa7cd 1668
ebec8967 1669 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
1670 return -EINVAL;
1671
80224f06
EA
1672 err = parahotplug_request_complete(id, 0);
1673 if (err < 0)
1674 return err;
e56fa7cd
BR
1675 return count;
1676}
1677
ec17f452
DB
1678/**
1679 * deviceenabled_store() - enables the hotplug device
1680 * @dev: sysfs interface variable not utilized in this function
1681 * @attr: sysfs interface variable not utilized in this function
1682 * @buf: buffer containing the device id
1683 * @count: the size of the buffer
1684 *
1685 * The parahotplug/deviceenabled interface gets called by our support script
e56fa7cd
BR
1686 * when an SR-IOV device has been recovered. The ID is passed to the script
1687 * and then passed back when the device has been brought back up.
ec17f452
DB
1688 *
1689 * Return: the size of the buffer for success or negative for error
e56fa7cd
BR
1690 */
1691static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
1692 struct device_attribute *attr,
1693 const char *buf, size_t count)
e56fa7cd 1694{
94217363 1695 unsigned int id;
e56fa7cd 1696
ebec8967 1697 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
1698 return -EINVAL;
1699
1700 parahotplug_request_complete(id, 1);
1701 return count;
1702}
1703
e3420ed6
EA
1704static int
1705visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
1706{
1707 unsigned long physaddr = 0;
1708 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
780fcad3 1709 u64 addr = 0;
e3420ed6
EA
1710
1711 /* sv_enable_dfp(); */
1712 if (offset & (PAGE_SIZE - 1))
1713 return -ENXIO; /* need aligned offsets */
1714
1715 switch (offset) {
1716 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
1717 vma->vm_flags |= VM_IO;
1718 if (!*file_controlvm_channel)
1719 return -ENXIO;
1720
a07d7c38
TS
1721 visorchannel_read
1722 (*file_controlvm_channel,
1723 offsetof(struct spar_controlvm_channel_protocol,
1724 gp_control_channel),
1725 &addr, sizeof(addr));
e3420ed6
EA
1726 if (!addr)
1727 return -ENXIO;
1728
1729 physaddr = (unsigned long)addr;
1730 if (remap_pfn_range(vma, vma->vm_start,
1731 physaddr >> PAGE_SHIFT,
1732 vma->vm_end - vma->vm_start,
1733 /*pgprot_noncached */
1734 (vma->vm_page_prot))) {
1735 return -EAGAIN;
1736 }
1737 break;
1738 default:
1739 return -ENXIO;
1740 }
1741 return 0;
1742}
1743
5f3a7e36
DK
1744static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
1745{
1746 u64 result = VMCALL_SUCCESS;
1747 u64 physaddr = 0;
1748
1749 ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
1750 result);
1751 return result;
1752}
1753
1754static inline int issue_vmcall_update_physical_time(u64 adjustment)
1755{
1756 int result = VMCALL_SUCCESS;
1757
1758 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
1759 return result;
1760}
1761
e3420ed6
EA
1762static long visorchipset_ioctl(struct file *file, unsigned int cmd,
1763 unsigned long arg)
1764{
2500276e 1765 u64 adjustment;
e3420ed6
EA
1766 s64 vrtc_offset;
1767
1768 switch (cmd) {
1769 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
1770 /* get the physical rtc offset */
1771 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
1772 if (copy_to_user((void __user *)arg, &vrtc_offset,
1773 sizeof(vrtc_offset))) {
1774 return -EFAULT;
1775 }
d5b3f1dc 1776 return 0;
e3420ed6
EA
1777 case VMCALL_UPDATE_PHYSICAL_TIME:
1778 if (copy_from_user(&adjustment, (void __user *)arg,
1779 sizeof(adjustment))) {
1780 return -EFAULT;
1781 }
1782 return issue_vmcall_update_physical_time(adjustment);
1783 default:
1784 return -EFAULT;
1785 }
1786}
1787
1788static const struct file_operations visorchipset_fops = {
1789 .owner = THIS_MODULE,
1790 .open = visorchipset_open,
1791 .read = NULL,
1792 .write = NULL,
1793 .unlocked_ioctl = visorchipset_ioctl,
1794 .release = visorchipset_release,
1795 .mmap = visorchipset_mmap,
1796};
1797
0f570fc0 1798static int
e3420ed6
EA
1799visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
1800{
1801 int rc = 0;
1802
1803 file_controlvm_channel = controlvm_channel;
1804 cdev_init(&file_cdev, &visorchipset_fops);
1805 file_cdev.owner = THIS_MODULE;
1806 if (MAJOR(major_dev) == 0) {
46168810 1807 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
e3420ed6
EA
1808 /* dynamic major device number registration required */
1809 if (rc < 0)
1810 return rc;
1811 } else {
1812 /* static major device number registration required */
46168810 1813 rc = register_chrdev_region(major_dev, 1, "visorchipset");
e3420ed6
EA
1814 if (rc < 0)
1815 return rc;
1816 }
1817 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
1818 if (rc < 0) {
1819 unregister_chrdev_region(major_dev, 1);
1820 return rc;
1821 }
1822 return 0;
1823}
1824
1366a3db
DK
1825static void
1826visorchipset_file_cleanup(dev_t major_dev)
1827{
1828 if (file_cdev.ops)
1829 cdev_del(&file_cdev);
1830 file_cdev.ops = NULL;
1831 unregister_chrdev_region(major_dev, 1);
1832}
1833
612b81c9
DK
1834static struct parser_context *
1835parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
1836{
1837 int allocbytes = sizeof(struct parser_context) + bytes;
1838 struct parser_context *ctx;
1839
1840 if (retry)
1841 *retry = false;
1842
1843 /*
1844 * alloc an 0 extra byte to ensure payload is
1845 * '\0'-terminated
1846 */
1847 allocbytes++;
1848 if ((controlvm_payload_bytes_buffered + bytes)
1849 > MAX_CONTROLVM_PAYLOAD_BYTES) {
1850 if (retry)
1851 *retry = true;
1852 return NULL;
1853 }
1854 ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
1855 if (!ctx) {
1856 if (retry)
1857 *retry = true;
1858 return NULL;
1859 }
1860
1861 ctx->allocbytes = allocbytes;
1862 ctx->param_bytes = bytes;
1863 ctx->curr = NULL;
1864 ctx->bytes_remaining = 0;
1865 ctx->byte_stream = false;
1866 if (local) {
1867 void *p;
1868
1869 if (addr > virt_to_phys(high_memory - 1))
1870 goto err_finish_ctx;
1871 p = __va((unsigned long)(addr));
1872 memcpy(ctx->data, p, bytes);
1873 } else {
1874 void *mapping = memremap(addr, bytes, MEMREMAP_WB);
1875
1876 if (!mapping)
1877 goto err_finish_ctx;
1878 memcpy(ctx->data, mapping, bytes);
1879 memunmap(mapping);
1880 }
1881
1882 ctx->byte_stream = true;
1883 controlvm_payload_bytes_buffered += ctx->param_bytes;
1884
1885 return ctx;
1886
1887err_finish_ctx:
1888 parser_done(ctx);
1889 return NULL;
1890}
1891
511474a5
DK
/**
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *	false - this function will return false only in the case where the
 *		controlvm message was NOT processed, but processing must be
 *		retried before reading the next controlvm message; a
 *		scenario where this can occur is when we need to throttle
 *		the allocation of memory in which to copy out controlvm
 *		payload data
 *	true  - processing of the controlvm message completed,
 *		either successfully or with an error
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/*
	 * Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		/*
		 * Allocation was throttled: tell the caller to retry this
		 * same message later rather than moving on to the next one.
		 */
		if (!parser_ctx && retry)
			return false;
	}

	/*
	 * Acknowledge real (non-test) messages up front by inserting a
	 * SUCCESS response on the ACK queue; the per-command handlers below
	 * send their own detailed responses where one is expected.
	 */
	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	/* dispatch on the controlvm message id */
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		/* physical devices are hot(un)plugged; virtual ones are not */
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/*
			 * save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		/* unknown message id: report it if a response is expected */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond
				(&inmsg.hdr,
				 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	/* release the payload buffer (also decrements the throttle count) */
	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
2006
8a285327
DK
2007/**
2008 * read_controlvm_event() - retreives the next message from the
2009 * CONTROLVM_QUEUE_EVENT queue in the controlvm
2010 * channel
2011 * @msg: pointer to the retrieved message
2012 *
2013 * Return: true if a valid message was retrieved or false otherwise
2014 */
2015static bool
2016read_controlvm_event(struct controlvm_message *msg)
2017{
2018 if (visorchannel_signalremove(controlvm_channel,
2019 CONTROLVM_QUEUE_EVENT, msg)) {
2020 /* got a message */
2021 if (msg->hdr.flags.test_message == 1)
2022 return false;
2023 return true;
2024 }
2025 return false;
2026}
2027
a9c73937
DK
2028/**
2029 * parahotplug_process_list() - remove any request from the list that's been on
2030 * there too long and respond with an error
2031 */
2032static void
2033parahotplug_process_list(void)
2034{
2035 struct list_head *pos;
2036 struct list_head *tmp;
2037
2038 spin_lock(&parahotplug_request_list_lock);
2039
2040 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
2041 struct parahotplug_request *req =
2042 list_entry(pos, struct parahotplug_request, list);
2043
2044 if (!time_after_eq(jiffies, req->expiration))
2045 continue;
2046
2047 list_del(pos);
2048 if (req->msg.hdr.flags.response_expected)
2049 controlvm_respond_physdev_changestate(
2050 &req->msg.hdr,
2051 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
2052 req->msg.cmd.device_change_state.state);
2053 parahotplug_request_destroy(req);
2054 }
2055
2056 spin_unlock(&parahotplug_request_list_lock);
2057}
2058
3d8394c8
DK
2059static void
2060controlvm_periodic_work(struct work_struct *work)
2061{
2062 struct controlvm_message inmsg;
2063 bool got_command = false;
2064 bool handle_command_failed = false;
2065
2066 while (visorchannel_signalremove(controlvm_channel,
2067 CONTROLVM_QUEUE_RESPONSE,
2068 &inmsg))
2069 ;
2070 if (!got_command) {
2071 if (controlvm_pending_msg_valid) {
2072 /*
2073 * we throttled processing of a prior
2074 * msg, so try to process it again
2075 * rather than reading a new one
2076 */
2077 inmsg = controlvm_pending_msg;
2078 controlvm_pending_msg_valid = false;
2079 got_command = true;
2080 } else {
2081 got_command = read_controlvm_event(&inmsg);
2082 }
2083 }
2084
2085 handle_command_failed = false;
2086 while (got_command && (!handle_command_failed)) {
2087 most_recent_message_jiffies = jiffies;
2088 if (handle_command(inmsg,
2089 visorchannel_get_physaddr
2090 (controlvm_channel)))
2091 got_command = read_controlvm_event(&inmsg);
2092 else {
2093 /*
2094 * this is a scenario where throttling
2095 * is required, but probably NOT an
2096 * error...; we stash the current
2097 * controlvm msg so we will attempt to
2098 * reprocess it on our next loop
2099 */
2100 handle_command_failed = true;
2101 controlvm_pending_msg = inmsg;
2102 controlvm_pending_msg_valid = true;
2103 }
2104 }
2105
2106 /* parahotplug_worker */
2107 parahotplug_process_list();
2108
2109 if (time_after(jiffies,
2110 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2111 /*
2112 * it's been longer than MIN_IDLE_SECONDS since we
2113 * processed our last controlvm message; slow down the
2114 * polling
2115 */
2116 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2117 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2118 } else {
2119 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2120 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2121 }
2122
2123 schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
2124}
2125
55c67dca
PB
/*
 * visorchipset_init() - ACPI .add callback; bring up the chipset driver
 * @acpi_device: the matched ACPI device (unused here)
 *
 * Locates and maps the controlvm channel, validates it, creates the
 * character device, starts the periodic controlvm poll (or the crash-dump
 * device setup when booting a kdump kernel), registers the platform
 * device, and finally initializes visorbus.  On any failure the goto
 * ladder unwinds exactly the steps already completed.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENODEV;
	u64 addr;
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	/* the channel address comes from the hypervisor; 0 means "none" */
	addr = controlvm_get_channel_address();
	if (!addr)
		goto error;

	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, 0,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		goto error;

	/* only proceed when the channel header says the client side is OK */
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
			visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		goto error_destroy_channel;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	err = visorchipset_file_init(major_dev, &controlvm_channel);
	if (err < 0)
		goto error_destroy_payload;

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);

	/* start polling at the fast rate; the worker re-arms itself */
	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		err = -ENODEV;
		goto error_cancel_work;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	err = visorbus_init();
	if (err < 0)
		goto error_unregister;

	return 0;

/* unwind ladder: each label undoes one successfully completed step */
error_unregister:
	platform_device_unregister(&visorchipset_platform_device);

error_cancel_work:
	cancel_delayed_work_sync(&periodic_controlvm_work);
	visorchipset_file_cleanup(major_dev);

error_destroy_payload:
	destroy_controlvm_payload_info(&controlvm_payload_info);

error_destroy_channel:
	visorchannel_destroy(controlvm_channel);

error:
	POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
	return err;
}
2199
55c67dca
PB
/*
 * visorchipset_exit() - ACPI .remove callback; tear the driver down
 * @acpi_device: the ACPI device being removed (unused here)
 *
 * Stops visorbus, cancels the polling worker before destroying the
 * channel it reads from, then releases the payload info, channel,
 * character device, and platform device.
 *
 * Return: always 0.
 */
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	/* make sure the worker is idle before freeing what it touches */
	cancel_delayed_work_sync(&periodic_controlvm_work);
	destroy_controlvm_payload_info(&controlvm_payload_info);

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
2218
/* ACPI IDs this driver binds to; empty entry terminates the table */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

/* ACPI driver: probe/remove map to chipset init/exit */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

/* expose the ID table so userspace tooling can autoload the module */
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
2236
d5b3f1dc
EA
2237static __init uint32_t visorutil_spar_detect(void)
2238{
2239 unsigned int eax, ebx, ecx, edx;
2240
0c9f3536 2241 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
d5b3f1dc
EA
2242 /* check the ID */
2243 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2244 return (ebx == UNISYS_SPAR_ID_EBX) &&
2245 (ecx == UNISYS_SPAR_ID_ECX) &&
2246 (edx == UNISYS_SPAR_ID_EDX);
2247 } else {
2248 return 0;
2249 }
2250}
55c67dca
PB
2251
2252static int init_unisys(void)
2253{
2254 int result;
35e606de 2255
d5b3f1dc 2256 if (!visorutil_spar_detect())
55c67dca
PB
2257 return -ENODEV;
2258
2259 result = acpi_bus_register_driver(&unisys_acpi_driver);
2260 if (result)
2261 return -ENODEV;
2262
2263 pr_info("Unisys Visorchipset Driver Loaded.\n");
2264 return 0;
2265};
2266
/* exit_unisys() - module exit point; unregister the ACPI driver */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
2271
12e364b9 2272module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2273MODULE_PARM_DESC(visorchipset_major,
2274 "major device number to use for the device node");
b615d628 2275
55c67dca
PB
/* module entry/exit points and module metadata */
module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);