]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/staging/unisys/visorbus/visorchipset.c
staging: unisys: Convert bus creation to use visor_device
[mirror_ubuntu-jammy-kernel.git] / drivers / staging / unisys / visorbus / visorchipset.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
55c67dca 18#include <linux/acpi.h>
c0a14641 19#include <linux/cdev.h>
46168810 20#include <linux/ctype.h>
e3420ed6
EA
21#include <linux/fs.h>
22#include <linux/mm.h>
12e364b9
KC
23#include <linux/nls.h>
24#include <linux/netdevice.h>
25#include <linux/platform_device.h>
90addb02 26#include <linux/uuid.h>
1ba00980 27#include <linux/crash_dump.h>
12e364b9 28
5f3a7e36 29#include "channel_guid.h"
55c67dca
PB
30#include "controlvmchannel.h"
31#include "controlvmcompletionstatus.h"
32#include "guestlinuxdebug.h"
33#include "periodic_work.h"
55c67dca
PB
34#include "version.h"
35#include "visorbus.h"
36#include "visorbus_private.h"
5f3a7e36 37#include "vmcallinterface.h"
55c67dca 38
12e364b9 39#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
12e364b9
KC
40
41#define MAX_NAME_SIZE 128
42#define MAX_IP_SIZE 50
43#define MAXOUTSTANDINGCHANNELCOMMAND 256
44#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
46168810 47#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
2ee0deec
PB
48
49#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
50
d5b3f1dc
EA
51
52#define UNISYS_SPAR_LEAF_ID 0x40000000
53
54/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
55#define UNISYS_SPAR_ID_EBX 0x73696e55
56#define UNISYS_SPAR_ID_ECX 0x70537379
57#define UNISYS_SPAR_ID_EDX 0x34367261
58
b615d628
JS
59/*
60 * Module parameters
61 */
b615d628 62static int visorchipset_major;
4da3336c 63static int visorchipset_visorbusregwait = 1; /* default is on */
b615d628 64static int visorchipset_holdchipsetready;
46168810 65static unsigned long controlvm_payload_bytes_buffered;
b615d628 66
e3420ed6
EA
67static int
68visorchipset_open(struct inode *inode, struct file *file)
69{
70 unsigned minor_number = iminor(inode);
71
72 if (minor_number)
73 return -ENODEV;
74 file->private_data = NULL;
75 return 0;
76}
77
/* Character-device release handler; nothing to tear down. */
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
83
12e364b9
KC
84/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
85* we switch to slow polling mode. As soon as we get a controlvm
86* message, we switch back to fast polling mode.
87*/
88#define MIN_IDLE_SECONDS 10
52063eca
JS
89static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
90static unsigned long most_recent_message_jiffies; /* when we got our last
bd5b9b32 91 * controlvm message */
4da3336c 92static int visorbusregistered;
12e364b9
KC
93
94#define MAX_CHIPSET_EVENTS 2
c242233e 95static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 96
46168810
EA
97struct parser_context {
98 unsigned long allocbytes;
99 unsigned long param_bytes;
100 u8 *curr;
101 unsigned long bytes_remaining;
102 bool byte_stream;
103 char data[0];
104};
105
9232d2d6
BR
106static struct delayed_work periodic_controlvm_work;
107static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 108static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 109
e3420ed6
EA
110static struct cdev file_cdev;
111static struct visorchannel **file_controlvm_channel;
da021f02 112static struct controlvm_message_header g_chipset_msg_hdr;
59827f00 113static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 114 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 115/* 0xffffff is an invalid Bus/Device number */
52063eca
JS
116static u32 g_diagpool_bus_no = 0xffffff;
117static u32 g_diagpool_dev_no = 0xffffff;
4f44b72d 118static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9 119
12e364b9 120#define is_diagpool_channel(channel_type_guid) \
59827f00
BR
121 (uuid_le_cmp(channel_type_guid,\
122 spar_diag_pool_channel_protocol_uuid) == 0)
12e364b9 123
1390b88c
BR
124static LIST_HEAD(bus_info_list);
125static LIST_HEAD(dev_info_list);
12e364b9 126
c3d9a224 127static struct visorchannel *controlvm_channel;
12e364b9 128
84982fbf 129/* Manages the request payload in the controlvm channel */
c1f834eb 130struct visor_controlvm_payload_info {
c242233e 131 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 132 u64 offset; /* offset from beginning of controlvm
12e364b9 133 * channel to beginning of payload * pool */
b3c55b13 134 u32 bytes; /* number of bytes in payload pool */
c1f834eb
JS
135};
136
137static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 138
12e364b9
KC
139/* The following globals are used to handle the scenario where we are unable to
140 * offload the payload from a controlvm message due to memory requirements. In
141 * this scenario, we simply stash the controlvm message, then attempt to
142 * process it again the next time controlvm_periodic_work() runs.
143 */
7166ed19 144static struct controlvm_message controlvm_pending_msg;
c79b28f7 145static bool controlvm_pending_msg_valid;
12e364b9 146
12e364b9
KC
147/* This identifies a data buffer that has been received via a controlvm messages
148 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
149 */
150struct putfile_buffer_entry {
151 struct list_head next; /* putfile_buffer_entry list */
317d9614 152 struct parser_context *parser_ctx; /* points to input data buffer */
12e364b9
KC
153};
154
155/* List of struct putfile_request *, via next_putfile_request member.
156 * Each entry in this list identifies an outstanding TRANSMIT_FILE
157 * conversation.
158 */
1eee0011 159static LIST_HEAD(putfile_request_list);
12e364b9
KC
160
161/* This describes a buffer and its current state of transfer (e.g., how many
162 * bytes have already been supplied as putfile data, and how many bytes are
163 * remaining) for a putfile_request.
164 */
165struct putfile_active_buffer {
166 /* a payload from a controlvm message, containing a file data buffer */
317d9614 167 struct parser_context *parser_ctx;
12e364b9
KC
168 /* points within data area of parser_ctx to next byte of data */
169 u8 *pnext;
170 /* # bytes left from <pnext> to the end of this data buffer */
171 size_t bytes_remaining;
172};
173
174#define PUTFILE_REQUEST_SIG 0x0906101302281211
175/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
176 * conversation. Structs of this type are dynamically linked into
177 * <Putfile_request_list>.
178 */
179struct putfile_request {
180 u64 sig; /* PUTFILE_REQUEST_SIG */
181
182 /* header from original TransmitFile request */
98d7b594 183 struct controlvm_message_header controlvm_header;
12e364b9
KC
184 u64 file_request_number; /* from original TransmitFile request */
185
186 /* link to next struct putfile_request */
187 struct list_head next_putfile_request;
188
189 /* most-recent sequence number supplied via a controlvm message */
190 u64 data_sequence_number;
191
192 /* head of putfile_buffer_entry list, which describes the data to be
193 * supplied as putfile data;
194 * - this list is added to when controlvm messages come in that supply
195 * file data
196 * - this list is removed from via the hotplug program that is actually
197 * consuming these buffers to write as file data */
198 struct list_head input_buffer_list;
199 spinlock_t req_list_lock; /* lock for input_buffer_list */
200
201 /* waiters for input_buffer_list to go non-empty */
202 wait_queue_head_t input_buffer_wq;
203
204 /* data not yet read within current putfile_buffer_entry */
205 struct putfile_active_buffer active_buf;
206
207 /* <0 = failed, 0 = in-progress, >0 = successful; */
208 /* note that this must be set with req_list_lock, and if you set <0, */
209 /* it is your responsibility to also free up all of the other objects */
210 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
211 /* before releasing the lock */
212 int completion_status;
213};
214
12e364b9
KC
215struct parahotplug_request {
216 struct list_head list;
217 int id;
218 unsigned long expiration;
3ab47701 219 struct controlvm_message msg;
12e364b9
KC
220};
221
ddf5de53
BR
222static LIST_HEAD(parahotplug_request_list);
223static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
224static void parahotplug_process_list(void);
225
226/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
227 * CONTROLVM_REPORTEVENT.
228 */
4da3336c 229static struct visorchipset_busdev_notifiers busdev_notifiers;
12e364b9 230
d32517e3
DZ
231static void bus_create_response(struct visor_device *p, int response);
232static void bus_destroy_response(struct visor_device *p, int response);
b4b598fd
DZ
233static void device_create_response(struct visorchipset_device_info *p,
234 int response);
235static void device_destroy_response(struct visorchipset_device_info *p,
236 int response);
237static void device_resume_response(struct visorchipset_device_info *p,
238 int response);
12e364b9 239
b4b598fd
DZ
240static void
241visorchipset_device_pause_response(struct visorchipset_device_info *p,
242 int response);
2ee0deec 243
8e3fedd6 244static struct visorchipset_busdev_responders busdev_responders = {
12e364b9
KC
245 .bus_create = bus_create_response,
246 .bus_destroy = bus_destroy_response,
247 .device_create = device_create_response,
248 .device_destroy = device_destroy_response,
927c7927 249 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
250 .device_resume = device_resume_response,
251};
252
253/* info for /dev/visorchipset */
5aa8ae57 254static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 255
19f6634f
BR
256/* prototypes for attributes */
257static ssize_t toolaction_show(struct device *dev,
8e76e695 258 struct device_attribute *attr, char *buf);
19f6634f 259static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
260 struct device_attribute *attr,
261 const char *buf, size_t count);
19f6634f
BR
262static DEVICE_ATTR_RW(toolaction);
263
54b31229 264static ssize_t boottotool_show(struct device *dev,
8e76e695 265 struct device_attribute *attr, char *buf);
54b31229 266static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
267 struct device_attribute *attr, const char *buf,
268 size_t count);
54b31229
BR
269static DEVICE_ATTR_RW(boottotool);
270
422af17c 271static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 272 char *buf);
422af17c 273static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 274 const char *buf, size_t count);
422af17c
BR
275static DEVICE_ATTR_RW(error);
276
277static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 278 char *buf);
422af17c 279static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 280 const char *buf, size_t count);
422af17c
BR
281static DEVICE_ATTR_RW(textid);
282
283static ssize_t remaining_steps_show(struct device *dev,
8e76e695 284 struct device_attribute *attr, char *buf);
422af17c 285static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
286 struct device_attribute *attr,
287 const char *buf, size_t count);
422af17c
BR
288static DEVICE_ATTR_RW(remaining_steps);
289
18b87ed1 290static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
291 struct device_attribute *attr,
292 const char *buf, size_t count);
18b87ed1
BR
293static DEVICE_ATTR_WO(chipsetready);
294
e56fa7cd 295static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
296 struct device_attribute *attr,
297 const char *buf, size_t count);
e56fa7cd
BR
298static DEVICE_ATTR_WO(devicedisabled);
299
300static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
301 struct device_attribute *attr,
302 const char *buf, size_t count);
e56fa7cd
BR
303static DEVICE_ATTR_WO(deviceenabled);
304
19f6634f
BR
305static struct attribute *visorchipset_install_attrs[] = {
306 &dev_attr_toolaction.attr,
54b31229 307 &dev_attr_boottotool.attr,
422af17c
BR
308 &dev_attr_error.attr,
309 &dev_attr_textid.attr,
310 &dev_attr_remaining_steps.attr,
19f6634f
BR
311 NULL
312};
313
314static struct attribute_group visorchipset_install_group = {
315 .name = "install",
316 .attrs = visorchipset_install_attrs
317};
318
18b87ed1
BR
319static struct attribute *visorchipset_guest_attrs[] = {
320 &dev_attr_chipsetready.attr,
321 NULL
322};
323
324static struct attribute_group visorchipset_guest_group = {
325 .name = "guest",
326 .attrs = visorchipset_guest_attrs
327};
328
e56fa7cd
BR
329static struct attribute *visorchipset_parahotplug_attrs[] = {
330 &dev_attr_devicedisabled.attr,
331 &dev_attr_deviceenabled.attr,
332 NULL
333};
334
335static struct attribute_group visorchipset_parahotplug_group = {
336 .name = "parahotplug",
337 .attrs = visorchipset_parahotplug_attrs
338};
339
19f6634f
BR
340static const struct attribute_group *visorchipset_dev_groups[] = {
341 &visorchipset_install_group,
18b87ed1 342 &visorchipset_guest_group,
e56fa7cd 343 &visorchipset_parahotplug_group,
19f6634f
BR
344 NULL
345};
346
12e364b9 347/* /sys/devices/platform/visorchipset */
eb34e877 348static struct platform_device visorchipset_platform_device = {
12e364b9
KC
349 .name = "visorchipset",
350 .id = -1,
19f6634f 351 .dev.groups = visorchipset_dev_groups,
12e364b9
KC
352};
353
354/* Function prototypes */
b3168c70 355static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
356 int response);
357static void controlvm_respond_chipset_init(
b3168c70 358 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
359 enum ultra_chipset_feature features);
360static void controlvm_respond_physdev_changestate(
b3168c70 361 struct controlvm_message_header *msg_hdr, int response,
98d7b594 362 struct spar_segment_state state);
12e364b9 363
46168810 364
2ee0deec
PB
365static void parser_done(struct parser_context *ctx);
366
46168810 367static struct parser_context *
fbf35536 368parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
46168810
EA
369{
370 int allocbytes = sizeof(struct parser_context) + bytes;
371 struct parser_context *rc = NULL;
372 struct parser_context *ctx = NULL;
46168810
EA
373
374 if (retry)
375 *retry = false;
cc55b5c5
JS
376
377 /*
378 * alloc an 0 extra byte to ensure payload is
379 * '\0'-terminated
380 */
381 allocbytes++;
46168810
EA
382 if ((controlvm_payload_bytes_buffered + bytes)
383 > MAX_CONTROLVM_PAYLOAD_BYTES) {
384 if (retry)
385 *retry = true;
386 rc = NULL;
387 goto cleanup;
388 }
389 ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
390 if (!ctx) {
391 if (retry)
392 *retry = true;
393 rc = NULL;
394 goto cleanup;
395 }
396
397 ctx->allocbytes = allocbytes;
398 ctx->param_bytes = bytes;
399 ctx->curr = NULL;
400 ctx->bytes_remaining = 0;
401 ctx->byte_stream = false;
402 if (local) {
403 void *p;
404
405 if (addr > virt_to_phys(high_memory - 1)) {
406 rc = NULL;
407 goto cleanup;
408 }
409 p = __va((unsigned long) (addr));
410 memcpy(ctx->data, p, bytes);
411 } else {
dd412751
JS
412 void __iomem *mapping;
413
414 if (!request_mem_region(addr, bytes, "visorchipset")) {
46168810
EA
415 rc = NULL;
416 goto cleanup;
417 }
712c03dc 418
dd412751
JS
419 mapping = ioremap_cache(addr, bytes);
420 if (!mapping) {
421 release_mem_region(addr, bytes);
46168810
EA
422 rc = NULL;
423 goto cleanup;
424 }
dd412751
JS
425 memcpy_fromio(ctx->data, mapping, bytes);
426 release_mem_region(addr, bytes);
46168810 427 }
46168810 428
cc55b5c5 429 ctx->byte_stream = true;
46168810
EA
430 rc = ctx;
431cleanup:
46168810
EA
432 if (rc) {
433 controlvm_payload_bytes_buffered += ctx->param_bytes;
434 } else {
435 if (ctx) {
436 parser_done(ctx);
437 ctx = NULL;
438 }
439 }
440 return rc;
441}
442
464129ed 443static uuid_le
46168810
EA
444parser_id_get(struct parser_context *ctx)
445{
446 struct spar_controlvm_parameters_header *phdr = NULL;
447
448 if (ctx == NULL)
449 return NULL_UUID_LE;
450 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
451 return phdr->id;
452}
453
/** Selects which string field of the controlvm parameters header
 * parser_param_start() should position the parser over.
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};
464
464129ed 465static void
2ee0deec
PB
466parser_param_start(struct parser_context *ctx,
467 enum PARSER_WHICH_STRING which_string)
46168810
EA
468{
469 struct spar_controlvm_parameters_header *phdr = NULL;
470
471 if (ctx == NULL)
472 goto Away;
473 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
474 switch (which_string) {
475 case PARSERSTRING_INITIATOR:
476 ctx->curr = ctx->data + phdr->initiator_offset;
477 ctx->bytes_remaining = phdr->initiator_length;
478 break;
479 case PARSERSTRING_TARGET:
480 ctx->curr = ctx->data + phdr->target_offset;
481 ctx->bytes_remaining = phdr->target_length;
482 break;
483 case PARSERSTRING_CONNECTION:
484 ctx->curr = ctx->data + phdr->connection_offset;
485 ctx->bytes_remaining = phdr->connection_length;
486 break;
487 case PARSERSTRING_NAME:
488 ctx->curr = ctx->data + phdr->name_offset;
489 ctx->bytes_remaining = phdr->name_length;
490 break;
491 default:
492 break;
493 }
494
495Away:
496 return;
497}
498
464129ed 499static void parser_done(struct parser_context *ctx)
46168810
EA
500{
501 if (!ctx)
502 return;
503 controlvm_payload_bytes_buffered -= ctx->param_bytes;
504 kfree(ctx);
505}
506
464129ed 507static void *
46168810
EA
508parser_string_get(struct parser_context *ctx)
509{
510 u8 *pscan;
511 unsigned long nscan;
512 int value_length = -1;
513 void *value = NULL;
514 int i;
515
516 if (!ctx)
517 return NULL;
518 pscan = ctx->curr;
519 nscan = ctx->bytes_remaining;
520 if (nscan == 0)
521 return NULL;
522 if (!pscan)
523 return NULL;
524 for (i = 0, value_length = -1; i < nscan; i++)
525 if (pscan[i] == '\0') {
526 value_length = i;
527 break;
528 }
529 if (value_length < 0) /* '\0' was not included in the length */
530 value_length = nscan;
531 value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
532 if (value == NULL)
533 return NULL;
534 if (value_length > 0)
535 memcpy(value, pscan, value_length);
536 ((u8 *) (value))[value_length] = '\0';
537 return value;
538}
539
540
d746cb55
VB
541static ssize_t toolaction_show(struct device *dev,
542 struct device_attribute *attr,
543 char *buf)
19f6634f 544{
01f4d85a 545 u8 tool_action;
19f6634f 546
c3d9a224 547 visorchannel_read(controlvm_channel,
d19642f6 548 offsetof(struct spar_controlvm_channel_protocol,
8e76e695 549 tool_action), &tool_action, sizeof(u8));
01f4d85a 550 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
551}
552
d746cb55
VB
553static ssize_t toolaction_store(struct device *dev,
554 struct device_attribute *attr,
555 const char *buf, size_t count)
19f6634f 556{
01f4d85a 557 u8 tool_action;
66e24b76 558 int ret;
19f6634f 559
ebec8967 560 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
561 return -EINVAL;
562
c3d9a224 563 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
564 offsetof(struct spar_controlvm_channel_protocol,
565 tool_action),
01f4d85a 566 &tool_action, sizeof(u8));
66e24b76
BR
567
568 if (ret)
569 return ret;
e22a4a0f 570 return count;
19f6634f
BR
571}
572
d746cb55
VB
573static ssize_t boottotool_show(struct device *dev,
574 struct device_attribute *attr,
575 char *buf)
54b31229 576{
365522d9 577 struct efi_spar_indication efi_spar_indication;
54b31229 578
c3d9a224 579 visorchannel_read(controlvm_channel,
8e76e695
BR
580 offsetof(struct spar_controlvm_channel_protocol,
581 efi_spar_ind), &efi_spar_indication,
582 sizeof(struct efi_spar_indication));
54b31229 583 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 584 efi_spar_indication.boot_to_tool);
54b31229
BR
585}
586
d746cb55
VB
587static ssize_t boottotool_store(struct device *dev,
588 struct device_attribute *attr,
589 const char *buf, size_t count)
54b31229 590{
66e24b76 591 int val, ret;
365522d9 592 struct efi_spar_indication efi_spar_indication;
54b31229 593
ebec8967 594 if (kstrtoint(buf, 10, &val))
66e24b76
BR
595 return -EINVAL;
596
365522d9 597 efi_spar_indication.boot_to_tool = val;
c3d9a224 598 ret = visorchannel_write(controlvm_channel,
d19642f6 599 offsetof(struct spar_controlvm_channel_protocol,
8e76e695
BR
600 efi_spar_ind), &(efi_spar_indication),
601 sizeof(struct efi_spar_indication));
66e24b76
BR
602
603 if (ret)
604 return ret;
e22a4a0f 605 return count;
54b31229 606}
422af17c
BR
607
608static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 609 char *buf)
422af17c
BR
610{
611 u32 error;
612
8e76e695
BR
613 visorchannel_read(controlvm_channel,
614 offsetof(struct spar_controlvm_channel_protocol,
615 installation_error),
616 &error, sizeof(u32));
422af17c
BR
617 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
618}
619
620static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 621 const char *buf, size_t count)
422af17c
BR
622{
623 u32 error;
66e24b76 624 int ret;
422af17c 625
ebec8967 626 if (kstrtou32(buf, 10, &error))
66e24b76
BR
627 return -EINVAL;
628
c3d9a224 629 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
630 offsetof(struct spar_controlvm_channel_protocol,
631 installation_error),
632 &error, sizeof(u32));
66e24b76
BR
633 if (ret)
634 return ret;
e22a4a0f 635 return count;
422af17c
BR
636}
637
638static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 639 char *buf)
422af17c 640{
10dbf0e3 641 u32 text_id;
422af17c 642
8e76e695
BR
643 visorchannel_read(controlvm_channel,
644 offsetof(struct spar_controlvm_channel_protocol,
645 installation_text_id),
646 &text_id, sizeof(u32));
10dbf0e3 647 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
648}
649
650static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 651 const char *buf, size_t count)
422af17c 652{
10dbf0e3 653 u32 text_id;
66e24b76 654 int ret;
422af17c 655
ebec8967 656 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
657 return -EINVAL;
658
c3d9a224 659 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
660 offsetof(struct spar_controlvm_channel_protocol,
661 installation_text_id),
662 &text_id, sizeof(u32));
66e24b76
BR
663 if (ret)
664 return ret;
e22a4a0f 665 return count;
422af17c
BR
666}
667
422af17c 668static ssize_t remaining_steps_show(struct device *dev,
8e76e695 669 struct device_attribute *attr, char *buf)
422af17c 670{
ee8da290 671 u16 remaining_steps;
422af17c 672
c3d9a224 673 visorchannel_read(controlvm_channel,
8e76e695
BR
674 offsetof(struct spar_controlvm_channel_protocol,
675 installation_remaining_steps),
676 &remaining_steps, sizeof(u16));
ee8da290 677 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
678}
679
680static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
681 struct device_attribute *attr,
682 const char *buf, size_t count)
422af17c 683{
ee8da290 684 u16 remaining_steps;
66e24b76 685 int ret;
422af17c 686
ebec8967 687 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
688 return -EINVAL;
689
c3d9a224 690 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
691 offsetof(struct spar_controlvm_channel_protocol,
692 installation_remaining_steps),
693 &remaining_steps, sizeof(u16));
66e24b76
BR
694 if (ret)
695 return ret;
e22a4a0f 696 return count;
422af17c
BR
697}
698
12e364b9 699static void
9b989a98 700dev_info_clear(void *v)
12e364b9 701{
246e0cd0 702 struct visorchipset_device_info *p =
bbd4be30 703 (struct visorchipset_device_info *) v;
26eb2c0c 704
246e0cd0 705 memset(p, 0, sizeof(struct visorchipset_device_info));
12e364b9
KC
706}
707
ab0592b9
DZ
708struct visor_busdev {
709 u32 bus_no;
710 u32 dev_no;
711};
712
713static int match_visorbus_dev_by_id(struct device *dev, void *data)
714{
715 struct visor_device *vdev = to_visor_device(dev);
716 struct visor_busdev *id = (struct visor_busdev *)data;
717 u32 bus_no = id->bus_no;
718 u32 dev_no = id->dev_no;
719
65bd6e46
DZ
720 if ((vdev->chipset_bus_no == bus_no) &&
721 (vdev->chipset_dev_no == dev_no))
ab0592b9
DZ
722 return 1;
723
724 return 0;
725}
726struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
727 struct visor_device *from)
728{
729 struct device *dev;
730 struct device *dev_start = NULL;
731 struct visor_device *vdev = NULL;
732 struct visor_busdev id = {
733 .bus_no = bus_no,
734 .dev_no = dev_no
735 };
736
737 if (from)
738 dev_start = &from->device;
739 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
740 match_visorbus_dev_by_id);
741 if (dev)
742 vdev = to_visor_device(dev);
743 return vdev;
744}
745EXPORT_SYMBOL(visorbus_get_device_by_id);
746
d480f6a2
JS
747static struct visorchipset_device_info *
748device_find(struct list_head *list, u32 bus_no, u32 dev_no)
749{
750 struct visorchipset_device_info *p;
751
752 list_for_each_entry(p, list, entry) {
753 if (p->bus_no == bus_no && p->dev_no == dev_no)
754 return p;
755 }
756
757 return NULL;
758}
759
28723521
JS
760static void busdevices_del(struct list_head *list, u32 bus_no)
761{
762 struct visorchipset_device_info *p, *tmp;
763
764 list_for_each_entry_safe(p, tmp, list, entry) {
765 if (p->bus_no == bus_no) {
766 list_del(&p->entry);
767 kfree(p);
768 }
769 }
770}
771
c242233e 772static u8
12e364b9
KC
773check_chipset_events(void)
774{
775 int i;
c242233e 776 u8 send_msg = 1;
12e364b9
KC
777 /* Check events to determine if response should be sent */
778 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
779 send_msg &= chipset_events[i];
780 return send_msg;
781}
782
783static void
784clear_chipset_events(void)
785{
786 int i;
787 /* Clear chipset_events */
788 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
789 chipset_events[i] = 0;
790}
791
792void
4da3336c 793visorchipset_register_busdev(
fe90d892 794 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 795 struct visorchipset_busdev_responders *responders,
1e7a59c1 796 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 797{
8f1947ac 798 down(&notifier_lock);
38f736e9 799 if (!notifiers) {
4da3336c
DK
800 memset(&busdev_notifiers, 0,
801 sizeof(busdev_notifiers));
802 visorbusregistered = 0; /* clear flag */
12e364b9 803 } else {
4da3336c
DK
804 busdev_notifiers = *notifiers;
805 visorbusregistered = 1; /* set flag */
12e364b9
KC
806 }
807 if (responders)
8e3fedd6 808 *responders = busdev_responders;
1e7a59c1
BR
809 if (driver_info)
810 bus_device_info_init(driver_info, "chipset", "visorchipset",
8e76e695 811 VERSION, NULL);
12e364b9 812
8f1947ac 813 up(&notifier_lock);
12e364b9 814}
4da3336c 815EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
12e364b9
KC
816
817static void
818cleanup_controlvm_structures(void)
819{
246e0cd0 820 struct visorchipset_device_info *di, *tmp_di;
12e364b9 821
1390b88c 822 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
9b989a98 823 dev_info_clear(di);
12e364b9
KC
824 list_del(&di->entry);
825 kfree(di);
826 }
827}
828
829static void
3ab47701 830chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
831{
832 static int chipset_inited;
b9b141e8 833 enum ultra_chipset_feature features = 0;
12e364b9
KC
834 int rc = CONTROLVM_RESP_SUCCESS;
835
836 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
837 if (chipset_inited) {
22ad57ba 838 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 839 goto cleanup;
12e364b9
KC
840 }
841 chipset_inited = 1;
842 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
843
844 /* Set features to indicate we support parahotplug (if Command
845 * also supports it). */
846 features =
2ea5117b 847 inmsg->cmd.init_chipset.
12e364b9
KC
848 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
849
850 /* Set the "reply" bit so Command knows this is a
851 * features-aware driver. */
852 features |= ULTRA_CHIPSET_FEATURE_REPLY;
853
e3199b2e 854cleanup:
12e364b9
KC
855 if (rc < 0)
856 cleanup_controlvm_structures();
98d7b594 857 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
858 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
859}
860
861static void
3ab47701 862controlvm_init_response(struct controlvm_message *msg,
b3168c70 863 struct controlvm_message_header *msg_hdr, int response)
12e364b9 864{
3ab47701 865 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 866 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
867 msg->hdr.payload_bytes = 0;
868 msg->hdr.payload_vm_offset = 0;
869 msg->hdr.payload_max_bytes = 0;
12e364b9 870 if (response < 0) {
98d7b594
BR
871 msg->hdr.flags.failed = 1;
872 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
873 }
874}
875
876static void
b3168c70 877controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 878{
3ab47701 879 struct controlvm_message outmsg;
26eb2c0c 880
b3168c70 881 controlvm_init_response(&outmsg, msg_hdr, response);
12e364b9
KC
882 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
883 * back the deviceChangeState structure in the packet. */
b3168c70 884 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
0639ba67
BR
885 g_devicechangestate_packet.device_change_state.bus_no ==
886 g_diagpool_bus_no &&
887 g_devicechangestate_packet.device_change_state.dev_no ==
83d48905 888 g_diagpool_dev_no)
4f44b72d 889 outmsg.cmd = g_devicechangestate_packet;
2098dbd1 890 if (outmsg.hdr.flags.test_message == 1)
12e364b9 891 return;
2098dbd1 892
c3d9a224 893 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 894 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
895 return;
896 }
897}
898
899static void
b3168c70 900controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 901 int response,
b9b141e8 902 enum ultra_chipset_feature features)
12e364b9 903{
3ab47701 904 struct controlvm_message outmsg;
26eb2c0c 905
b3168c70 906 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 907 outmsg.cmd.init_chipset.features = features;
c3d9a224 908 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 909 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
910 return;
911 }
912}
913
98d7b594 914static void controlvm_respond_physdev_changestate(
b3168c70 915 struct controlvm_message_header *msg_hdr, int response,
98d7b594 916 struct spar_segment_state state)
12e364b9 917{
3ab47701 918 struct controlvm_message outmsg;
26eb2c0c 919
b3168c70 920 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
921 outmsg.cmd.device_change_state.state = state;
922 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 923 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 924 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
925 return;
926 }
927}
928
/* Distinguishes whether a crash-related controlvm object refers to a
 * device or a bus.
 */
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
933
12e364b9 934static void
0274b5ae
DZ
935bus_responder(enum controlvm_id cmd_id,
936 struct controlvm_message_header *pending_msg_hdr,
3032aedd 937 int response)
12e364b9 938{
0274b5ae
DZ
939 if (pending_msg_hdr == NULL)
940 return; /* no controlvm response needed */
12e364b9 941
0274b5ae 942 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 943 return;
0aca7844 944
0274b5ae 945 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
946}
947
948static void
fbb31f48 949device_changestate_responder(enum controlvm_id cmd_id,
b4b598fd 950 struct visorchipset_device_info *p, int response,
fbb31f48 951 struct spar_segment_state response_state)
12e364b9 952{
3ab47701 953 struct controlvm_message outmsg;
b4b598fd
DZ
954 u32 bus_no = p->bus_no;
955 u32 dev_no = p->dev_no;
12e364b9 956
0274b5ae 957 if (p->pending_msg_hdr == NULL)
12e364b9 958 return; /* no controlvm response needed */
0274b5ae 959 if (p->pending_msg_hdr->id != cmd_id)
12e364b9 960 return;
12e364b9 961
0274b5ae 962 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
12e364b9 963
fbb31f48
BR
964 outmsg.cmd.device_change_state.bus_no = bus_no;
965 outmsg.cmd.device_change_state.dev_no = dev_no;
966 outmsg.cmd.device_change_state.state = response_state;
12e364b9 967
c3d9a224 968 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 969 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 970 return;
12e364b9
KC
971}
972
973static void
0274b5ae
DZ
974device_responder(enum controlvm_id cmd_id,
975 struct controlvm_message_header *pending_msg_hdr,
b4b598fd 976 int response)
12e364b9 977{
0274b5ae 978 if (pending_msg_hdr == NULL)
12e364b9 979 return; /* no controlvm response needed */
0aca7844 980
0274b5ae 981 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 982 return;
0aca7844 983
0274b5ae 984 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
985}
986
/* Finish processing a CONTROLVM bus message: record a pending response
 * header on the bus if the sender expects one, then (serialized by
 * notifier_lock) invoke the registered bus notifier for @cmd.  If no
 * notifier ran, answer the message directly via bus_responder().
 *
 * @bus_info:      bus the message refers to; NULL on early error paths
 * @cmd:           CONTROLVM command id being completed
 * @msg_hdr:       header of the inbound message
 * @response:      CONTROLVM_RESP_* code computed so far by the caller
 * @need_response: whether the sender set flags.response_expected
 */
static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		/* Copy the header now; the response is sent later, after
		 * the inbound message's storage may have been reused.
		 */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		bus_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
1054
1055static void
b4b598fd
DZ
1056device_epilog(struct visorchipset_device_info *dev_info,
1057 struct spar_segment_state state, u32 cmd,
2836c6a8 1058 struct controlvm_message_header *msg_hdr, int response,
f4c11551 1059 bool need_response, bool for_visorbus)
12e364b9 1060{
e82ba62e 1061 struct visorchipset_busdev_notifiers *notifiers;
f4c11551 1062 bool notified = false;
b4b598fd
DZ
1063 u32 bus_no = dev_info->bus_no;
1064 u32 dev_no = dev_info->dev_no;
0274b5ae 1065 struct controlvm_message_header *pmsg_hdr = NULL;
12e364b9 1066
12e364b9
KC
1067 char *envp[] = {
1068 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
1069 NULL
1070 };
1071
4da3336c
DK
1072 notifiers = &busdev_notifiers;
1073
0274b5ae
DZ
1074 if (!dev_info) {
1075 /* relying on a valid passed in response code */
1076 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
1077 pmsg_hdr = msg_hdr;
1078 goto away;
1079 }
1080
1081 if (dev_info->pending_msg_hdr) {
1082 /* only non-NULL if dev is still waiting on a response */
1083 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1084 pmsg_hdr = dev_info->pending_msg_hdr;
1085 goto away;
1086 }
1087
2836c6a8 1088 if (need_response) {
0274b5ae
DZ
1089 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1090 if (!pmsg_hdr) {
1091 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1092 goto away;
1093 }
1094
1095 memcpy(pmsg_hdr, msg_hdr,
98d7b594 1096 sizeof(struct controlvm_message_header));
0274b5ae 1097 dev_info->pending_msg_hdr = pmsg_hdr;
75c1f8b7 1098 }
12e364b9 1099
8f1947ac 1100 down(&notifier_lock);
12e364b9
KC
1101 if (response >= 0) {
1102 switch (cmd) {
1103 case CONTROLVM_DEVICE_CREATE:
1104 if (notifiers->device_create) {
b4b598fd 1105 (*notifiers->device_create) (dev_info);
f4c11551 1106 notified = true;
12e364b9
KC
1107 }
1108 break;
1109 case CONTROLVM_DEVICE_CHANGESTATE:
1110 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
1111 if (state.alive == segment_state_running.alive &&
1112 state.operating ==
1113 segment_state_running.operating) {
12e364b9 1114 if (notifiers->device_resume) {
b4b598fd 1115 (*notifiers->device_resume) (dev_info);
f4c11551 1116 notified = true;
12e364b9
KC
1117 }
1118 }
1119 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 1120 else if (state.alive == segment_state_standby.alive &&
3f833b54 1121 state.operating ==
bd0d2dcc 1122 segment_state_standby.operating) {
12e364b9
KC
1123 /* technically this is standby case
1124 * where server is lost
1125 */
1126 if (notifiers->device_pause) {
b4b598fd 1127 (*notifiers->device_pause) (dev_info);
f4c11551 1128 notified = true;
12e364b9 1129 }
bd0d2dcc 1130 } else if (state.alive == segment_state_paused.alive &&
3f833b54 1131 state.operating ==
bd0d2dcc 1132 segment_state_paused.operating) {
12e364b9
KC
1133 /* this is lite pause where channel is
1134 * still valid just 'pause' of it
1135 */
2836c6a8
BR
1136 if (bus_no == g_diagpool_bus_no &&
1137 dev_no == g_diagpool_dev_no) {
12e364b9
KC
1138 /* this will trigger the
1139 * diag_shutdown.sh script in
1140 * the visorchipset hotplug */
1141 kobject_uevent_env
eb34e877 1142 (&visorchipset_platform_device.dev.
12e364b9
KC
1143 kobj, KOBJ_ONLINE, envp);
1144 }
1145 }
1146 break;
1147 case CONTROLVM_DEVICE_DESTROY:
1148 if (notifiers->device_destroy) {
b4b598fd 1149 (*notifiers->device_destroy) (dev_info);
f4c11551 1150 notified = true;
12e364b9
KC
1151 }
1152 break;
1153 }
1154 }
0274b5ae 1155away:
12e364b9
KC
1156 if (notified)
1157 /* The callback function just called above is responsible
929aa8ae 1158 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1159 * function, which will call device_responder()
1160 */
1161 ;
1162 else
0274b5ae
DZ
1163 /*
1164 * Do not kfree(pmsg_hdr) as this is the failure path.
1165 * The success path ('notified') will call the responder
1166 * directly and kfree() there.
1167 */
1168 device_responder(cmd, pmsg_hdr, response);
8f1947ac 1169 up(&notifier_lock);
12e364b9
KC
1170}
1171
1172static void
3ab47701 1173bus_create(struct controlvm_message *inmsg)
12e364b9 1174{
2ea5117b 1175 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1176 u32 bus_no = cmd->create_bus.bus_no;
12e364b9 1177 int rc = CONTROLVM_RESP_SUCCESS;
d32517e3 1178 struct visor_device *bus_info;
b32c4997 1179 struct visorchannel *visorchannel;
12e364b9 1180
d32517e3 1181 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
6c5fed35
BR
1182 if (bus_info && (bus_info->state.created == 1)) {
1183 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1184 POSTCODE_SEVERITY_ERR);
22ad57ba 1185 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
6c5fed35 1186 goto cleanup;
12e364b9 1187 }
6c5fed35
BR
1188 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1189 if (!bus_info) {
1190 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1191 POSTCODE_SEVERITY_ERR);
22ad57ba 1192 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
6c5fed35 1193 goto cleanup;
12e364b9
KC
1194 }
1195
d32517e3
DZ
1196 bus_info->chipset_bus_no = bus_no;
1197 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
12e364b9 1198
6c5fed35 1199 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1200
b32c4997
DZ
1201 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
1202 cmd->create_bus.channel_bytes,
1203 GFP_KERNEL,
1204 cmd->create_bus.bus_data_type_uuid);
1205
1206 if (!visorchannel) {
1207 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1208 POSTCODE_SEVERITY_ERR);
1209 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1210 kfree(bus_info);
1211 bus_info = NULL;
1212 goto cleanup;
1213 }
1214 bus_info->visorchannel = visorchannel;
12e364b9 1215
6c5fed35 1216 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1217
6c5fed35 1218cleanup:
3032aedd 1219 bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1220 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1221}
1222
1223static void
3ab47701 1224bus_destroy(struct controlvm_message *inmsg)
12e364b9 1225{
2ea5117b 1226 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1227 u32 bus_no = cmd->destroy_bus.bus_no;
d32517e3 1228 struct visor_device *bus_info;
12e364b9
KC
1229 int rc = CONTROLVM_RESP_SUCCESS;
1230
d32517e3 1231 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
dff54cd6 1232 if (!bus_info)
22ad57ba 1233 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1234 else if (bus_info->state.created == 0)
22ad57ba 1235 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1236
3032aedd 1237 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1238 rc, inmsg->hdr.flags.response_expected == 1);
d32517e3
DZ
1239
1240 /* bus_info is freed as part of the busdevice_release function */
12e364b9
KC
1241}
1242
1243static void
317d9614
BR
1244bus_configure(struct controlvm_message *inmsg,
1245 struct parser_context *parser_ctx)
12e364b9 1246{
2ea5117b 1247 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e 1248 u32 bus_no;
d32517e3 1249 struct visor_device *bus_info;
12e364b9 1250 int rc = CONTROLVM_RESP_SUCCESS;
12e364b9 1251
654bada0
BR
1252 bus_no = cmd->configure_bus.bus_no;
1253 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1254 POSTCODE_SEVERITY_INFO);
12e364b9 1255
d32517e3 1256 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
654bada0
BR
1257 if (!bus_info) {
1258 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1259 POSTCODE_SEVERITY_ERR);
22ad57ba 1260 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1261 } else if (bus_info->state.created == 0) {
1262 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1263 POSTCODE_SEVERITY_ERR);
22ad57ba 1264 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
0274b5ae 1265 } else if (bus_info->pending_msg_hdr != NULL) {
654bada0 1266 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1267 POSTCODE_SEVERITY_ERR);
22ad57ba 1268 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0 1269 } else {
b32c4997
DZ
1270 visorchannel_set_clientpartition(bus_info->visorchannel,
1271 cmd->configure_bus.guest_handle);
654bada0
BR
1272 bus_info->partition_uuid = parser_id_get(parser_ctx);
1273 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1274 bus_info->name = parser_string_get(parser_ctx);
1275
654bada0
BR
1276 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1277 POSTCODE_SEVERITY_INFO);
12e364b9 1278 }
3032aedd 1279 bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1280 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1281}
1282
1283static void
3ab47701 1284my_device_create(struct controlvm_message *inmsg)
12e364b9 1285{
2ea5117b 1286 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1287 u32 bus_no = cmd->create_device.bus_no;
1288 u32 dev_no = cmd->create_device.dev_no;
e82ba62e 1289 struct visorchipset_device_info *dev_info;
d32517e3 1290 struct visor_device *bus_info;
b32c4997 1291 struct visorchannel *visorchannel;
12e364b9
KC
1292 int rc = CONTROLVM_RESP_SUCCESS;
1293
d480f6a2 1294 dev_info = device_find(&dev_info_list, bus_no, dev_no);
c60c8e26
BR
1295 if (dev_info && (dev_info->state.created == 1)) {
1296 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1297 POSTCODE_SEVERITY_ERR);
22ad57ba 1298 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
c60c8e26 1299 goto cleanup;
12e364b9 1300 }
d32517e3 1301 bus_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
c60c8e26
BR
1302 if (!bus_info) {
1303 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1304 POSTCODE_SEVERITY_ERR);
22ad57ba 1305 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1306 goto cleanup;
12e364b9 1307 }
c60c8e26
BR
1308 if (bus_info->state.created == 0) {
1309 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1310 POSTCODE_SEVERITY_ERR);
22ad57ba 1311 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1312 goto cleanup;
12e364b9 1313 }
c60c8e26
BR
1314 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1315 if (!dev_info) {
1316 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1317 POSTCODE_SEVERITY_ERR);
22ad57ba 1318 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
c60c8e26 1319 goto cleanup;
12e364b9 1320 }
97a84f12 1321
c60c8e26
BR
1322 INIT_LIST_HEAD(&dev_info->entry);
1323 dev_info->bus_no = bus_no;
1324 dev_info->dev_no = dev_no;
1325 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1326 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
12e364b9
KC
1327 POSTCODE_SEVERITY_INFO);
1328
b32c4997
DZ
1329 visorchannel = visorchannel_create(cmd->create_device.channel_addr,
1330 cmd->create_device.channel_bytes,
1331 GFP_KERNEL,
1332 cmd->create_device.data_type_uuid);
1333
1334 if (!visorchannel) {
1335 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1336 POSTCODE_SEVERITY_ERR);
1337 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1338 kfree(dev_info);
1339 dev_info = NULL;
1340 goto cleanup;
1341 }
1342 dev_info->visorchannel = visorchannel;
1343 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
c60c8e26
BR
1344 list_add(&dev_info->entry, &dev_info_list);
1345 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
12e364b9 1346 POSTCODE_SEVERITY_INFO);
c60c8e26 1347cleanup:
12e364b9 1348 /* get the bus and devNo for DiagPool channel */
c60c8e26 1349 if (dev_info &&
b32c4997 1350 is_diagpool_channel(cmd->create_device.data_type_uuid)) {
c60c8e26
BR
1351 g_diagpool_bus_no = bus_no;
1352 g_diagpool_dev_no = dev_no;
12e364b9 1353 }
b4b598fd 1354 device_epilog(dev_info, segment_state_running,
12e364b9 1355 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
4da3336c 1356 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1357}
1358
1359static void
3ab47701 1360my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1361{
2ea5117b 1362 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1363 u32 bus_no = cmd->device_change_state.bus_no;
1364 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1365 struct spar_segment_state state = cmd->device_change_state.state;
e82ba62e 1366 struct visorchipset_device_info *dev_info;
12e364b9
KC
1367 int rc = CONTROLVM_RESP_SUCCESS;
1368
d480f6a2 1369 dev_info = device_find(&dev_info_list, bus_no, dev_no);
0278a905
BR
1370 if (!dev_info) {
1371 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1372 POSTCODE_SEVERITY_ERR);
22ad57ba 1373 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1374 } else if (dev_info->state.created == 0) {
1375 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1376 POSTCODE_SEVERITY_ERR);
22ad57ba 1377 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1378 }
0278a905 1379 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1380 device_epilog(dev_info, state,
0278a905 1381 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
4da3336c 1382 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1383}
1384
1385static void
3ab47701 1386my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1387{
2ea5117b 1388 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1389 u32 bus_no = cmd->destroy_device.bus_no;
1390 u32 dev_no = cmd->destroy_device.dev_no;
e82ba62e 1391 struct visorchipset_device_info *dev_info;
12e364b9
KC
1392 int rc = CONTROLVM_RESP_SUCCESS;
1393
d480f6a2 1394 dev_info = device_find(&dev_info_list, bus_no, dev_no);
61715c8b 1395 if (!dev_info)
22ad57ba 1396 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1397 else if (dev_info->state.created == 0)
22ad57ba 1398 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1399
61715c8b 1400 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1401 device_epilog(dev_info, segment_state_running,
12e364b9 1402 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
4da3336c 1403 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1404}
1405
1406/* When provided with the physical address of the controlvm channel
1407 * (phys_addr), the offset to the payload area we need to manage
1408 * (offset), and the size of this payload area (bytes), fills in the
f4c11551 1409 * controlvm_payload_info struct. Returns true for success or false
12e364b9
KC
1410 * for failure.
1411 */
1412static int
d5b3f1dc 1413initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
c1f834eb 1414 struct visor_controlvm_payload_info *info)
12e364b9 1415{
c242233e 1416 u8 __iomem *payload = NULL;
12e364b9
KC
1417 int rc = CONTROLVM_RESP_SUCCESS;
1418
38f736e9 1419 if (!info) {
22ad57ba 1420 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1421 goto cleanup;
12e364b9 1422 }
c1f834eb 1423 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9 1424 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1425 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1426 goto cleanup;
12e364b9
KC
1427 }
1428 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1429 if (!payload) {
22ad57ba 1430 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1431 goto cleanup;
12e364b9
KC
1432 }
1433
1434 info->offset = offset;
1435 info->bytes = bytes;
1436 info->ptr = payload;
12e364b9 1437
f118a39b 1438cleanup:
12e364b9 1439 if (rc < 0) {
f118a39b 1440 if (payload) {
12e364b9
KC
1441 iounmap(payload);
1442 payload = NULL;
1443 }
1444 }
1445 return rc;
1446}
1447
1448static void
c1f834eb 1449destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1450{
597c338f 1451 if (info->ptr) {
12e364b9
KC
1452 iounmap(info->ptr);
1453 info->ptr = NULL;
1454 }
c1f834eb 1455 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1456}
1457
/* Read the request-payload area's offset and size out of the controlvm
 * channel header and hand them to initialize_controlvm_payload_info()
 * to map the area into controlvm_payload_info.  On any channel-read
 * failure a postcode is logged and the payload is left uninitialized.
 */
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
1485
/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	/* uevent notifies userspace that the chipset is up */
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
12e364b9 1495
d3368a58 1496static int
12e364b9
KC
1497visorchipset_chipset_selftest(void)
1498{
1499 char env_selftest[20];
1500 char *envp[] = { env_selftest, NULL };
26eb2c0c 1501
12e364b9 1502 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1503 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1504 envp);
1505 return CONTROLVM_RESP_SUCCESS;
1506}
12e364b9
KC
1507
/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	/* uevent notifies userspace that the chipset is going away */
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
12e364b9
KC
1517
1518static void
77a0449d 1519chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1520{
1521 int rc = visorchipset_chipset_ready();
26eb2c0c 1522
12e364b9
KC
1523 if (rc != CONTROLVM_RESP_SUCCESS)
1524 rc = -rc;
77a0449d
BR
1525 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1526 controlvm_respond(msg_hdr, rc);
1527 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1528 /* Send CHIPSET_READY response when all modules have been loaded
1529 * and disks mounted for the partition
1530 */
77a0449d 1531 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1532 }
1533}
1534
1535static void
77a0449d 1536chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1537{
1538 int rc = visorchipset_chipset_selftest();
26eb2c0c 1539
12e364b9
KC
1540 if (rc != CONTROLVM_RESP_SUCCESS)
1541 rc = -rc;
77a0449d
BR
1542 if (msg_hdr->flags.response_expected)
1543 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1544}
1545
1546static void
77a0449d 1547chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1548{
1549 int rc = visorchipset_chipset_notready();
26eb2c0c 1550
12e364b9
KC
1551 if (rc != CONTROLVM_RESP_SUCCESS)
1552 rc = -rc;
77a0449d
BR
1553 if (msg_hdr->flags.response_expected)
1554 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1555}
1556
1557/* This is your "one-stop" shop for grabbing the next message from the
1558 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1559 */
f4c11551 1560static bool
3ab47701 1561read_controlvm_event(struct controlvm_message *msg)
12e364b9 1562{
c3d9a224 1563 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1564 CONTROLVM_QUEUE_EVENT, msg)) {
1565 /* got a message */
0aca7844 1566 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1567 return false;
1568 return true;
12e364b9 1569 }
f4c11551 1570 return false;
12e364b9
KC
1571}
1572
1573/*
1574 * The general parahotplug flow works as follows. The visorchipset
1575 * driver receives a DEVICE_CHANGESTATE message from Command
1576 * specifying a physical device to enable or disable. The CONTROLVM
1577 * message handler calls parahotplug_process_message, which then adds
1578 * the message to a global list and kicks off a udev event which
1579 * causes a user level script to enable or disable the specified
1580 * device. The udev script then writes to
1581 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1582 * to get called, at which point the appropriate CONTROLVM message is
1583 * retrieved from the list and responded to.
1584 */
1585
1586#define PARAHOTPLUG_TIMEOUT_MS 2000
1587
1588/*
1589 * Generate unique int to match an outstanding CONTROLVM message with a
1590 * udev script /proc response
1591 */
1592static int
1593parahotplug_next_id(void)
1594{
1595 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1596
12e364b9
KC
1597 return atomic_inc_return(&id);
1598}
1599
1600/*
1601 * Returns the time (in jiffies) when a CONTROLVM message on the list
1602 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1603 */
1604static unsigned long
1605parahotplug_next_expiration(void)
1606{
2cc1a1b3 1607 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1608}
1609
1610/*
1611 * Create a parahotplug_request, which is basically a wrapper for a
1612 * CONTROLVM_MESSAGE that we can stick on a list
1613 */
1614static struct parahotplug_request *
3ab47701 1615parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1616{
ea0dcfcf
QL
1617 struct parahotplug_request *req;
1618
6a55e3c3 1619 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1620 if (!req)
12e364b9
KC
1621 return NULL;
1622
1623 req->id = parahotplug_next_id();
1624 req->expiration = parahotplug_next_expiration();
1625 req->msg = *msg;
1626
1627 return req;
1628}
1629
1630/*
1631 * Free a parahotplug_request.
1632 */
1633static void
1634parahotplug_request_destroy(struct parahotplug_request *req)
1635{
1636 kfree(req);
1637}
1638
1639/*
1640 * Cause uevent to run the user level script to do the disable/enable
1641 * specified in (the CONTROLVM message in) the specified
1642 * parahotplug_request
1643 */
1644static void
1645parahotplug_request_kickoff(struct parahotplug_request *req)
1646{
2ea5117b 1647 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1648 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1649 env_func[40];
1650 char *envp[] = {
1651 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1652 };
1653
1654 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1655 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1656 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1657 cmd->device_change_state.state.active);
12e364b9 1658 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1659 cmd->device_change_state.bus_no);
12e364b9 1660 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1661 cmd->device_change_state.dev_no >> 3);
12e364b9 1662 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1663 cmd->device_change_state.dev_no & 0x7);
12e364b9 1664
eb34e877 1665 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1666 envp);
1667}
1668
1669/*
1670 * Remove any request from the list that's been on there too long and
1671 * respond with an error.
1672 */
1673static void
1674parahotplug_process_list(void)
1675{
e82ba62e
JS
1676 struct list_head *pos;
1677 struct list_head *tmp;
12e364b9 1678
ddf5de53 1679 spin_lock(&parahotplug_request_list_lock);
12e364b9 1680
ddf5de53 1681 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1682 struct parahotplug_request *req =
1683 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1684
1685 if (!time_after_eq(jiffies, req->expiration))
1686 continue;
1687
1688 list_del(pos);
1689 if (req->msg.hdr.flags.response_expected)
1690 controlvm_respond_physdev_changestate(
1691 &req->msg.hdr,
1692 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1693 req->msg.cmd.device_change_state.state);
1694 parahotplug_request_destroy(req);
12e364b9
KC
1695 }
1696
ddf5de53 1697 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1698}
1699
1700/*
1701 * Called from the /proc handler, which means the user script has
1702 * finished the enable/disable. Find the matching identifier, and
1703 * respond to the CONTROLVM message with success.
1704 */
1705static int
b06bdf7d 1706parahotplug_request_complete(int id, u16 active)
12e364b9 1707{
e82ba62e
JS
1708 struct list_head *pos;
1709 struct list_head *tmp;
12e364b9 1710
ddf5de53 1711 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1712
1713 /* Look for a request matching "id". */
ddf5de53 1714 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1715 struct parahotplug_request *req =
1716 list_entry(pos, struct parahotplug_request, list);
1717 if (req->id == id) {
1718 /* Found a match. Remove it from the list and
1719 * respond.
1720 */
1721 list_del(pos);
ddf5de53 1722 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1723 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1724 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1725 controlvm_respond_physdev_changestate(
1726 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1727 req->msg.cmd.device_change_state.state);
12e364b9
KC
1728 parahotplug_request_destroy(req);
1729 return 0;
1730 }
1731 }
1732
ddf5de53 1733 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1734 return -1;
1735}
1736
1737/*
1738 * Enables or disables a PCI device by kicking off a udev script
1739 */
bd5b9b32 1740static void
3ab47701 1741parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1742{
1743 struct parahotplug_request *req;
1744
1745 req = parahotplug_request_create(inmsg);
1746
38f736e9 1747 if (!req)
12e364b9 1748 return;
12e364b9 1749
2ea5117b 1750 if (inmsg->cmd.device_change_state.state.active) {
12e364b9
KC
1751 /* For enable messages, just respond with success
1752 * right away. This is a bit of a hack, but there are
1753 * issues with the early enable messages we get (with
1754 * either the udev script not detecting that the device
1755 * is up, or not getting called at all). Fortunately
1756 * the messages that get lost don't matter anyway, as
1757 * devices are automatically enabled at
1758 * initialization.
1759 */
1760 parahotplug_request_kickoff(req);
1761 controlvm_respond_physdev_changestate(&inmsg->hdr,
8e76e695
BR
1762 CONTROLVM_RESP_SUCCESS,
1763 inmsg->cmd.device_change_state.state);
12e364b9
KC
1764 parahotplug_request_destroy(req);
1765 } else {
1766 /* For disable messages, add the request to the
1767 * request list before kicking off the udev script. It
1768 * won't get responded to until the script has
1769 * indicated it's done.
1770 */
ddf5de53
BR
1771 spin_lock(&parahotplug_request_list_lock);
1772 list_add_tail(&req->list, &parahotplug_request_list);
1773 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1774
1775 parahotplug_request_kickoff(req);
1776 }
1777}
1778
12e364b9
KC
/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;	/* no channel: nothing to do, don't retry */
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		/* NULL context + retry means a transient allocation
		 * failure: ask the caller to re-deliver this message
		 */
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		/* ack real (non-test) messages back through the
		 * controlvm ACK queue before dispatching
		 */
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
					  -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	/* release the parsing context allocated above, if any */
	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
1886
5f3a7e36
DK
/* Ask the hypervisor (via an IO VMCALL) where the controlvm channel
 * lives.  On success *control_addr / *control_bytes receive the channel
 * guest-physical address and size.  Returns the raw vmcall result code.
 *
 * NOTE(review): the hypervisor writes into the on-stack 'params' via
 * its physical address, so 'params' must stay live across the vmcall.
 */
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
1902
d5b3f1dc 1903static u64 controlvm_get_channel_address(void)
524b0b63 1904{
5fc0229a 1905 u64 addr = 0;
b3c55b13 1906 u32 size = 0;
524b0b63 1907
0aca7844 1908 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1909 return 0;
0aca7844 1910
524b0b63
BR
1911 return addr;
1912}
1913
12e364b9
KC
/* Delayed-work handler that polls the controlvm channel: drains the
 * response queue, reads and dispatches pending controlvm messages
 * (re-trying a previously throttled one first), runs the parahotplug
 * worker, then re-queues itself at a fast or slow poll rate depending
 * on recent activity.
 */
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;	/* persists across invocations */

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	/* skip the first ~250 invocations to let the system settle */
	poll_count++;
	if (poll_count >= 250)
		;	/* keep going */
	else
		goto cleanup;

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	/* drain (and discard) anything sitting in the response queue */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	/* re-arm ourselves at the chosen poll rate */
	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
2004
/* Work handler used when booting a kdump (crash) kernel: replays the
 * controlvm messages that the original kernel saved in the channel so
 * the crash kernel can recreate the storage bus and device it needs to
 * write the dump.  On early failure it falls back to re-queuing the
 * periodic work at the slow poll rate.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* the channel must hold exactly the expected number of
	 * saved crash messages
	 */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	/* visorbus not registered yet: retry later via the periodic work */
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
2102
/* Completion callback for a CONTROLVM_BUS_CREATE request: record the
 * created state on success (or undo the bookkeeping entry on failure),
 * send the controlvm response, and release the saved request header.
 */
static void
bus_create_response(struct visor_device *bus_info, int response)
{
	if (response >= 0) {
		bus_info->state.created = 1;
	} else {
		if (response != -CONTROLVM_RESP_ERROR_ALREADY_DONE)
			/* undo the row we just created... */
			busdevices_del(&dev_info_list,
				       bus_info->chipset_bus_no);
	}

	bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}
2121
2122static void
d32517e3 2123bus_destroy_response(struct visor_device *bus_info, int response)
12e364b9 2124{
0274b5ae
DZ
2125 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
2126 response);
2127
2128 kfree(bus_info->pending_msg_hdr);
2129 bus_info->pending_msg_hdr = NULL;
2130
d32517e3 2131 busdevices_del(&dev_info_list, bus_info->chipset_bus_no);
12e364b9
KC
2132}
2133
/* Completion callback for CONTROLVM_DEVICE_CREATE: mark the device as
 * created on success, send the controlvm response, and release the
 * saved request header.
 */
static void
device_create_response(struct visorchipset_device_info *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
2146
2147static void
b4b598fd 2148device_destroy_response(struct visorchipset_device_info *dev_info, int response)
12e364b9 2149{
0274b5ae
DZ
2150 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
2151 response);
2152
2153 kfree(dev_info->pending_msg_hdr);
2154 dev_info->pending_msg_hdr = NULL;
2155
2156 dev_info_clear(dev_info);
12e364b9
KC
2157}
2158
/* Completion callback for a device pause: report the state change to
 * segment_state_standby and release the saved request header.
 */
static void
visorchipset_device_pause_response(struct visorchipset_device_info *dev_info,
				   int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
2170
/* Completion callback for a device resume: report the state change to
 * segment_state_running and release the saved request header.
 */
static void
device_resume_response(struct visorchipset_device_info *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
2181
f4c11551 2182bool
52063eca 2183visorchipset_get_device_info(u32 bus_no, u32 dev_no,
b486df19 2184 struct visorchipset_device_info *dev_info)
12e364b9 2185{
d480f6a2 2186 void *p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2187
0aca7844 2188 if (!p)
f4c11551 2189 return false;
b486df19 2190 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
f4c11551 2191 return true;
12e364b9
KC
2192}
2193EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2194
f4c11551 2195bool
b4b598fd
DZ
2196visorchipset_set_device_context(struct visorchipset_device_info *p,
2197 void *context)
12e364b9 2198{
0aca7844 2199 if (!p)
f4c11551 2200 return false;
12e364b9 2201 p->bus_driver_context = context;
f4c11551 2202 return true;
12e364b9
KC
2203}
2204EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2205
18b87ed1 2206static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2207 struct device_attribute *attr,
2208 const char *buf, size_t count)
12e364b9 2209{
18b87ed1 2210 char msgtype[64];
12e364b9 2211
66e24b76
BR
2212 if (sscanf(buf, "%63s", msgtype) != 1)
2213 return -EINVAL;
2214
ebec8967 2215 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2216 chipset_events[0] = 1;
2217 return count;
ebec8967 2218 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2219 chipset_events[1] = 1;
2220 return count;
e22a4a0f
BR
2221 }
2222 return -EINVAL;
12e364b9
KC
2223}
2224
e56fa7cd
BR
2225/* The parahotplug/devicedisabled interface gets called by our support script
2226 * when an SR-IOV device has been shut down. The ID is passed to the script
2227 * and then passed back when the device has been removed.
2228 */
2229static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2230 struct device_attribute *attr,
2231 const char *buf, size_t count)
e56fa7cd 2232{
94217363 2233 unsigned int id;
e56fa7cd 2234
ebec8967 2235 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2236 return -EINVAL;
2237
2238 parahotplug_request_complete(id, 0);
2239 return count;
2240}
2241
2242/* The parahotplug/deviceenabled interface gets called by our support script
2243 * when an SR-IOV device has been recovered. The ID is passed to the script
2244 * and then passed back when the device has been brought back up.
2245 */
2246static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2247 struct device_attribute *attr,
2248 const char *buf, size_t count)
e56fa7cd 2249{
94217363 2250 unsigned int id;
e56fa7cd 2251
ebec8967 2252 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2253 return -EINVAL;
2254
2255 parahotplug_request_complete(id, 1);
2256 return count;
2257}
2258
e3420ed6
EA
/* mmap handler for the visorchipset character device.  Only one offset
 * is supported: VISORCHIPSET_MMAP_CONTROLCHANOFFSET maps the GP control
 * channel (whose physical address is read out of the controlvm channel)
 * into the caller's address space.
 */
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		/* fetch the control channel's physical address out of
		 * the controlvm channel header
		 */
		visorchannel_read(*file_controlvm_channel,
				  offsetof(struct spar_controlvm_channel_protocol,
					   gp_control_channel),
				  &addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
2297
5f3a7e36
DK
/* Query the hypervisor for the guest's virtual time offset.
 * NOTE(review): 'result' is declared u64 but the function returns s64;
 * the bit pattern passes through unchanged, but a signed local would
 * match the return type more clearly.
 */
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}
2307
/* Ask the hypervisor to adjust physical time by @adjustment.
 * Returns the raw vmcall result code.
 */
static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}
2315
e3420ed6
EA
2316static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2317 unsigned long arg)
2318{
2319 s64 adjustment;
2320 s64 vrtc_offset;
2321
2322 switch (cmd) {
2323 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2324 /* get the physical rtc offset */
2325 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2326 if (copy_to_user((void __user *)arg, &vrtc_offset,
2327 sizeof(vrtc_offset))) {
2328 return -EFAULT;
2329 }
d5b3f1dc 2330 return 0;
e3420ed6
EA
2331 case VMCALL_UPDATE_PHYSICAL_TIME:
2332 if (copy_from_user(&adjustment, (void __user *)arg,
2333 sizeof(adjustment))) {
2334 return -EFAULT;
2335 }
2336 return issue_vmcall_update_physical_time(adjustment);
2337 default:
2338 return -EFAULT;
2339 }
2340}
2341
/* File operations for the visorchipset character device; only open,
 * release, ioctl and mmap do real work (.read/.write are deliberately
 * NULL - the device has no stream interface).
 */
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.read = NULL,
	.write = NULL,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};
2351
/* Register the visorchipset character device.  If @major_dev carries
 * major number 0 a dynamic major is allocated; otherwise the supplied
 * static major is registered.  Returns 0 or a negative errno.
 */
static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		/* dynamic major device number registration required */
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		/* undo the region registration on cdev failure */
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}
2378
55c67dca
PB
/* ACPI .add callback: locate and attach the controlvm channel, register
 * the character device, start the periodic (or crash-recovery) work,
 * register the platform device, and initialize visorbus.
 *
 * NOTE(review): several latent issues worth confirming/fixing upstream:
 *  - visorchannel_create_with_lock() may fail; its result is passed to
 *    visorchannel_get_header() without a NULL check.
 *  - queue_delayed_work() returns bool, never negative, so the
 *    'rc < 0' check after it is dead code (and rc is briefly 1).
 *  - error paths after workqueue creation do not unwind the chrdev or
 *    workqueue, and 'rc = -1' is not a proper -errno.
 */
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int rc = 0;
	u64 addr;
	int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		return -ENODEV;

	memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
							  GFP_KERNEL, uuid);
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
		visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);
	periodic_controlvm_workqueue =
	    create_singlethread_workqueue("visorchipset_controlvm");

	if (!periodic_controlvm_workqueue) {
		POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
				 DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	rc = queue_delayed_work(periodic_controlvm_workqueue,
				&periodic_controlvm_work, poll_jiffies);
	if (rc < 0) {
		POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
				 DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -1;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	rc = visorbus_init();
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}
2456
/* Tear down the visorchipset character device: delete the cdev (if it
 * was ever added) and release the device-number region.
 */
static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}
2465
55c67dca
PB
/* ACPI .remove callback: unwind everything visorchipset_init() set up -
 * visorbus, the periodic work/workqueue, payload info, the controlvm
 * channel, and the character device.
 *
 * NOTE(review): the platform device registered in visorchipset_init()
 * is not unregistered here - confirm this isn't a leak on unbind.
 */
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work(&periodic_controlvm_work);
	flush_workqueue(periodic_controlvm_workqueue);
	destroy_workqueue(periodic_controlvm_workqueue);
	periodic_controlvm_workqueue = NULL;
	destroy_controlvm_payload_info(&controlvm_payload_info);

	cleanup_controlvm_structures();

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
2490
/* ACPI IDs this driver binds to.  NOTE(review): no MODULE_DEVICE_TABLE
 * for this table is visible in this chunk, so module autoloading may
 * not work - confirm elsewhere in the file.
 */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};
55c67dca
PB
2495
/* ACPI driver glue: visorchipset_init()/visorchipset_exit() run when a
 * matching ACPI device is added/removed.
 */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
d5b3f1dc
EA
2506static __init uint32_t visorutil_spar_detect(void)
2507{
2508 unsigned int eax, ebx, ecx, edx;
2509
2510 if (cpu_has_hypervisor) {
2511 /* check the ID */
2512 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2513 return (ebx == UNISYS_SPAR_ID_EBX) &&
2514 (ecx == UNISYS_SPAR_ID_ECX) &&
2515 (edx == UNISYS_SPAR_ID_EDX);
2516 } else {
2517 return 0;
2518 }
2519}
55c67dca
PB
2520
2521static int init_unisys(void)
2522{
2523 int result;
d5b3f1dc 2524 if (!visorutil_spar_detect())
55c67dca
PB
2525 return -ENODEV;
2526
2527 result = acpi_bus_register_driver(&unisys_acpi_driver);
2528 if (result)
2529 return -ENODEV;
2530
2531 pr_info("Unisys Visorchipset Driver Loaded.\n");
2532 return 0;
2533};
2534
/* Module exit: unregister the ACPI driver (which triggers
 * visorchipset_exit() for any bound device).
 */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
2539
12e364b9 2540module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2541MODULE_PARM_DESC(visorchipset_major,
2542 "major device number to use for the device node");
4da3336c
DK
2543module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2544MODULE_PARM_DESC(visorchipset_visorbusreqwait,
12e364b9 2545 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2546module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2547 int, S_IRUGO);
2548MODULE_PARM_DESC(visorchipset_holdchipsetready,
2549 "1 to hold response to CHIPSET_READY");
b615d628 2550
55c67dca
PB
2551module_init(init_unisys);
2552module_exit(exit_unisys);
12e364b9
KC
2553
2554MODULE_AUTHOR("Unisys");
2555MODULE_LICENSE("GPL");
2556MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2557 VERSION);
2558MODULE_VERSION(VERSION);