staging: unisys: Remove unused visorchipset_save_message()
drivers/staging/unisys/visorbus/visorchipset.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "uisutils.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
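/*
 * Note: the three UNISYS_SPAR_ID_* values above are just the ASCII string
 * "UnisysSpar64" split into little-endian 32-bit chunks ("Unis", "ysSp",
 * "ar64"), i.e. what the s-Par hypervisor is expected to return in
 * ebx/ecx/edx for CPUID leaf UNISYS_SPAR_LEAF_ID.
 */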
/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;	/* when we got our last
							 * controlvm message */
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};
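/*
 * Note: data[0] above is a C89-style flexible array member; the controlvm
 * payload is copied directly after the struct, so the single
 * kzalloc(sizeof(struct parser_context) + bytes) in parser_init_byte_stream()
 * holds both the bookkeeping fields and the parameter bytes being parsed.
 */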
static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
struct visor_livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	bool gettextdump_outstanding;
	u32 crc32;
	unsigned long length;
	atomic_t buffers_in_use;
	unsigned long destination;
};

static struct visor_livedump_info livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 * file data
	 * - this list is removed from via the hotplug program that is actually
	 * consuming these buffers to write as file data */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(u32 bus_no, int response);
static void bus_destroy_response(u32 bus_no, int response);
static void device_create_response(u32 bus_no, u32 dev_no, int response);
static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
static void device_resume_response(u32 bus_no, u32 dev_no, int response);

static void visorchipset_device_pause_response(u32 bus_no, u32 dev_no,
					       int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};
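/*
 * Note: with the attribute groups registered above, the attributes land under
 * /sys/devices/platform/visorchipset/<group>/<attr>, e.g.
 * /sys/devices/platform/visorchipset/install/toolaction or
 * /sys/devices/platform/visorchipset/parahotplug/deviceenabled.
 */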
/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * alloc an extra byte to ensure the payload is
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	     > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long) (addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		if (ctx) {
			parser_done(ctx);
			ctx = NULL;
		}
	}
	return rc;
}

static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}

/** Describes the state from the perspective of which controlvm messages have
 *  been received for a bus or device.
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		goto Away;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}

Away:
	return;
}

static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}

static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
	if (value == NULL)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *) (value))[value_length] = '\0';
	return value;
}
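/*
 * Note: parser_string_get() hands back a freshly kmalloc'd, NUL-terminated
 * copy of the current parameter string; callers own that buffer and must
 * kfree() it (e.g. the bus name stored by bus_configure() is later freed in
 * bus_info_clear()).
 */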
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &(efi_spar_indication),
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;

	kfree(p->name);
	kfree(p->description);
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *) v;

	memset(p, 0, sizeof(struct visorchipset_device_info));
}

static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
	struct visorchipset_bus_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no)
			return p;
	}

	return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
	struct visorchipset_device_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no && p->dev_no == dev_no)
			return p;
	}

	return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
	struct visorchipset_device_info *p, *tmp;

	list_for_each_entry_safe(p, tmp, list, entry) {
		if (p->bus_no == bus_no) {
			list_del(&p->entry);
			kfree(p);
		}
	}
}

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;
	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;
	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}
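/*
 * Note: check_chipset_events() ANDs every entry of chipset_events[] together,
 * so it only reports 1 once all MAX_CHIPSET_EVENTS event flags have been set;
 * clear_chipset_events() rearms that check.
 */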
void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it). */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver. */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet. */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

static void
bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
{
	struct visorchipset_bus_info *p;
	bool need_clear = false;

	p = bus_find(&bus_info_list, bus_no);
	if (!p)
		return;

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			busdevices_del(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		busdevices_del(&dev_info_list, bus_no);
	}
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     u32 bus_no, u32 dev_no, int response,
			     struct spar_segment_state response_state)
{
	struct visorchipset_device_info *p;
	struct controlvm_message outmsg;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
{
	struct visorchipset_device_info *p;
	bool need_clear = false;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

static void
bus_epilog(u32 bus_no,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	struct visorchipset_bus_info *bus_info;
	bool notified = false;

	bus_info = bus_find(&bus_info_list, bus_no);

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_no);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_no, response);
	up(&notifier_lock);
}

static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;

	struct visorchipset_device_info *dev_info =
		device_find(&dev_info_list, bus_no, dev_no);
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;

	notifiers = &busdev_notifiers;

	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (bus_no, dev_no);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (bus_no,
								     dev_no);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (bus_no,
								    dev_no);
					notified = true;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (bus_no, dev_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, bus_no, dev_no, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	dev_info->chan_info.intr = cmd->create_device.intr;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(bus_no, dev_no, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success or a negative CONTROLVM_RESP_ERROR_* value on failure.
 */
1400static int
d5b3f1dc 1401initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
c1f834eb 1402 struct visor_controlvm_payload_info *info)
12e364b9 1403{
c242233e 1404 u8 __iomem *payload = NULL;
12e364b9
KC
1405 int rc = CONTROLVM_RESP_SUCCESS;
1406
38f736e9 1407 if (!info) {
22ad57ba 1408 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1409 goto cleanup;
12e364b9 1410 }
c1f834eb 1411 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9 1412 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1413 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1414 goto cleanup;
12e364b9
KC
1415 }
1416 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1417 if (!payload) {
22ad57ba 1418 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1419 goto cleanup;
12e364b9
KC
1420 }
1421
1422 info->offset = offset;
1423 info->bytes = bytes;
1424 info->ptr = payload;
12e364b9 1425
f118a39b 1426cleanup:
12e364b9 1427 if (rc < 0) {
f118a39b 1428 if (payload) {
12e364b9
KC
1429 iounmap(payload);
1430 payload = NULL;
1431 }
1432 }
1433 return rc;
1434}
1435
1436static void
c1f834eb 1437destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1438{
597c338f 1439 if (info->ptr) {
12e364b9
KC
1440 iounmap(info->ptr);
1441 info->ptr = NULL;
1442 }
c1f834eb 1443 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1444}
1445
1446static void
1447initialize_controlvm_payload(void)
1448{
d5b3f1dc 1449 u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
cafefc0c
BR
1450 u64 payload_offset = 0;
1451 u32 payload_bytes = 0;
26eb2c0c 1452
c3d9a224 1453 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1454 offsetof(struct spar_controlvm_channel_protocol,
1455 request_payload_offset),
cafefc0c 1456 &payload_offset, sizeof(payload_offset)) < 0) {
12e364b9
KC
1457 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1458 POSTCODE_SEVERITY_ERR);
1459 return;
1460 }
c3d9a224 1461 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1462 offsetof(struct spar_controlvm_channel_protocol,
1463 request_payload_bytes),
cafefc0c 1464 &payload_bytes, sizeof(payload_bytes)) < 0) {
12e364b9
KC
1465 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1466 POSTCODE_SEVERITY_ERR);
1467 return;
1468 }
1469 initialize_controlvm_payload_info(phys_addr,
cafefc0c 1470 payload_offset, payload_bytes,
84982fbf 1471 &controlvm_payload_info);
12e364b9
KC
1472}
1473
1474/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1475 * Returns CONTROLVM_RESP_xxx code.
1476 */
1477int
1478visorchipset_chipset_ready(void)
1479{
eb34e877 1480 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1481 return CONTROLVM_RESP_SUCCESS;
1482}
1483EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1484
1485int
1486visorchipset_chipset_selftest(void)
1487{
1488 char env_selftest[20];
1489 char *envp[] = { env_selftest, NULL };
26eb2c0c 1490
12e364b9 1491 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1492 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1493 envp);
1494 return CONTROLVM_RESP_SUCCESS;
1495}
1496EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1497
1498/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1499 * Returns CONTROLVM_RESP_xxx code.
1500 */
1501int
1502visorchipset_chipset_notready(void)
1503{
eb34e877 1504 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1505 return CONTROLVM_RESP_SUCCESS;
1506}
1507EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1508
1509static void
77a0449d 1510chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1511{
1512 int rc = visorchipset_chipset_ready();
26eb2c0c 1513
12e364b9
KC
1514 if (rc != CONTROLVM_RESP_SUCCESS)
1515 rc = -rc;
77a0449d
BR
1516 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1517 controlvm_respond(msg_hdr, rc);
1518 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1519 /* Send CHIPSET_READY response when all modules have been loaded
1520 * and disks mounted for the partition
1521 */
77a0449d 1522 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1523 }
1524}
1525
1526static void
77a0449d 1527chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1528{
1529 int rc = visorchipset_chipset_selftest();
26eb2c0c 1530
12e364b9
KC
1531 if (rc != CONTROLVM_RESP_SUCCESS)
1532 rc = -rc;
77a0449d
BR
1533 if (msg_hdr->flags.response_expected)
1534 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1535}
1536
1537static void
77a0449d 1538chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1539{
1540 int rc = visorchipset_chipset_notready();
26eb2c0c 1541
12e364b9
KC
1542 if (rc != CONTROLVM_RESP_SUCCESS)
1543 rc = -rc;
77a0449d
BR
1544 if (msg_hdr->flags.response_expected)
1545 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1546}
1547
1548/* This is your "one-stop" shop for grabbing the next message from the
1549 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1550 */
f4c11551 1551static bool
3ab47701 1552read_controlvm_event(struct controlvm_message *msg)
12e364b9 1553{
c3d9a224 1554 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1555 CONTROLVM_QUEUE_EVENT, msg)) {
1556 /* got a message */
0aca7844 1557 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1558 return false;
1559 return true;
12e364b9 1560 }
f4c11551 1561 return false;
12e364b9
KC
1562}
1563
1564/*
1565 * The general parahotplug flow works as follows. The visorchipset
1566 * driver receives a DEVICE_CHANGESTATE message from Command
1567 * specifying a physical device to enable or disable. The CONTROLVM
1568 * message handler calls parahotplug_process_message, which then adds
1569 * the message to a global list and kicks off a udev event which
1570 * causes a user level script to enable or disable the specified
1571 * device. The udev script then writes to
1572 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1573 * to get called, at which point the appropriate CONTROLVM message is
1574 * retrieved from the list and responded to.
1575 */
1576
1577#define PARAHOTPLUG_TIMEOUT_MS 2000
1578
1579/*
1580 * Generate unique int to match an outstanding CONTROLVM message with a
1581 * udev script /proc response
1582 */
1583static int
1584parahotplug_next_id(void)
1585{
1586 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1587
12e364b9
KC
1588 return atomic_inc_return(&id);
1589}
1590
1591/*
1592 * Returns the time (in jiffies) when a CONTROLVM message on the list
1593 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1594 */
1595static unsigned long
1596parahotplug_next_expiration(void)
1597{
2cc1a1b3 1598 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1599}
1600
1601/*
1602 * Create a parahotplug_request, which is basically a wrapper for a
1603 * CONTROLVM_MESSAGE that we can stick on a list
1604 */
1605static struct parahotplug_request *
3ab47701 1606parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1607{
ea0dcfcf
QL
1608 struct parahotplug_request *req;
1609
6a55e3c3 1610 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1611 if (!req)
12e364b9
KC
1612 return NULL;
1613
1614 req->id = parahotplug_next_id();
1615 req->expiration = parahotplug_next_expiration();
1616 req->msg = *msg;
1617
1618 return req;
1619}
1620
1621/*
1622 * Free a parahotplug_request.
1623 */
1624static void
1625parahotplug_request_destroy(struct parahotplug_request *req)
1626{
1627 kfree(req);
1628}
1629
1630/*
1631 * Cause uevent to run the user level script to do the disable/enable
1632 * specified in (the CONTROLVM message in) the specified
1633 * parahotplug_request
1634 */
1635static void
1636parahotplug_request_kickoff(struct parahotplug_request *req)
1637{
2ea5117b 1638 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1639 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1640 env_func[40];
1641 char *envp[] = {
1642 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1643 };
1644
1645 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1646 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1647 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1648 cmd->device_change_state.state.active);
12e364b9 1649 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1650 cmd->device_change_state.bus_no);
12e364b9 1651 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1652 cmd->device_change_state.dev_no >> 3);
12e364b9 1653 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1654 cmd->device_change_state.dev_no & 0x7);
12e364b9 1655
eb34e877 1656 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1657 envp);
1658}
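(Editorial note.) The environment built above derives SPAR_PARAHOTPLUG_DEVICE and SPAR_PARAHOTPLUG_FUNCTION from dev_no with dev_no >> 3 and dev_no & 0x7. A tiny sketch of that packing follows; treating the upper bits as the PCI slot number is an inference from the shift above, not something stated elsewhere in this file.

#include <stdio.h>

/* Illustration only: device number in bits 3 and up, function in bits 0-2,
 * mirroring the dev_no >> 3 / dev_no & 0x7 split used above.
 */
static unsigned int pack_dev_no(unsigned int slot, unsigned int func)
{
	return (slot << 3) | (func & 0x7);
}

int main(void)
{
	unsigned int dev_no = pack_dev_no(2, 1);

	printf("device %u function %u\n", dev_no >> 3, dev_no & 0x7);
	return 0;	/* prints "device 2 function 1" */
}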
1659
1660/*
1661 * Remove any request from the list that's been on there too long and
1662 * respond with an error.
1663 */
1664static void
1665parahotplug_process_list(void)
1666{
e82ba62e
JS
1667 struct list_head *pos;
1668 struct list_head *tmp;
12e364b9 1669
ddf5de53 1670 spin_lock(&parahotplug_request_list_lock);
12e364b9 1671
ddf5de53 1672 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1673 struct parahotplug_request *req =
1674 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1675
1676 if (!time_after_eq(jiffies, req->expiration))
1677 continue;
1678
1679 list_del(pos);
1680 if (req->msg.hdr.flags.response_expected)
1681 controlvm_respond_physdev_changestate(
1682 &req->msg.hdr,
1683 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1684 req->msg.cmd.device_change_state.state);
1685 parahotplug_request_destroy(req);
12e364b9
KC
1686 }
1687
ddf5de53 1688 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1689}
1690
1691/*
1692 * Called from the sysfs store handlers, which means the user script has
1693 * finished the enable/disable. Find the matching identifier, and
1694 * respond to the CONTROLVM message with success.
1695 */
1696static int
b06bdf7d 1697parahotplug_request_complete(int id, u16 active)
12e364b9 1698{
e82ba62e
JS
1699 struct list_head *pos;
1700 struct list_head *tmp;
12e364b9 1701
ddf5de53 1702 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1703
1704 /* Look for a request matching "id". */
ddf5de53 1705 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1706 struct parahotplug_request *req =
1707 list_entry(pos, struct parahotplug_request, list);
1708 if (req->id == id) {
1709 /* Found a match. Remove it from the list and
1710 * respond.
1711 */
1712 list_del(pos);
ddf5de53 1713 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1714 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1715 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1716 controlvm_respond_physdev_changestate(
1717 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1718 req->msg.cmd.device_change_state.state);
12e364b9
KC
1719 parahotplug_request_destroy(req);
1720 return 0;
1721 }
1722 }
1723
ddf5de53 1724 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1725 return -1;
1726}
1727
1728/*
1729 * Enables or disables a PCI device by kicking off a udev script
1730 */
bd5b9b32 1731static void
3ab47701 1732parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1733{
1734 struct parahotplug_request *req;
1735
1736 req = parahotplug_request_create(inmsg);
1737
38f736e9 1738 if (!req)
12e364b9 1739 return;
12e364b9 1740
2ea5117b 1741 if (inmsg->cmd.device_change_state.state.active) {
12e364b9
KC
1742 /* For enable messages, just respond with success
1743 * right away. This is a bit of a hack, but there are
1744 * issues with the early enable messages we get (with
1745 * either the udev script not detecting that the device
1746 * is up, or not getting called at all). Fortunately
1747 * the messages that get lost don't matter anyway, as
1748 * devices are automatically enabled at
1749 * initialization.
1750 */
1751 parahotplug_request_kickoff(req);
1752 controlvm_respond_physdev_changestate(&inmsg->hdr,
8e76e695
BR
1753 CONTROLVM_RESP_SUCCESS,
1754 inmsg->cmd.device_change_state.state);
12e364b9
KC
1755 parahotplug_request_destroy(req);
1756 } else {
1757 /* For disable messages, add the request to the
1758 * request list before kicking off the udev script. It
1759 * won't get responded to until the script has
1760 * indicated it's done.
1761 */
ddf5de53
BR
1762 spin_lock(&parahotplug_request_list_lock);
1763 list_add_tail(&req->list, &parahotplug_request_list);
1764 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1765
1766 parahotplug_request_kickoff(req);
1767 }
1768}
1769
12e364b9
KC
1770/* Process a controlvm message.
1771 * Return result:
779d0752 1772 * false - this function will return false only in the case where the
12e364b9
KC
1773 * controlvm message was NOT processed, but processing must be
1774 * retried before reading the next controlvm message; a
1775 * scenario where this can occur is when we need to throttle
1776 * the allocation of memory in which to copy out controlvm
1777 * payload data
f4c11551 1778 * true - processing of the controlvm message completed,
12e364b9
KC
1779 * either successfully or with an error.
1780 */
f4c11551 1781static bool
d5b3f1dc 1782handle_command(struct controlvm_message inmsg, u64 channel_addr)
12e364b9 1783{
2ea5117b 1784 struct controlvm_message_packet *cmd = &inmsg.cmd;
e82ba62e
JS
1785 u64 parm_addr;
1786 u32 parm_bytes;
317d9614 1787 struct parser_context *parser_ctx = NULL;
e82ba62e 1788 bool local_addr;
3ab47701 1789 struct controlvm_message ackmsg;
12e364b9
KC
1790
1791 /* create parsing context if necessary */
818352a8 1792 local_addr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1793 if (channel_addr == 0)
f4c11551 1794 return true;
818352a8
BR
1795 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1796 parm_bytes = inmsg.hdr.payload_bytes;
12e364b9
KC
1797
1798 /* Parameter and channel addresses within test messages actually lie
1799 * within our OS-controlled memory. We need to know that, because it
1800 * makes a difference in how we compute the virtual address.
1801 */
ebec8967 1802 if (parm_addr && parm_bytes) {
f4c11551 1803 bool retry = false;
26eb2c0c 1804
12e364b9 1805 parser_ctx =
818352a8
BR
1806 parser_init_byte_stream(parm_addr, parm_bytes,
1807 local_addr, &retry);
1b08872e 1808 if (!parser_ctx && retry)
f4c11551 1809 return false;
12e364b9
KC
1810 }
1811
818352a8 1812 if (!local_addr) {
12e364b9
KC
1813 controlvm_init_response(&ackmsg, &inmsg.hdr,
1814 CONTROLVM_RESP_SUCCESS);
c3d9a224
BR
1815 if (controlvm_channel)
1816 visorchannel_signalinsert(controlvm_channel,
1b08872e
BR
1817 CONTROLVM_QUEUE_ACK,
1818 &ackmsg);
12e364b9 1819 }
98d7b594 1820 switch (inmsg.hdr.id) {
12e364b9 1821 case CONTROLVM_CHIPSET_INIT:
12e364b9
KC
1822 chipset_init(&inmsg);
1823 break;
1824 case CONTROLVM_BUS_CREATE:
12e364b9
KC
1825 bus_create(&inmsg);
1826 break;
1827 case CONTROLVM_BUS_DESTROY:
12e364b9
KC
1828 bus_destroy(&inmsg);
1829 break;
1830 case CONTROLVM_BUS_CONFIGURE:
12e364b9
KC
1831 bus_configure(&inmsg, parser_ctx);
1832 break;
1833 case CONTROLVM_DEVICE_CREATE:
12e364b9
KC
1834 my_device_create(&inmsg);
1835 break;
1836 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1837 if (cmd->device_change_state.flags.phys_device) {
12e364b9
KC
1838 parahotplug_process_message(&inmsg);
1839 } else {
12e364b9
KC
1840 /* save the hdr and cmd structures for later use
1841 * when sending back the response to Command */
1842 my_device_changestate(&inmsg);
4f44b72d 1843 g_devicechangestate_packet = inmsg.cmd;
12e364b9
KC
1844 break;
1845 }
1846 break;
1847 case CONTROLVM_DEVICE_DESTROY:
12e364b9
KC
1848 my_device_destroy(&inmsg);
1849 break;
1850 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1851 /* no op for now, just send a response saying we passed */
98d7b594 1852 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1853 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1854 break;
1855 case CONTROLVM_CHIPSET_READY:
12e364b9
KC
1856 chipset_ready(&inmsg.hdr);
1857 break;
1858 case CONTROLVM_CHIPSET_SELFTEST:
12e364b9
KC
1859 chipset_selftest(&inmsg.hdr);
1860 break;
1861 case CONTROLVM_CHIPSET_STOP:
12e364b9
KC
1862 chipset_notready(&inmsg.hdr);
1863 break;
1864 default:
98d7b594 1865 if (inmsg.hdr.flags.response_expected)
12e364b9 1866 controlvm_respond(&inmsg.hdr,
818352a8 1867 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
12e364b9
KC
1868 break;
1869 }
1870
38f736e9 1871 if (parser_ctx) {
12e364b9
KC
1872 parser_done(parser_ctx);
1873 parser_ctx = NULL;
1874 }
f4c11551 1875 return true;
12e364b9
KC
1876}
1877
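(Editorial sketch.) The true/false contract documented above is easiest to read as a loop. The fragment below is a distilled version of the consumer in controlvm_periodic_work() further down, with the polling, idle throttling and visorbus-registration checks removed; it is an illustration, not code that belongs to the driver.

/* Illustration only: how a caller is expected to react to handle_command(). */
static void handle_command_sketch(void)
{
	struct controlvm_message msg;
	bool have_msg;

	if (controlvm_pending_msg_valid) {
		/* retry the message that was throttled on the previous pass */
		msg = controlvm_pending_msg;
		controlvm_pending_msg_valid = false;
		have_msg = true;
	} else {
		have_msg = read_controlvm_event(&msg);
	}

	while (have_msg) {
		if (!handle_command(msg,
				    visorchannel_get_physaddr(controlvm_channel))) {
			/* false: stash the message and retry it next time */
			controlvm_pending_msg = msg;
			controlvm_pending_msg_valid = true;
			return;
		}
		/* true: this message is done; read the next one */
		have_msg = read_controlvm_event(&msg);
	}
}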
d5b3f1dc 1878static u64 controlvm_get_channel_address(void)
524b0b63 1879{
5fc0229a 1880 u64 addr = 0;
b3c55b13 1881 u32 size = 0;
524b0b63 1882
0aca7844 1883 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1884 return 0;
0aca7844 1885
524b0b63
BR
1886 return addr;
1887}
1888
12e364b9
KC
1889static void
1890controlvm_periodic_work(struct work_struct *work)
1891{
3ab47701 1892 struct controlvm_message inmsg;
f4c11551
JS
1893 bool got_command = false;
1894 bool handle_command_failed = false;
1c1ed292 1895 static u64 poll_count;
12e364b9
KC
1896
1897 /* make sure visorbus server is registered for controlvm callbacks */
4da3336c 1898 if (visorchipset_visorbusregwait && !visorbusregistered)
1c1ed292 1899 goto cleanup;
12e364b9 1900
1c1ed292
BR
1901 poll_count++;
1902 if (poll_count >= 250)
12e364b9
KC
1903 ; /* keep going */
1904 else
1c1ed292 1905 goto cleanup;
12e364b9
KC
1906
1907 /* Check events to determine if response to CHIPSET_READY
1908 * should be sent
1909 */
0639ba67
BR
1910 if (visorchipset_holdchipsetready &&
1911 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1912 if (check_chipset_events() == 1) {
da021f02 1913 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1914 clear_chipset_events();
da021f02 1915 memset(&g_chipset_msg_hdr, 0,
98d7b594 1916 sizeof(struct controlvm_message_header));
12e364b9
KC
1917 }
1918 }
1919
c3d9a224 1920 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1921 CONTROLVM_QUEUE_RESPONSE,
c3d9a224
BR
1922 &inmsg))
1923 ;
1c1ed292 1924 if (!got_command) {
7166ed19 1925 if (controlvm_pending_msg_valid) {
8a1182eb
BR
1926 /* we throttled processing of a prior
1927 * msg, so try to process it again
1928 * rather than reading a new one
1929 */
7166ed19 1930 inmsg = controlvm_pending_msg;
f4c11551 1931 controlvm_pending_msg_valid = false;
1c1ed292 1932 got_command = true;
75c1f8b7 1933 } else {
1c1ed292 1934 got_command = read_controlvm_event(&inmsg);
75c1f8b7 1935 }
8a1182eb 1936 }
12e364b9 1937
f4c11551 1938 handle_command_failed = false;
1c1ed292 1939 while (got_command && (!handle_command_failed)) {
b53e0e93 1940 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1941 if (handle_command(inmsg,
1942 visorchannel_get_physaddr
c3d9a224 1943 (controlvm_channel)))
1c1ed292 1944 got_command = read_controlvm_event(&inmsg);
8a1182eb
BR
1945 else {
1946 /* this is a scenario where throttling
1947 * is required, but probably NOT an
1948 * error...; we stash the current
1949 * controlvm msg so we will attempt to
1950 * reprocess it on our next loop
1951 */
f4c11551 1952 handle_command_failed = true;
7166ed19 1953 controlvm_pending_msg = inmsg;
f4c11551 1954 controlvm_pending_msg_valid = true;
12e364b9
KC
1955 }
1956 }
1957
1958 /* parahotplug_worker */
1959 parahotplug_process_list();
1960
1c1ed292 1961cleanup:
12e364b9
KC
1962
1963 if (time_after(jiffies,
b53e0e93 1964 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1965 /* it's been longer than MIN_IDLE_SECONDS since we
1966 * processed our last controlvm message; slow down the
1967 * polling
1968 */
911e213e
BR
1969 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1970 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1971 } else {
911e213e
BR
1972 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1973 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1974 }
1975
9232d2d6
BR
1976 queue_delayed_work(periodic_controlvm_workqueue,
1977 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1978}
1979
1980static void
1981setup_crash_devices_work_queue(struct work_struct *work)
1982{
e6bdb904
BR
1983 struct controlvm_message local_crash_bus_msg;
1984 struct controlvm_message local_crash_dev_msg;
3ab47701 1985 struct controlvm_message msg;
e6bdb904
BR
1986 u32 local_crash_msg_offset;
1987 u16 local_crash_msg_count;
12e364b9 1988
4da3336c
DK
1989 /* make sure visorbus is registered for controlvm callbacks */
1990 if (visorchipset_visorbusregwait && !visorbusregistered)
e6bdb904 1991 goto cleanup;
12e364b9
KC
1992
1993 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1994
1995 /* send init chipset msg */
98d7b594 1996 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1997 msg.cmd.init_chipset.bus_count = 23;
1998 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1999
2000 chipset_init(&msg);
2001
12e364b9 2002 /* get saved message count */
c3d9a224 2003 if (visorchannel_read(controlvm_channel,
d19642f6
BR
2004 offsetof(struct spar_controlvm_channel_protocol,
2005 saved_crash_message_count),
e6bdb904 2006 &local_crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
2007 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2008 POSTCODE_SEVERITY_ERR);
2009 return;
2010 }
2011
e6bdb904 2012 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 2013 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 2014 local_crash_msg_count,
12e364b9
KC
2015 POSTCODE_SEVERITY_ERR);
2016 return;
2017 }
2018
2019 /* get saved crash message offset */
c3d9a224 2020 if (visorchannel_read(controlvm_channel,
d19642f6
BR
2021 offsetof(struct spar_controlvm_channel_protocol,
2022 saved_crash_message_offset),
e6bdb904 2023 &local_crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
2024 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2025 POSTCODE_SEVERITY_ERR);
2026 return;
2027 }
2028
2029 /* read create device message for storage bus offset */
c3d9a224 2030 if (visorchannel_read(controlvm_channel,
e6bdb904
BR
2031 local_crash_msg_offset,
2032 &local_crash_bus_msg,
3ab47701 2033 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
2034 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2035 POSTCODE_SEVERITY_ERR);
2036 return;
2037 }
2038
2039 /* read create device message for storage device */
c3d9a224 2040 if (visorchannel_read(controlvm_channel,
e6bdb904 2041 local_crash_msg_offset +
3ab47701 2042 sizeof(struct controlvm_message),
e6bdb904 2043 &local_crash_dev_msg,
3ab47701 2044 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
2045 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2046 POSTCODE_SEVERITY_ERR);
2047 return;
2048 }
2049
2050 /* reuse IOVM create bus message */
ebec8967 2051 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
e6bdb904 2052 bus_create(&local_crash_bus_msg);
75c1f8b7 2053 } else {
12e364b9
KC
2054 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2055 POSTCODE_SEVERITY_ERR);
2056 return;
2057 }
2058
2059 /* reuse create device message for storage device */
ebec8967 2060 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
e6bdb904 2061 my_device_create(&local_crash_dev_msg);
75c1f8b7 2062 } else {
12e364b9
KC
2063 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2064 POSTCODE_SEVERITY_ERR);
2065 return;
2066 }
12e364b9
KC
2067 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2068 return;
2069
e6bdb904 2070cleanup:
12e364b9 2071
911e213e 2072 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 2073
9232d2d6
BR
2074 queue_delayed_work(periodic_controlvm_workqueue,
2075 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
2076}
2077
2078static void
52063eca 2079bus_create_response(u32 bus_no, int response)
12e364b9 2080{
8e3fedd6 2081 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
12e364b9
KC
2082}
2083
2084static void
52063eca 2085bus_destroy_response(u32 bus_no, int response)
12e364b9 2086{
8e3fedd6 2087 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
12e364b9
KC
2088}
2089
2090static void
52063eca 2091device_create_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2092{
8e3fedd6 2093 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
12e364b9
KC
2094}
2095
2096static void
52063eca 2097device_destroy_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2098{
8e3fedd6 2099 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
12e364b9
KC
2100}
2101
2102void
52063eca 2103visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2104{
12e364b9 2105 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 2106 bus_no, dev_no, response,
bd0d2dcc 2107 segment_state_standby);
12e364b9 2108}
927c7927 2109EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
12e364b9
KC
2110
2111static void
52063eca 2112device_resume_response(u32 bus_no, u32 dev_no, int response)
12e364b9
KC
2113{
2114 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8e3fedd6 2115 bus_no, dev_no, response,
bd0d2dcc 2116 segment_state_running);
12e364b9
KC
2117}
2118
f4c11551 2119bool
52063eca 2120visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2121{
4f66520b 2122 void *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2123
0aca7844 2124 if (!p)
f4c11551 2125 return false;
77db7127 2126 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
f4c11551 2127 return true;
12e364b9
KC
2128}
2129EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2130
f4c11551 2131bool
52063eca 2132visorchipset_set_bus_context(u32 bus_no, void *context)
12e364b9 2133{
4f66520b 2134 struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2135
0aca7844 2136 if (!p)
f4c11551 2137 return false;
12e364b9 2138 p->bus_driver_context = context;
f4c11551 2139 return true;
12e364b9
KC
2140}
2141EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2142
f4c11551 2143bool
52063eca 2144visorchipset_get_device_info(u32 bus_no, u32 dev_no,
b486df19 2145 struct visorchipset_device_info *dev_info)
12e364b9 2146{
d480f6a2 2147 void *p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2148
0aca7844 2149 if (!p)
f4c11551 2150 return false;
b486df19 2151 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
f4c11551 2152 return true;
12e364b9
KC
2153}
2154EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2155
f4c11551 2156bool
52063eca 2157visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
12e364b9 2158{
d480f6a2
JS
2159 struct visorchipset_device_info *p;
2160
2161 p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2162
0aca7844 2163 if (!p)
f4c11551 2164 return false;
12e364b9 2165 p->bus_driver_context = context;
f4c11551 2166 return true;
12e364b9
KC
2167}
2168EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2169
18b87ed1 2170static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2171 struct device_attribute *attr,
2172 const char *buf, size_t count)
12e364b9 2173{
18b87ed1 2174 char msgtype[64];
12e364b9 2175
66e24b76
BR
2176 if (sscanf(buf, "%63s", msgtype) != 1)
2177 return -EINVAL;
2178
ebec8967 2179 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2180 chipset_events[0] = 1;
2181 return count;
ebec8967 2182 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2183 chipset_events[1] = 1;
2184 return count;
e22a4a0f
BR
2185 }
2186 return -EINVAL;
12e364b9
KC
2187}
2188
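(Editorial sketch.) The two strings accepted above are what release a held CHIPSET_READY response: controlvm_periodic_work() only answers the stashed g_chipset_msg_hdr once check_chipset_events() has seen both events. A minimal, hypothetical userspace hook follows; the sysfs path is an assumption, since the attribute group that exposes chipsetready is not part of this excerpt.

/* Hypothetical boot-completion hook (illustration only); path is assumed. */
#include <stdio.h>

static int report_chipset_event(const char *event)
{
	FILE *f = fopen("/sys/devices/platform/visorchipset/guest/chipsetready",
			"w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", event);
	fclose(f);
	return 0;
}

int main(void)
{
	/* both events must be reported before CHIPSET_READY is answered */
	if (report_chipset_event("MODULES_LOADED"))
		return 1;
	if (report_chipset_event("CALLHOMEDISK_MOUNTED"))
		return 1;
	return 0;
}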
e56fa7cd
BR
2189/* The parahotplug/devicedisabled interface gets called by our support script
2190 * when an SR-IOV device has been shut down. The ID is passed to the script
2191 * and then passed back when the device has been removed.
2192 */
2193static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2194 struct device_attribute *attr,
2195 const char *buf, size_t count)
e56fa7cd 2196{
94217363 2197 unsigned int id;
e56fa7cd 2198
ebec8967 2199 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2200 return -EINVAL;
2201
2202 parahotplug_request_complete(id, 0);
2203 return count;
2204}
2205
2206/* The parahotplug/deviceenabled interface gets called by our support script
2207 * when an SR-IOV device has been recovered. The ID is passed to the script
2208 * and then passed back when the device has been brought back up.
2209 */
2210static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2211 struct device_attribute *attr,
2212 const char *buf, size_t count)
e56fa7cd 2213{
94217363 2214 unsigned int id;
e56fa7cd 2215
ebec8967 2216 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2217 return -EINVAL;
2218
2219 parahotplug_request_complete(id, 1);
2220 return count;
2221}
2222
e3420ed6
EA
2223static int
2224visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2225{
2226 unsigned long physaddr = 0;
2227 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
780fcad3 2228 u64 addr = 0;
e3420ed6
EA
2229
2230 /* sv_enable_dfp(); */
2231 if (offset & (PAGE_SIZE - 1))
2232 return -ENXIO; /* need aligned offsets */
2233
2234 switch (offset) {
2235 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2236 vma->vm_flags |= VM_IO;
2237 if (!*file_controlvm_channel)
2238 return -ENXIO;
2239
2240 visorchannel_read(*file_controlvm_channel,
2241 offsetof(struct spar_controlvm_channel_protocol,
2242 gp_control_channel),
2243 &addr, sizeof(addr));
2244 if (!addr)
2245 return -ENXIO;
2246
2247 physaddr = (unsigned long)addr;
2248 if (remap_pfn_range(vma, vma->vm_start,
2249 physaddr >> PAGE_SHIFT,
2250 vma->vm_end - vma->vm_start,
2251 /*pgprot_noncached */
2252 (vma->vm_page_prot))) {
2253 return -EAGAIN;
2254 }
2255 break;
2256 default:
2257 return -ENXIO;
2258 }
2259 return 0;
2260}
2261
2262static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2263 unsigned long arg)
2264{
2265 s64 adjustment;
2266 s64 vrtc_offset;
2267
2268 switch (cmd) {
2269 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2270 /* get the physical rtc offset */
2271 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2272 if (copy_to_user((void __user *)arg, &vrtc_offset,
2273 sizeof(vrtc_offset))) {
2274 return -EFAULT;
2275 }
d5b3f1dc 2276 return 0;
e3420ed6
EA
2277 case VMCALL_UPDATE_PHYSICAL_TIME:
2278 if (copy_from_user(&adjustment, (void __user *)arg,
2279 sizeof(adjustment))) {
2280 return -EFAULT;
2281 }
2282 return issue_vmcall_update_physical_time(adjustment);
2283 default:
2284 return -EFAULT;
2285 }
2286}
2287
2288static const struct file_operations visorchipset_fops = {
2289 .owner = THIS_MODULE,
2290 .open = visorchipset_open,
2291 .read = NULL,
2292 .write = NULL,
2293 .unlocked_ioctl = visorchipset_ioctl,
2294 .release = visorchipset_release,
2295 .mmap = visorchipset_mmap,
2296};
2297
0f570fc0 2298static int
e3420ed6
EA
2299visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2300{
2301 int rc = 0;
2302
2303 file_controlvm_channel = controlvm_channel;
2304 cdev_init(&file_cdev, &visorchipset_fops);
2305 file_cdev.owner = THIS_MODULE;
2306 if (MAJOR(major_dev) == 0) {
46168810 2307 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
e3420ed6
EA
2308 /* dynamic major device number registration required */
2309 if (rc < 0)
2310 return rc;
2311 } else {
2312 /* static major device number registration required */
46168810 2313 rc = register_chrdev_region(major_dev, 1, "visorchipset");
e3420ed6
EA
2314 if (rc < 0)
2315 return rc;
2316 }
2317 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2318 if (rc < 0) {
2319 unregister_chrdev_region(major_dev, 1);
2320 return rc;
2321 }
2322 return 0;
2323}
2324
55c67dca
PB
2325static int
2326visorchipset_init(struct acpi_device *acpi_device)
12e364b9 2327{
33078257 2328 int rc = 0;
d5b3f1dc 2329 u64 addr;
12e364b9 2330
4da3336c 2331 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
84982fbf 2332 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
ea33b4ee
BR
2333 memset(&livedump_info, 0, sizeof(livedump_info));
2334 atomic_set(&livedump_info.buffers_in_use, 0);
12e364b9 2335
8a1182eb 2336 addr = controlvm_get_channel_address();
ebec8967 2337 if (addr) {
df94247a
JS
2338 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2339 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
c3d9a224 2340 controlvm_channel =
df94247a
JS
2341 visorchannel_create_with_lock(addr, tmp_sz,
2342 GFP_KERNEL, uuid);
93a84565 2343 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
c3d9a224 2344 visorchannel_get_header(controlvm_channel))) {
8a1182eb
BR
2345 initialize_controlvm_payload();
2346 } else {
c3d9a224
BR
2347 visorchannel_destroy(controlvm_channel);
2348 controlvm_channel = NULL;
8a1182eb
BR
2349 return -ENODEV;
2350 }
2351 } else {
8a1182eb
BR
2352 return -ENODEV;
2353 }
2354
5aa8ae57
BR
2355 major_dev = MKDEV(visorchipset_major, 0);
2356 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2357 if (rc < 0) {
4cb005a9 2358 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2359 goto cleanup;
4cb005a9 2360 }
9f8d0e8b 2361
da021f02 2362 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2363
4da3336c
DK
2364 /* if booting in a crash kernel */
2365 if (is_kdump_kernel())
2366 INIT_DELAYED_WORK(&periodic_controlvm_work,
2367 setup_crash_devices_work_queue);
2368 else
2369 INIT_DELAYED_WORK(&periodic_controlvm_work,
2370 controlvm_periodic_work);
2371 periodic_controlvm_workqueue =
2372 create_singlethread_workqueue("visorchipset_controlvm");
2373
2374 if (!periodic_controlvm_workqueue) {
2375 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2376 DIAG_SEVERITY_ERR);
2377 rc = -ENOMEM;
2378 goto cleanup;
2379 }
2380 most_recent_message_jiffies = jiffies;
2381 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2382 rc = queue_delayed_work(periodic_controlvm_workqueue,
2383 &periodic_controlvm_work, poll_jiffies);
2384 if (rc < 0) {
2385 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2386 DIAG_SEVERITY_ERR);
2387 goto cleanup;
12e364b9
KC
2388 }
2389
eb34e877
BR
2390 visorchipset_platform_device.dev.devt = major_dev;
2391 if (platform_device_register(&visorchipset_platform_device) < 0) {
4cb005a9
KC
2392 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2393 rc = -1;
a6a3989b 2394 goto cleanup;
4cb005a9 2395 }
12e364b9 2396 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
c79b28f7
PB
2397
2398 rc = visorbus_init();
a6a3989b 2399cleanup:
12e364b9 2400 if (rc) {
12e364b9
KC
2401 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2402 POSTCODE_SEVERITY_ERR);
2403 }
2404 return rc;
2405}
2406
0f570fc0 2407static void
e3420ed6
EA
2408visorchipset_file_cleanup(dev_t major_dev)
2409{
2410 if (file_cdev.ops)
2411 cdev_del(&file_cdev);
2412 file_cdev.ops = NULL;
2413 unregister_chrdev_region(major_dev, 1);
2414}
2415
55c67dca
PB
2416static int
2417visorchipset_exit(struct acpi_device *acpi_device)
12e364b9 2418{
12e364b9
KC
2419 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2420
c79b28f7
PB
2421 visorbus_exit();
2422
4da3336c
DK
2423 cancel_delayed_work(&periodic_controlvm_work);
2424 flush_workqueue(periodic_controlvm_workqueue);
2425 destroy_workqueue(periodic_controlvm_workqueue);
2426 periodic_controlvm_workqueue = NULL;
2427 destroy_controlvm_payload_info(&controlvm_payload_info);
1783319f 2428
12e364b9
KC
2429 cleanup_controlvm_structures();
2430
da021f02 2431 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2432
c3d9a224 2433 visorchannel_destroy(controlvm_channel);
8a1182eb 2434
addceb12 2435 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
12e364b9 2436 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
55c67dca
PB
2437
2438 return 0;
2439}
2440
2441static const struct acpi_device_id unisys_device_ids[] = {
2442 {"PNP0A07", 0},
2443 {"", 0},
2444};
55c67dca
PB
2445
2446static struct acpi_driver unisys_acpi_driver = {
2447 .name = "unisys_acpi",
2448 .class = "unisys_acpi_class",
2449 .owner = THIS_MODULE,
2450 .ids = unisys_device_ids,
2451 .ops = {
2452 .add = visorchipset_init,
2453 .remove = visorchipset_exit,
2454 },
2455};
d5b3f1dc
EA
2456static __init uint32_t visorutil_spar_detect(void)
2457{
2458 unsigned int eax, ebx, ecx, edx;
2459
2460 if (cpu_has_hypervisor) {
2461 /* check the ID */
2462 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2463 return (ebx == UNISYS_SPAR_ID_EBX) &&
2464 (ecx == UNISYS_SPAR_ID_ECX) &&
2465 (edx == UNISYS_SPAR_ID_EDX);
2466 } else {
2467 return 0;
2468 }
2469}
55c67dca
PB
2470
2471static int init_unisys(void)
2472{
2473 int result;
d5b3f1dc 2474 if (!visorutil_spar_detect())
55c67dca
PB
2475 return -ENODEV;
2476
2477 result = acpi_bus_register_driver(&unisys_acpi_driver);
2478 if (result)
2479 return -ENODEV;
2480
2481 pr_info("Unisys Visorchipset Driver Loaded.\n");
2482 return 0;
2483};
2484
2485static void exit_unisys(void)
2486{
2487 acpi_bus_unregister_driver(&unisys_acpi_driver);
12e364b9
KC
2488}
2489
12e364b9 2490module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2491MODULE_PARM_DESC(visorchipset_major,
2492 "major device number to use for the device node");
4da3336c
DK
2493module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2494MODULE_PARM_DESC(visorchipset_visorbusregwait,
12e364b9 2495 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2496module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2497 int, S_IRUGO);
2498MODULE_PARM_DESC(visorchipset_holdchipsetready,
2499 "1 to hold response to CHIPSET_READY");
b615d628 2500
55c67dca
PB
2501module_init(init_unisys);
2502module_exit(exit_unisys);
12e364b9
KC
2503
2504MODULE_AUTHOR("Unisys");
2505MODULE_LICENSE("GPL");
2506MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2507 VERSION);
2508MODULE_VERSION(VERSION);