/* drivers/staging/unisys/visorbus/visorchipset.c */
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "uisutils.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000


#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

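/* Minimal illustration (not code from this driver) of how the constants above
 * could be checked with the kernel's cpuid() helper from <asm/processor.h>:
 * the hypervisor leaf is expected to return "UnisysSpar64" spread across
 * ebx/ecx/edx when running as an s-Par guest.  Helper name is hypothetical.
 */
static inline bool example_spar_detected(void)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
	return ebx == UNISYS_SPAR_ID_EBX &&
	       ecx == UNISYS_SPAR_ID_ECX &&
	       edx == UNISYS_SPAR_ID_EDX;
}
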
/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;	/* when we got our last
							 * controlvm message */
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

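/* Illustrative sketch (helper name assumed, not taken from this file) of the
 * polling policy described in the comment above: fall back to the slow
 * interval after MIN_IDLE_SECONDS of silence, and return to the fast interval
 * as soon as a controlvm message arrives.
 */
static void example_adjust_controlvm_poll_rate(bool got_message)
{
	if (got_message) {
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	} else if (time_after(jiffies, most_recent_message_jiffies +
			      (HZ * MIN_IDLE_SECONDS))) {
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	}
}
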
struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload * pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
struct visor_livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	bool gettextdump_outstanding;
	u32 crc32;
	unsigned long length;
	atomic_t buffers_in_use;
	unsigned long destination;
};

static struct visor_livedump_info livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

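/* Illustrative sketch (not part of the driver) of the stash-and-retry pattern
 * described above: when the caller determines a message's payload cannot be
 * buffered right now, it parks the message so the next periodic pass can
 * retry it.
 */
static void example_stash_for_retry(struct controlvm_message *msg)
{
	if (!controlvm_pending_msg_valid) {
		controlvm_pending_msg = *msg;
		controlvm_pending_msg_valid = true;
	}
}
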
/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};

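/* Illustrative sketch (helper name assumed, not code from this driver) of how
 * a putfile_request is typically initialized before being linked onto
 * putfile_request_list; the field values come from the TRANSMIT_FILE message
 * that started the conversation.
 */
static struct putfile_request *example_putfile_request_alloc(
			struct controlvm_message_header *hdr, u64 file_req_no)
{
	struct putfile_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return NULL;
	req->sig = PUTFILE_REQUEST_SIG;
	req->controlvm_header = *hdr;
	req->file_request_number = file_req_no;
	INIT_LIST_HEAD(&req->next_putfile_request);
	INIT_LIST_HEAD(&req->input_buffer_list);
	spin_lock_init(&req->req_list_lock);
	init_waitqueue_head(&req->input_buffer_wq);
	req->completion_status = 0;	/* in progress */
	list_add(&req->next_putfile_request, &putfile_request_list);
	return req;
}
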
struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(u32 bus_no, int response);
static void bus_destroy_response(u32 bus_no, int response);
static void device_create_response(u32 bus_no, u32 dev_no, int response);
static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
static void device_resume_response(u32 bus_no, u32 dev_no, int response);

static void visorchipset_device_pause_response(u32 bus_no, u32 dev_no,
					       int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};

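/* With the attribute groups above attached to the platform device, the
 * resulting sysfs layout is:
 *
 *   /sys/devices/platform/visorchipset/install/{toolaction,boottotool,
 *					error,textid,remaining_steps}
 *   /sys/devices/platform/visorchipset/guest/chipsetready
 *   /sys/devices/platform/visorchipset/parahotplug/{devicedisabled,
 *					deviceenabled}
 */
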
/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);


static void parser_done(struct parser_context *ctx);

static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * allocate an extra byte to ensure the payload is
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long) (addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		if (ctx) {
			parser_done(ctx);
			ctx = NULL;
		}
	}
	return rc;
}

static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}

/** Describes the state from the perspective of which controlvm messages have
 * been received for a bus or device.
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		goto Away;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}

Away:
	return;
}

static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}

static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
	if (value == NULL)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *) (value))[value_length] = '\0';
	return value;
}

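/* Illustrative walk-through (assumed values, not code from this file) of the
 * parser helpers above: wrap a payload, pull out the NAME string, then
 * release the context so controlvm_payload_bytes_buffered is credited back.
 */
static void example_parse_name(u64 payload_addr, u32 payload_bytes)
{
	bool retry = false;
	struct parser_context *ctx;
	char *name;

	ctx = parser_init_byte_stream(payload_addr, payload_bytes,
				      false /* not local */, &retry);
	if (!ctx)
		return;		/* if retry is set, try again later */
	parser_param_start(ctx, PARSERSTRING_NAME);
	name = parser_string_get(ctx);
	/* ... use name ... */
	kfree(name);
	parser_done(ctx);
}
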
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &(efi_spar_indication),
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;

	kfree(p->name);
	kfree(p->description);
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *) v;

	memset(p, 0, sizeof(struct visorchipset_device_info));
}

static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
	struct visorchipset_bus_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no)
			return p;
	}

	return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
	struct visorchipset_device_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no && p->dev_no == dev_no)
			return p;
	}

	return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
	struct visorchipset_device_info *p, *tmp;

	list_for_each_entry_safe(p, tmp, list, entry) {
		if (p->bus_no == bus_no) {
			list_del(&p->entry);
			kfree(p);
		}
	}
}

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;
	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;
	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
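
/* Minimal sketch of how a bus driver such as visorbus is expected to call the
 * exported registration hook above (names below are hypothetical): it hands
 * in its notifier table and receives the responder table it must call once
 * each bus/device operation completes.
 */
static struct visorchipset_busdev_responders example_responders;

static void example_register_with_chipset(
			struct visorchipset_busdev_notifiers *my_notifiers,
			struct ultra_vbus_deviceinfo *my_driver_info)
{
	visorchipset_register_busdev(my_notifiers, &example_responders,
				     my_driver_info);
	/* later, e.g. after a bus has been created:
	 *	(*example_responders.bus_create)(bus_no, response);
	 */
}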

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it). */
	features =
	    inmsg->cmd.init_chipset.
	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver. */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet. */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 crash_msg_offset;
	u16 crash_msg_count;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else { /* CRASH_DEV */
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);

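/* Sketch of the intended caller (visorbus, when it prepares the crash-dump
 * partition): the CONTROLVM messages that created the crash bus and device
 * are stashed in the channel via the export above so they can be replayed
 * after a kdump boot.  Variable and helper names here are illustrative only.
 */
static void example_save_crash_messages(struct controlvm_message *bus_msg,
					struct controlvm_message *dev_msg)
{
	visorchipset_save_message(bus_msg, CRASH_BUS);
	visorchipset_save_message(dev_msg, CRASH_DEV);
}
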
static void
bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
{
	struct visorchipset_bus_info *p;
	bool need_clear = false;

	p = bus_find(&bus_info_list, bus_no);
	if (!p)
		return;

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			busdevices_del(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		busdevices_del(&dev_info_list, bus_no);
	}
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     u32 bus_no, u32 dev_no, int response,
			     struct spar_segment_state response_state)
{
	struct visorchipset_device_info *p;
	struct controlvm_message outmsg;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
{
	struct visorchipset_device_info *p;
	bool need_clear = false;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

static void
bus_epilog(u32 bus_no,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	struct visorchipset_bus_info *bus_info;
	bool notified = false;

	bus_info = bus_find(&bus_info_list, bus_no);

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_no);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_no, response);
	up(&notifier_lock);
}

static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;

	struct visorchipset_device_info *dev_info =
		device_find(&dev_info_list, bus_no, dev_no);
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;

	notifiers = &busdev_notifiers;

	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (bus_no, dev_no);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (bus_no,
								     dev_no);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (bus_no,
								    dev_no);
					notified = true;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (bus_no, dev_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, bus_no, dev_no, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	dev_info->chan_info.intr = cmd->create_device.intr;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(bus_no, dev_no, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS or a
 * negative CONTROLVM_RESP_ERROR code on failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);

int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);

/*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);

static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

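/* Sketch of the deferred-response path described above: when the
 * visorchipset_holdchipsetready module parameter is set, the header saved in
 * g_chipset_msg_hdr is answered later (in this driver that happens once user
 * space writes to the guest/chipsetready sysfs file).  Illustration only;
 * the helper name is hypothetical.
 */
static void example_release_held_chipset_ready(void)
{
	if (g_chipset_msg_hdr.id == CONTROLVM_CHIPSET_READY) {
		controlvm_respond(&g_chipset_msg_hdr, CONTROLVM_RESP_SUCCESS);
		/* invalidate so we only respond once */
		g_chipset_msg_hdr.id = CONTROLVM_INVALID;
	}
}
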
static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

1604/* This is your "one-stop" shop for grabbing the next message from the
1605 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1606 */
f4c11551 1607static bool
3ab47701 1608read_controlvm_event(struct controlvm_message *msg)
12e364b9 1609{
c3d9a224 1610 if (visorchannel_signalremove(controlvm_channel,
1611 CONTROLVM_QUEUE_EVENT, msg)) {
1612 /* got a message */
0aca7844 1613 if (msg->hdr.flags.test_message == 1)
1614 return false;
1615 return true;
12e364b9 1616 }
f4c11551 1617 return false;
1618}
1619
1620/*
1621 * The general parahotplug flow works as follows. The visorchipset
1622 * driver receives a DEVICE_CHANGESTATE message from Command
1623 * specifying a physical device to enable or disable. The CONTROLVM
1624 * message handler calls parahotplug_process_message, which then adds
1625 * the message to a global list and kicks off a udev event which
1626 * causes a user level script to enable or disable the specified
 1628  * device. The udev script then writes the request id back through the
 1629  * parahotplug/deviceenabled (or devicedisabled) sysfs attribute, which
 1630  * causes parahotplug_request_complete() to be called; the matching
 1631  * CONTROLVM message is then retrieved from the list and responded to.
1631 */
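/*
 * Illustrative walk-through of one disable request (hypothetical values, not
 * taken from a real trace): a DEVICE_CHANGESTATE message arrives with
 * state.active == 0 for a physical device.  parahotplug_process_message()
 * wraps it in a parahotplug_request with a fresh id, queues it on
 * parahotplug_request_list, and parahotplug_request_kickoff() raises a
 * KOBJ_CHANGE uevent carrying SPAR_PARAHOTPLUG_ID plus the bus, device and
 * function environment variables.  The support script disables the device,
 * writes the id back to the parahotplug/devicedisabled attribute, and
 * parahotplug_request_complete() then responds to the stashed CONTROLVM
 * message and frees the request.
 */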
1632
1633#define PARAHOTPLUG_TIMEOUT_MS 2000
1634
1635/*
1636 * Generate unique int to match an outstanding CONTROLVM message with a
 1637  * udev script sysfs response
1638 */
1639static int
1640parahotplug_next_id(void)
1641{
1642 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1643
1644 return atomic_inc_return(&id);
1645}
1646
1647/*
1648 * Returns the time (in jiffies) when a CONTROLVM message on the list
1649 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1650 */
1651static unsigned long
1652parahotplug_next_expiration(void)
1653{
2cc1a1b3 1654 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1655}
1656
1657/*
1658 * Create a parahotplug_request, which is basically a wrapper for a
1659 * CONTROLVM_MESSAGE that we can stick on a list
1660 */
1661static struct parahotplug_request *
3ab47701 1662parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1663{
1664 struct parahotplug_request *req;
1665
6a55e3c3 1666 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1667 if (!req)
1668 return NULL;
1669
1670 req->id = parahotplug_next_id();
1671 req->expiration = parahotplug_next_expiration();
1672 req->msg = *msg;
1673
1674 return req;
1675}
1676
1677/*
1678 * Free a parahotplug_request.
1679 */
1680static void
1681parahotplug_request_destroy(struct parahotplug_request *req)
1682{
1683 kfree(req);
1684}
1685
1686/*
1687 * Cause uevent to run the user level script to do the disable/enable
1688 * specified in (the CONTROLVM message in) the specified
1689 * parahotplug_request
1690 */
1691static void
1692parahotplug_request_kickoff(struct parahotplug_request *req)
1693{
2ea5117b 1694 struct controlvm_message_packet *cmd = &req->msg.cmd;
1695 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1696 env_func[40];
1697 char *envp[] = {
1698 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1699 };
1700
1701 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1702 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1703 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1704 cmd->device_change_state.state.active);
12e364b9 1705 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1706 cmd->device_change_state.bus_no);
12e364b9 1707 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1708 cmd->device_change_state.dev_no >> 3);
12e364b9 1709 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1710 cmd->device_change_state.dev_no & 0x7);
12e364b9 1711
eb34e877 1712 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1713 envp);
1714}
1715
1716/*
1717 * Remove any request from the list that's been on there too long and
1718 * respond with an error.
1719 */
1720static void
1721parahotplug_process_list(void)
1722{
1723 struct list_head *pos;
1724 struct list_head *tmp;
12e364b9 1725
ddf5de53 1726 spin_lock(&parahotplug_request_list_lock);
12e364b9 1727
ddf5de53 1728 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1729 struct parahotplug_request *req =
1730 list_entry(pos, struct parahotplug_request, list);
1731
1732 if (!time_after_eq(jiffies, req->expiration))
1733 continue;
1734
1735 list_del(pos);
1736 if (req->msg.hdr.flags.response_expected)
1737 controlvm_respond_physdev_changestate(
1738 &req->msg.hdr,
1739 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1740 req->msg.cmd.device_change_state.state);
1741 parahotplug_request_destroy(req);
1742 }
1743
ddf5de53 1744 spin_unlock(&parahotplug_request_list_lock);
1745}
1746
1747/*
 1748  * Called from the parahotplug sysfs store handlers, which means the user
 1749  * script has finished the enable/disable. Find the matching identifier, and
1750 * respond to the CONTROLVM message with success.
1751 */
1752static int
b06bdf7d 1753parahotplug_request_complete(int id, u16 active)
12e364b9 1754{
1755 struct list_head *pos;
1756 struct list_head *tmp;
12e364b9 1757
ddf5de53 1758 spin_lock(&parahotplug_request_list_lock);
1759
1760 /* Look for a request matching "id". */
ddf5de53 1761 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1762 struct parahotplug_request *req =
1763 list_entry(pos, struct parahotplug_request, list);
1764 if (req->id == id) {
1765 /* Found a match. Remove it from the list and
1766 * respond.
1767 */
1768 list_del(pos);
ddf5de53 1769 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1770 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1771 if (req->msg.hdr.flags.response_expected)
1772 controlvm_respond_physdev_changestate(
1773 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1774 req->msg.cmd.device_change_state.state);
1775 parahotplug_request_destroy(req);
1776 return 0;
1777 }
1778 }
1779
ddf5de53 1780 spin_unlock(&parahotplug_request_list_lock);
1781 return -1;
1782}
1783
1784/*
1785 * Enables or disables a PCI device by kicking off a udev script
1786 */
bd5b9b32 1787static void
3ab47701 1788parahotplug_process_message(struct controlvm_message *inmsg)
1789{
1790 struct parahotplug_request *req;
1791
1792 req = parahotplug_request_create(inmsg);
1793
38f736e9 1794 if (!req)
12e364b9 1795 return;
12e364b9 1796
2ea5117b 1797 if (inmsg->cmd.device_change_state.state.active) {
1798 /* For enable messages, just respond with success
1799 * right away. This is a bit of a hack, but there are
1800 * issues with the early enable messages we get (with
1801 * either the udev script not detecting that the device
1802 * is up, or not getting called at all). Fortunately
1803 * the messages that get lost don't matter anyway, as
1804 * devices are automatically enabled at
1805 * initialization.
1806 */
1807 parahotplug_request_kickoff(req);
1808 controlvm_respond_physdev_changestate(&inmsg->hdr,
1809 CONTROLVM_RESP_SUCCESS,
1810 inmsg->cmd.device_change_state.state);
1811 parahotplug_request_destroy(req);
1812 } else {
1813 /* For disable messages, add the request to the
1814 * request list before kicking off the udev script. It
1815 * won't get responded to until the script has
1816 * indicated it's done.
1817 */
1818 spin_lock(&parahotplug_request_list_lock);
1819 list_add_tail(&req->list, &parahotplug_request_list);
1820 spin_unlock(&parahotplug_request_list_lock);
1821
1822 parahotplug_request_kickoff(req);
1823 }
1824}
1825
1826/* Process a controlvm message.
1827 * Return result:
779d0752 1828 * false - this function will return false only in the case where the
1829 * controlvm message was NOT processed, but processing must be
1830 * retried before reading the next controlvm message; a
1831 * scenario where this can occur is when we need to throttle
1832 * the allocation of memory in which to copy out controlvm
1833 * payload data
f4c11551 1834 * true - processing of the controlvm message completed,
1835 * either successfully or with an error.
1836 */
f4c11551 1837static bool
d5b3f1dc 1838handle_command(struct controlvm_message inmsg, u64 channel_addr)
12e364b9 1839{
2ea5117b 1840 struct controlvm_message_packet *cmd = &inmsg.cmd;
1841 u64 parm_addr;
1842 u32 parm_bytes;
317d9614 1843 struct parser_context *parser_ctx = NULL;
e82ba62e 1844 bool local_addr;
3ab47701 1845 struct controlvm_message ackmsg;
1846
1847 /* create parsing context if necessary */
818352a8 1848 local_addr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1849 if (channel_addr == 0)
f4c11551 1850 return true;
1851 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1852 parm_bytes = inmsg.hdr.payload_bytes;
1853
1854 /* Parameter and channel addresses within test messages actually lie
1855 * within our OS-controlled memory. We need to know that, because it
1856 * makes a difference in how we compute the virtual address.
1857 */
ebec8967 1858 if (parm_addr && parm_bytes) {
f4c11551 1859 bool retry = false;
26eb2c0c 1860
12e364b9 1861 parser_ctx =
1862 parser_init_byte_stream(parm_addr, parm_bytes,
1863 local_addr, &retry);
1b08872e 1864 if (!parser_ctx && retry)
f4c11551 1865 return false;
1866 }
1867
818352a8 1868 if (!local_addr) {
1869 controlvm_init_response(&ackmsg, &inmsg.hdr,
1870 CONTROLVM_RESP_SUCCESS);
1871 if (controlvm_channel)
1872 visorchannel_signalinsert(controlvm_channel,
1873 CONTROLVM_QUEUE_ACK,
1874 &ackmsg);
12e364b9 1875 }
98d7b594 1876 switch (inmsg.hdr.id) {
12e364b9 1877 case CONTROLVM_CHIPSET_INIT:
1878 chipset_init(&inmsg);
1879 break;
1880 case CONTROLVM_BUS_CREATE:
1881 bus_create(&inmsg);
1882 break;
1883 case CONTROLVM_BUS_DESTROY:
1884 bus_destroy(&inmsg);
1885 break;
1886 case CONTROLVM_BUS_CONFIGURE:
1887 bus_configure(&inmsg, parser_ctx);
1888 break;
1889 case CONTROLVM_DEVICE_CREATE:
1890 my_device_create(&inmsg);
1891 break;
1892 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1893 if (cmd->device_change_state.flags.phys_device) {
1894 parahotplug_process_message(&inmsg);
1895 } else {
1896 /* save the hdr and cmd structures for later use */
1897 /* when sending back the response to Command */
1898 my_device_changestate(&inmsg);
4f44b72d 1899 g_devicechangestate_packet = inmsg.cmd;
1900 break;
1901 }
1902 break;
1903 case CONTROLVM_DEVICE_DESTROY:
1904 my_device_destroy(&inmsg);
1905 break;
1906 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1907 /* no op for now, just send a respond that we passed */
98d7b594 1908 if (inmsg.hdr.flags.response_expected)
1909 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1910 break;
1911 case CONTROLVM_CHIPSET_READY:
1912 chipset_ready(&inmsg.hdr);
1913 break;
1914 case CONTROLVM_CHIPSET_SELFTEST:
1915 chipset_selftest(&inmsg.hdr);
1916 break;
1917 case CONTROLVM_CHIPSET_STOP:
1918 chipset_notready(&inmsg.hdr);
1919 break;
1920 default:
98d7b594 1921 if (inmsg.hdr.flags.response_expected)
12e364b9 1922 controlvm_respond(&inmsg.hdr,
818352a8 1923 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1924 break;
1925 }
1926
38f736e9 1927 if (parser_ctx) {
1928 parser_done(parser_ctx);
1929 parser_ctx = NULL;
1930 }
f4c11551 1931 return true;
1932}
1933
d5b3f1dc 1934static u64 controlvm_get_channel_address(void)
524b0b63 1935{
5fc0229a 1936 u64 addr = 0;
b3c55b13 1937 u32 size = 0;
524b0b63 1938
0aca7844 1939 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1940 return 0;
0aca7844 1941
1942 return addr;
1943}
1944
1945static void
1946controlvm_periodic_work(struct work_struct *work)
1947{
3ab47701 1948 struct controlvm_message inmsg;
1949 bool got_command = false;
1950 bool handle_command_failed = false;
1c1ed292 1951 static u64 poll_count;
1952
1953 /* make sure visorbus server is registered for controlvm callbacks */
4da3336c 1954 if (visorchipset_visorbusregwait && !visorbusregistered)
1c1ed292 1955 goto cleanup;
12e364b9 1956
 1957 	poll_count++;
 1958 	/* only start handling controlvm messages after 250 polls */
 1959 	if (poll_count < 250)
 1960 		goto cleanup;
1962
1963 /* Check events to determine if response to CHIPSET_READY
1964 * should be sent
1965 */
1966 if (visorchipset_holdchipsetready &&
1967 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1968 if (check_chipset_events() == 1) {
da021f02 1969 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1970 clear_chipset_events();
da021f02 1971 memset(&g_chipset_msg_hdr, 0,
98d7b594 1972 sizeof(struct controlvm_message_header));
1973 }
1974 }
1975
c3d9a224 1976 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1977 CONTROLVM_QUEUE_RESPONSE,
1978 &inmsg))
1979 ;
1c1ed292 1980 if (!got_command) {
7166ed19 1981 if (controlvm_pending_msg_valid) {
1982 /* we throttled processing of a prior
1983 * msg, so try to process it again
1984 * rather than reading a new one
1985 */
7166ed19 1986 inmsg = controlvm_pending_msg;
f4c11551 1987 controlvm_pending_msg_valid = false;
1c1ed292 1988 got_command = true;
75c1f8b7 1989 } else {
1c1ed292 1990 got_command = read_controlvm_event(&inmsg);
75c1f8b7 1991 }
8a1182eb 1992 }
12e364b9 1993
f4c11551 1994 handle_command_failed = false;
1c1ed292 1995 while (got_command && (!handle_command_failed)) {
b53e0e93 1996 most_recent_message_jiffies = jiffies;
1997 if (handle_command(inmsg,
1998 visorchannel_get_physaddr
c3d9a224 1999 (controlvm_channel)))
1c1ed292 2000 got_command = read_controlvm_event(&inmsg);
2001 else {
2002 /* this is a scenario where throttling
2003 * is required, but probably NOT an
2004 * error...; we stash the current
2005 * controlvm msg so we will attempt to
2006 * reprocess it on our next loop
2007 */
f4c11551 2008 handle_command_failed = true;
7166ed19 2009 controlvm_pending_msg = inmsg;
f4c11551 2010 controlvm_pending_msg_valid = true;
2011 }
2012 }
2013
2014 /* parahotplug_worker */
2015 parahotplug_process_list();
2016
1c1ed292 2017cleanup:
2018
2019 if (time_after(jiffies,
b53e0e93 2020 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2021 /* it's been longer than MIN_IDLE_SECONDS since we
2022 * processed our last controlvm message; slow down the
2023 * polling
2024 */
2025 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2026 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 2027 } else {
2028 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2029 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2030 }
2031
2032 queue_delayed_work(periodic_controlvm_workqueue,
2033 &periodic_controlvm_work, poll_jiffies);
2034}
2035
2036static void
2037setup_crash_devices_work_queue(struct work_struct *work)
2038{
2039 struct controlvm_message local_crash_bus_msg;
2040 struct controlvm_message local_crash_dev_msg;
3ab47701 2041 struct controlvm_message msg;
2042 u32 local_crash_msg_offset;
2043 u16 local_crash_msg_count;
12e364b9 2044
2045 /* make sure visorbus is registered for controlvm callbacks */
2046 if (visorchipset_visorbusregwait && !visorbusregistered)
e6bdb904 2047 goto cleanup;
2048
2049 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2050
2051 /* send init chipset msg */
98d7b594 2052 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2053 msg.cmd.init_chipset.bus_count = 23;
2054 msg.cmd.init_chipset.switch_count = 0;
2055
2056 chipset_init(&msg);
2057
12e364b9 2058 /* get saved message count */
c3d9a224 2059 if (visorchannel_read(controlvm_channel,
2060 offsetof(struct spar_controlvm_channel_protocol,
2061 saved_crash_message_count),
e6bdb904 2062 &local_crash_msg_count, sizeof(u16)) < 0) {
2063 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2064 POSTCODE_SEVERITY_ERR);
2065 return;
2066 }
2067
e6bdb904 2068 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 2069 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 2070 local_crash_msg_count,
2071 POSTCODE_SEVERITY_ERR);
2072 return;
2073 }
2074
2075 /* get saved crash message offset */
c3d9a224 2076 if (visorchannel_read(controlvm_channel,
2077 offsetof(struct spar_controlvm_channel_protocol,
2078 saved_crash_message_offset),
e6bdb904 2079 &local_crash_msg_offset, sizeof(u32)) < 0) {
2080 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2081 POSTCODE_SEVERITY_ERR);
2082 return;
2083 }
2084
2085 /* read create device message for storage bus offset */
c3d9a224 2086 if (visorchannel_read(controlvm_channel,
2087 local_crash_msg_offset,
2088 &local_crash_bus_msg,
3ab47701 2089 sizeof(struct controlvm_message)) < 0) {
2090 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2091 POSTCODE_SEVERITY_ERR);
2092 return;
2093 }
2094
2095 /* read create device message for storage device */
c3d9a224 2096 if (visorchannel_read(controlvm_channel,
e6bdb904 2097 local_crash_msg_offset +
3ab47701 2098 sizeof(struct controlvm_message),
e6bdb904 2099 &local_crash_dev_msg,
3ab47701 2100 sizeof(struct controlvm_message)) < 0) {
2101 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2102 POSTCODE_SEVERITY_ERR);
2103 return;
2104 }
2105
2106 /* reuse IOVM create bus message */
ebec8967 2107 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
e6bdb904 2108 bus_create(&local_crash_bus_msg);
75c1f8b7 2109 } else {
2110 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2111 POSTCODE_SEVERITY_ERR);
2112 return;
2113 }
2114
2115 /* reuse create device message for storage device */
ebec8967 2116 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
e6bdb904 2117 my_device_create(&local_crash_dev_msg);
75c1f8b7 2118 } else {
2119 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2120 POSTCODE_SEVERITY_ERR);
2121 return;
2122 }
2123 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2124 return;
2125
e6bdb904 2126cleanup:
12e364b9 2127
911e213e 2128 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 2129
2130 queue_delayed_work(periodic_controlvm_workqueue,
2131 &periodic_controlvm_work, poll_jiffies);
2132}
2133
2134static void
52063eca 2135bus_create_response(u32 bus_no, int response)
12e364b9 2136{
8e3fedd6 2137 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
2138}
2139
2140static void
52063eca 2141bus_destroy_response(u32 bus_no, int response)
12e364b9 2142{
8e3fedd6 2143 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
2144}
2145
2146static void
52063eca 2147device_create_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2148{
8e3fedd6 2149 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
2150}
2151
2152static void
52063eca 2153device_destroy_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2154{
8e3fedd6 2155 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
2156}
2157
2158void
52063eca 2159visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2160{
12e364b9 2161 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 2162 bus_no, dev_no, response,
bd0d2dcc 2163 segment_state_standby);
12e364b9 2164}
927c7927 2165EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2166
2167static void
52063eca 2168device_resume_response(u32 bus_no, u32 dev_no, int response)
2169{
2170 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8e3fedd6 2171 bus_no, dev_no, response,
bd0d2dcc 2172 segment_state_running);
2173}
2174
f4c11551 2175bool
52063eca 2176visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2177{
4f66520b 2178 void *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2179
0aca7844 2180 if (!p)
f4c11551 2181 return false;
77db7127 2182 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
f4c11551 2183 return true;
2184}
2185EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2186
f4c11551 2187bool
52063eca 2188visorchipset_set_bus_context(u32 bus_no, void *context)
12e364b9 2189{
4f66520b 2190 struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2191
0aca7844 2192 if (!p)
f4c11551 2193 return false;
12e364b9 2194 p->bus_driver_context = context;
f4c11551 2195 return true;
2196}
2197EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2198
f4c11551 2199bool
52063eca 2200visorchipset_get_device_info(u32 bus_no, u32 dev_no,
b486df19 2201 struct visorchipset_device_info *dev_info)
12e364b9 2202{
d480f6a2 2203 void *p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2204
0aca7844 2205 if (!p)
f4c11551 2206 return false;
b486df19 2207 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
f4c11551 2208 return true;
2209}
2210EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2211
f4c11551 2212bool
52063eca 2213visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
12e364b9 2214{
2215 struct visorchipset_device_info *p;
2216
2217 p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2218
0aca7844 2219 if (!p)
f4c11551 2220 return false;
12e364b9 2221 p->bus_driver_context = context;
f4c11551 2222 return true;
2223}
2224EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
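/*
 * Minimal usage sketch of the lookup/context helpers exported above, as a
 * visorbus-side caller might use them.  Illustrative only: the function
 * name and the meaning of the stashed pointer are hypothetical.
 */
#if 0
static int example_attach_driver_data(u32 bus_no, u32 dev_no, void *drvdata)
{
	struct visorchipset_device_info dev_info;

	/* snapshot the bookkeeping entry visorchipset keeps for this device */
	if (!visorchipset_get_device_info(bus_no, dev_no, &dev_info))
		return -ENODEV;

	/* remember a private pointer so later callbacks can retrieve it */
	if (!visorchipset_set_device_context(bus_no, dev_no, drvdata))
		return -ENODEV;

	return 0;
}
#endif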
2225
18b87ed1 2226static ssize_t chipsetready_store(struct device *dev,
2227 struct device_attribute *attr,
2228 const char *buf, size_t count)
12e364b9 2229{
18b87ed1 2230 char msgtype[64];
12e364b9 2231
2232 if (sscanf(buf, "%63s", msgtype) != 1)
2233 return -EINVAL;
2234
ebec8967 2235 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2236 chipset_events[0] = 1;
2237 return count;
ebec8967 2238 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2239 chipset_events[1] = 1;
2240 return count;
2241 }
2242 return -EINVAL;
2243}
2244
2245/* The parahotplug/devicedisabled interface gets called by our support script
2246 * when an SR-IOV device has been shut down. The ID is passed to the script
2247 * and then passed back when the device has been removed.
2248 */
2249static ssize_t devicedisabled_store(struct device *dev,
2250 struct device_attribute *attr,
2251 const char *buf, size_t count)
e56fa7cd 2252{
94217363 2253 unsigned int id;
e56fa7cd 2254
ebec8967 2255 if (kstrtouint(buf, 10, &id))
2256 return -EINVAL;
2257
2258 parahotplug_request_complete(id, 0);
2259 return count;
2260}
2261
2262/* The parahotplug/deviceenabled interface gets called by our support script
2263 * when an SR-IOV device has been recovered. The ID is passed to the script
2264 * and then passed back when the device has been brought back up.
2265 */
2266static ssize_t deviceenabled_store(struct device *dev,
2267 struct device_attribute *attr,
2268 const char *buf, size_t count)
e56fa7cd 2269{
94217363 2270 unsigned int id;
e56fa7cd 2271
ebec8967 2272 if (kstrtouint(buf, 10, &id))
2273 return -EINVAL;
2274
2275 parahotplug_request_complete(id, 1);
2276 return count;
2277}
2278
2279static int
2280visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2281{
2282 unsigned long physaddr = 0;
2283 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
780fcad3 2284 u64 addr = 0;
2285
2286 /* sv_enable_dfp(); */
2287 if (offset & (PAGE_SIZE - 1))
2288 return -ENXIO; /* need aligned offsets */
2289
2290 switch (offset) {
2291 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2292 vma->vm_flags |= VM_IO;
2293 if (!*file_controlvm_channel)
2294 return -ENXIO;
2295
2296 visorchannel_read(*file_controlvm_channel,
2297 offsetof(struct spar_controlvm_channel_protocol,
2298 gp_control_channel),
2299 &addr, sizeof(addr));
2300 if (!addr)
2301 return -ENXIO;
2302
2303 physaddr = (unsigned long)addr;
2304 if (remap_pfn_range(vma, vma->vm_start,
2305 physaddr >> PAGE_SHIFT,
2306 vma->vm_end - vma->vm_start,
2307 /*pgprot_noncached */
2308 (vma->vm_page_prot))) {
2309 return -EAGAIN;
2310 }
2311 break;
2312 default:
2313 return -ENXIO;
2314 }
2315 return 0;
2316}
2317
2318static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2319 unsigned long arg)
2320{
2321 s64 adjustment;
2322 s64 vrtc_offset;
2323
2324 switch (cmd) {
2325 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2326 /* get the physical rtc offset */
2327 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2328 if (copy_to_user((void __user *)arg, &vrtc_offset,
2329 sizeof(vrtc_offset))) {
2330 return -EFAULT;
2331 }
d5b3f1dc 2332 return 0;
2333 case VMCALL_UPDATE_PHYSICAL_TIME:
2334 if (copy_from_user(&adjustment, (void __user *)arg,
2335 sizeof(adjustment))) {
2336 return -EFAULT;
2337 }
2338 return issue_vmcall_update_physical_time(adjustment);
2339 default:
2340 return -EFAULT;
2341 }
2342}
2343
2344static const struct file_operations visorchipset_fops = {
2345 .owner = THIS_MODULE,
2346 .open = visorchipset_open,
2347 .read = NULL,
2348 .write = NULL,
2349 .unlocked_ioctl = visorchipset_ioctl,
2350 .release = visorchipset_release,
2351 .mmap = visorchipset_mmap,
2352};
2353
2354int
2355visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2356{
2357 int rc = 0;
2358
2359 file_controlvm_channel = controlvm_channel;
2360 cdev_init(&file_cdev, &visorchipset_fops);
2361 file_cdev.owner = THIS_MODULE;
2362 if (MAJOR(major_dev) == 0) {
46168810 2363 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2364 /* dynamic major device number registration required */
2365 if (rc < 0)
2366 return rc;
2367 } else {
2368 /* static major device number registration required */
46168810 2369 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2370 if (rc < 0)
2371 return rc;
2372 }
2373 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2374 if (rc < 0) {
2375 unregister_chrdev_region(major_dev, 1);
2376 return rc;
2377 }
2378 return 0;
2379}
2380
2381static int
2382visorchipset_init(struct acpi_device *acpi_device)
12e364b9 2383{
33078257 2384 int rc = 0;
d5b3f1dc 2385 u64 addr;
12e364b9 2386
4da3336c 2387 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
84982fbf 2388 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2389 memset(&livedump_info, 0, sizeof(livedump_info));
2390 atomic_set(&livedump_info.buffers_in_use, 0);
12e364b9 2391
8a1182eb 2392 addr = controlvm_get_channel_address();
ebec8967 2393 if (addr) {
2394 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2395 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
c3d9a224 2396 controlvm_channel =
2397 visorchannel_create_with_lock(addr, tmp_sz,
2398 GFP_KERNEL, uuid);
93a84565 2399 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
c3d9a224 2400 visorchannel_get_header(controlvm_channel))) {
2401 initialize_controlvm_payload();
2402 } else {
2403 visorchannel_destroy(controlvm_channel);
2404 controlvm_channel = NULL;
2405 return -ENODEV;
2406 }
2407 } else {
2408 return -ENODEV;
2409 }
2410
2411 major_dev = MKDEV(visorchipset_major, 0);
2412 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2413 if (rc < 0) {
4cb005a9 2414 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2415 goto cleanup;
4cb005a9 2416 }
9f8d0e8b 2417
da021f02 2418 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2419
2420 /* if booting in a crash kernel */
2421 if (is_kdump_kernel())
2422 INIT_DELAYED_WORK(&periodic_controlvm_work,
2423 setup_crash_devices_work_queue);
2424 else
2425 INIT_DELAYED_WORK(&periodic_controlvm_work,
2426 controlvm_periodic_work);
2427 periodic_controlvm_workqueue =
2428 create_singlethread_workqueue("visorchipset_controlvm");
2429
2430 if (!periodic_controlvm_workqueue) {
2431 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2432 DIAG_SEVERITY_ERR);
2433 rc = -ENOMEM;
2434 goto cleanup;
2435 }
2436 most_recent_message_jiffies = jiffies;
2437 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2438 rc = queue_delayed_work(periodic_controlvm_workqueue,
2439 &periodic_controlvm_work, poll_jiffies);
2440 if (rc < 0) {
2441 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2442 DIAG_SEVERITY_ERR);
2443 goto cleanup;
2444 }
2445
2446 visorchipset_platform_device.dev.devt = major_dev;
2447 if (platform_device_register(&visorchipset_platform_device) < 0) {
2448 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2449 rc = -1;
a6a3989b 2450 goto cleanup;
4cb005a9 2451 }
12e364b9 2452 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2453
2454 rc = visorbus_init();
a6a3989b 2455cleanup:
12e364b9 2456 if (rc) {
2457 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2458 POSTCODE_SEVERITY_ERR);
2459 }
2460 return rc;
2461}
2462
2463void
2464visorchipset_file_cleanup(dev_t major_dev)
2465{
2466 if (file_cdev.ops)
2467 cdev_del(&file_cdev);
2468 file_cdev.ops = NULL;
2469 unregister_chrdev_region(major_dev, 1);
2470}
2471
2472static int
2473visorchipset_exit(struct acpi_device *acpi_device)
12e364b9 2474{
2475 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2476
2477 visorbus_exit();
2478
2479 cancel_delayed_work(&periodic_controlvm_work);
2480 flush_workqueue(periodic_controlvm_workqueue);
2481 destroy_workqueue(periodic_controlvm_workqueue);
2482 periodic_controlvm_workqueue = NULL;
2483 destroy_controlvm_payload_info(&controlvm_payload_info);
1783319f 2484
2485 cleanup_controlvm_structures();
2486
da021f02 2487 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2488
c3d9a224 2489 visorchannel_destroy(controlvm_channel);
8a1182eb 2490
addceb12 2491 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
12e364b9 2492 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2493
2494 return 0;
2495}
2496
2497static const struct acpi_device_id unisys_device_ids[] = {
2498 {"PNP0A07", 0},
2499 {"", 0},
2500};
2501
2502static struct acpi_driver unisys_acpi_driver = {
2503 .name = "unisys_acpi",
2504 .class = "unisys_acpi_class",
2505 .owner = THIS_MODULE,
2506 .ids = unisys_device_ids,
2507 .ops = {
2508 .add = visorchipset_init,
2509 .remove = visorchipset_exit,
2510 },
2511};
2512static __init uint32_t visorutil_spar_detect(void)
2513{
2514 unsigned int eax, ebx, ecx, edx;
2515
2516 if (cpu_has_hypervisor) {
2517 /* check the ID */
2518 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2519 return (ebx == UNISYS_SPAR_ID_EBX) &&
2520 (ecx == UNISYS_SPAR_ID_ECX) &&
2521 (edx == UNISYS_SPAR_ID_EDX);
2522 } else {
2523 return 0;
2524 }
2525}
2526
2527static int init_unisys(void)
2528{
2529 int result;
d5b3f1dc 2530 if (!visorutil_spar_detect())
2531 return -ENODEV;
2532
2533 result = acpi_bus_register_driver(&unisys_acpi_driver);
2534 if (result)
2535 return -ENODEV;
2536
2537 pr_info("Unisys Visorchipset Driver Loaded.\n");
2538 return 0;
2539};
2540
2541static void exit_unisys(void)
2542{
2543 acpi_bus_unregister_driver(&unisys_acpi_driver);
2544}
2545
12e364b9 2546module_param_named(major, visorchipset_major, int, S_IRUGO);
2547MODULE_PARM_DESC(visorchipset_major,
2548 "major device number to use for the device node");
2549module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
 2550 MODULE_PARM_DESC(visorchipset_visorbusregwait,
12e364b9 2551 "1 to have the module wait for the visor bus to register");
2552module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2553 int, S_IRUGO);
2554MODULE_PARM_DESC(visorchipset_holdchipsetready,
2555 "1 to hold response to CHIPSET_READY");
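/*
 * Hypothetical load example combining the three parameters above (using the
 * names registered with module_param_named):
 *
 *   modprobe visorchipset major=0 visorbusregwait=1 holdchipsetready=1
 *
 * major=0 requests a dynamically allocated char-device major,
 * visorbusregwait=1 makes controlvm processing wait until visorbus has
 * registered, and holdchipsetready=1 defers the CHIPSET_READY response
 * until the chipsetready events have been reported.
 */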
b615d628 2556
2557module_init(init_unisys);
2558module_exit(exit_unisys);
2559
2560MODULE_AUTHOR("Unisys");
2561MODULE_LICENSE("GPL");
2562MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2563 VERSION);
2564MODULE_VERSION(VERSION);