staging: unisys: Fix double sysfs create for module version
[deliverable/linux.git] / drivers / staging / unisys / visorbus / visorchipset.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
55c67dca 18#include <linux/acpi.h>
c0a14641 19#include <linux/cdev.h>
46168810 20#include <linux/ctype.h>
e3420ed6
EA
21#include <linux/fs.h>
22#include <linux/mm.h>
12e364b9
KC
23#include <linux/nls.h>
24#include <linux/netdevice.h>
25#include <linux/platform_device.h>
90addb02 26#include <linux/uuid.h>
1ba00980 27#include <linux/crash_dump.h>
12e364b9 28
5f3a7e36 29#include "channel_guid.h"
55c67dca
PB
30#include "controlvmchannel.h"
31#include "controlvmcompletionstatus.h"
32#include "guestlinuxdebug.h"
33#include "periodic_work.h"
55c67dca
PB
34#include "version.h"
35#include "visorbus.h"
36#include "visorbus_private.h"
5f3a7e36 37#include "vmcallinterface.h"
55c67dca 38
12e364b9 39#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
12e364b9
KC
40
41#define MAX_NAME_SIZE 128
42#define MAX_IP_SIZE 50
43#define MAXOUTSTANDINGCHANNELCOMMAND 256
44#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
46168810 47#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
2ee0deec
PB
48
49#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
50
d5b3f1dc
EA
51
52#define UNISYS_SPAR_LEAF_ID 0x40000000
53
54/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
55#define UNISYS_SPAR_ID_EBX 0x73696e55
56#define UNISYS_SPAR_ID_ECX 0x70537379
57#define UNISYS_SPAR_ID_EDX 0x34367261
58
b615d628
JS
59/*
60 * Module parameters
61 */
b615d628 62static int visorchipset_major;
4da3336c 63static int visorchipset_visorbusregwait = 1; /* default is on */
b615d628 64static int visorchipset_holdchipsetready;
46168810 65static unsigned long controlvm_payload_bytes_buffered;
b615d628 66
e3420ed6
EA
67static int
68visorchipset_open(struct inode *inode, struct file *file)
69{
70 unsigned minor_number = iminor(inode);
71
72 if (minor_number)
73 return -ENODEV;
74 file->private_data = NULL;
75 return 0;
76}
77
/* /dev/visorchipset release: no per-open state to tear down. */
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
83
12e364b9
KC
84/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
85* we switch to slow polling mode. As soon as we get a controlvm
86* message, we switch back to fast polling mode.
87*/
88#define MIN_IDLE_SECONDS 10
52063eca
JS
89static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
90static unsigned long most_recent_message_jiffies; /* when we got our last
bd5b9b32 91 * controlvm message */
4da3336c 92static int visorbusregistered;
12e364b9
KC
93
94#define MAX_CHIPSET_EVENTS 2
c242233e 95static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 96
46168810
EA
97struct parser_context {
98 unsigned long allocbytes;
99 unsigned long param_bytes;
100 u8 *curr;
101 unsigned long bytes_remaining;
102 bool byte_stream;
103 char data[0];
104};
105
9232d2d6
BR
106static struct delayed_work periodic_controlvm_work;
107static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 108static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 109
e3420ed6
EA
110static struct cdev file_cdev;
111static struct visorchannel **file_controlvm_channel;
da021f02 112static struct controlvm_message_header g_chipset_msg_hdr;
4f44b72d 113static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9 114
1390b88c
BR
115static LIST_HEAD(bus_info_list);
116static LIST_HEAD(dev_info_list);
12e364b9 117
c3d9a224 118static struct visorchannel *controlvm_channel;
12e364b9 119
84982fbf 120/* Manages the request payload in the controlvm channel */
c1f834eb 121struct visor_controlvm_payload_info {
c242233e 122 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 123 u64 offset; /* offset from beginning of controlvm
12e364b9 124 * channel to beginning of payload * pool */
b3c55b13 125 u32 bytes; /* number of bytes in payload pool */
c1f834eb
JS
126};
127
128static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 129
12e364b9
KC
130/* The following globals are used to handle the scenario where we are unable to
131 * offload the payload from a controlvm message due to memory requirements. In
132 * this scenario, we simply stash the controlvm message, then attempt to
133 * process it again the next time controlvm_periodic_work() runs.
134 */
7166ed19 135static struct controlvm_message controlvm_pending_msg;
c79b28f7 136static bool controlvm_pending_msg_valid;
12e364b9 137
12e364b9
KC
138/* This identifies a data buffer that has been received via a controlvm messages
139 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
140 */
141struct putfile_buffer_entry {
142 struct list_head next; /* putfile_buffer_entry list */
317d9614 143 struct parser_context *parser_ctx; /* points to input data buffer */
12e364b9
KC
144};
145
146/* List of struct putfile_request *, via next_putfile_request member.
147 * Each entry in this list identifies an outstanding TRANSMIT_FILE
148 * conversation.
149 */
1eee0011 150static LIST_HEAD(putfile_request_list);
12e364b9
KC
151
152/* This describes a buffer and its current state of transfer (e.g., how many
153 * bytes have already been supplied as putfile data, and how many bytes are
154 * remaining) for a putfile_request.
155 */
156struct putfile_active_buffer {
157 /* a payload from a controlvm message, containing a file data buffer */
317d9614 158 struct parser_context *parser_ctx;
12e364b9
KC
159 /* points within data area of parser_ctx to next byte of data */
160 u8 *pnext;
161 /* # bytes left from <pnext> to the end of this data buffer */
162 size_t bytes_remaining;
163};
164
165#define PUTFILE_REQUEST_SIG 0x0906101302281211
166/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
167 * conversation. Structs of this type are dynamically linked into
168 * <Putfile_request_list>.
169 */
170struct putfile_request {
171 u64 sig; /* PUTFILE_REQUEST_SIG */
172
173 /* header from original TransmitFile request */
98d7b594 174 struct controlvm_message_header controlvm_header;
12e364b9
KC
175 u64 file_request_number; /* from original TransmitFile request */
176
177 /* link to next struct putfile_request */
178 struct list_head next_putfile_request;
179
180 /* most-recent sequence number supplied via a controlvm message */
181 u64 data_sequence_number;
182
183 /* head of putfile_buffer_entry list, which describes the data to be
184 * supplied as putfile data;
185 * - this list is added to when controlvm messages come in that supply
186 * file data
187 * - this list is removed from via the hotplug program that is actually
188 * consuming these buffers to write as file data */
189 struct list_head input_buffer_list;
190 spinlock_t req_list_lock; /* lock for input_buffer_list */
191
192 /* waiters for input_buffer_list to go non-empty */
193 wait_queue_head_t input_buffer_wq;
194
195 /* data not yet read within current putfile_buffer_entry */
196 struct putfile_active_buffer active_buf;
197
198 /* <0 = failed, 0 = in-progress, >0 = successful; */
199 /* note that this must be set with req_list_lock, and if you set <0, */
200 /* it is your responsibility to also free up all of the other objects */
201 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
202 /* before releasing the lock */
203 int completion_status;
204};
205
12e364b9
KC
206struct parahotplug_request {
207 struct list_head list;
208 int id;
209 unsigned long expiration;
3ab47701 210 struct controlvm_message msg;
12e364b9
KC
211};
212
ddf5de53
BR
213static LIST_HEAD(parahotplug_request_list);
214static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
215static void parahotplug_process_list(void);
216
217/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
218 * CONTROLVM_REPORTEVENT.
219 */
4da3336c 220static struct visorchipset_busdev_notifiers busdev_notifiers;
12e364b9 221
d32517e3
DZ
222static void bus_create_response(struct visor_device *p, int response);
223static void bus_destroy_response(struct visor_device *p, int response);
a298bc0b
DZ
224static void device_create_response(struct visor_device *p, int response);
225static void device_destroy_response(struct visor_device *p, int response);
226static void device_resume_response(struct visor_device *p, int response);
12e364b9 227
a298bc0b
DZ
228static void visorchipset_device_pause_response(struct visor_device *p,
229 int response);
2ee0deec 230
8e3fedd6 231static struct visorchipset_busdev_responders busdev_responders = {
12e364b9
KC
232 .bus_create = bus_create_response,
233 .bus_destroy = bus_destroy_response,
234 .device_create = device_create_response,
235 .device_destroy = device_destroy_response,
927c7927 236 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
237 .device_resume = device_resume_response,
238};
239
240/* info for /dev/visorchipset */
5aa8ae57 241static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 242
19f6634f
BR
243/* prototypes for attributes */
244static ssize_t toolaction_show(struct device *dev,
8e76e695 245 struct device_attribute *attr, char *buf);
19f6634f 246static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
247 struct device_attribute *attr,
248 const char *buf, size_t count);
19f6634f
BR
249static DEVICE_ATTR_RW(toolaction);
250
54b31229 251static ssize_t boottotool_show(struct device *dev,
8e76e695 252 struct device_attribute *attr, char *buf);
54b31229 253static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
254 struct device_attribute *attr, const char *buf,
255 size_t count);
54b31229
BR
256static DEVICE_ATTR_RW(boottotool);
257
422af17c 258static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 259 char *buf);
422af17c 260static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 261 const char *buf, size_t count);
422af17c
BR
262static DEVICE_ATTR_RW(error);
263
264static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 265 char *buf);
422af17c 266static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 267 const char *buf, size_t count);
422af17c
BR
268static DEVICE_ATTR_RW(textid);
269
270static ssize_t remaining_steps_show(struct device *dev,
8e76e695 271 struct device_attribute *attr, char *buf);
422af17c 272static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
273 struct device_attribute *attr,
274 const char *buf, size_t count);
422af17c
BR
275static DEVICE_ATTR_RW(remaining_steps);
276
18b87ed1 277static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
278 struct device_attribute *attr,
279 const char *buf, size_t count);
18b87ed1
BR
280static DEVICE_ATTR_WO(chipsetready);
281
e56fa7cd 282static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
283 struct device_attribute *attr,
284 const char *buf, size_t count);
e56fa7cd
BR
285static DEVICE_ATTR_WO(devicedisabled);
286
287static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
288 struct device_attribute *attr,
289 const char *buf, size_t count);
e56fa7cd
BR
290static DEVICE_ATTR_WO(deviceenabled);
291
19f6634f
BR
292static struct attribute *visorchipset_install_attrs[] = {
293 &dev_attr_toolaction.attr,
54b31229 294 &dev_attr_boottotool.attr,
422af17c
BR
295 &dev_attr_error.attr,
296 &dev_attr_textid.attr,
297 &dev_attr_remaining_steps.attr,
19f6634f
BR
298 NULL
299};
300
301static struct attribute_group visorchipset_install_group = {
302 .name = "install",
303 .attrs = visorchipset_install_attrs
304};
305
18b87ed1
BR
306static struct attribute *visorchipset_guest_attrs[] = {
307 &dev_attr_chipsetready.attr,
308 NULL
309};
310
311static struct attribute_group visorchipset_guest_group = {
312 .name = "guest",
313 .attrs = visorchipset_guest_attrs
314};
315
e56fa7cd
BR
316static struct attribute *visorchipset_parahotplug_attrs[] = {
317 &dev_attr_devicedisabled.attr,
318 &dev_attr_deviceenabled.attr,
319 NULL
320};
321
322static struct attribute_group visorchipset_parahotplug_group = {
323 .name = "parahotplug",
324 .attrs = visorchipset_parahotplug_attrs
325};
326
19f6634f
BR
327static const struct attribute_group *visorchipset_dev_groups[] = {
328 &visorchipset_install_group,
18b87ed1 329 &visorchipset_guest_group,
e56fa7cd 330 &visorchipset_parahotplug_group,
19f6634f
BR
331 NULL
332};
333
12e364b9 334/* /sys/devices/platform/visorchipset */
eb34e877 335static struct platform_device visorchipset_platform_device = {
12e364b9
KC
336 .name = "visorchipset",
337 .id = -1,
19f6634f 338 .dev.groups = visorchipset_dev_groups,
12e364b9
KC
339};
340
341/* Function prototypes */
b3168c70 342static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
343 int response);
344static void controlvm_respond_chipset_init(
b3168c70 345 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
346 enum ultra_chipset_feature features);
347static void controlvm_respond_physdev_changestate(
b3168c70 348 struct controlvm_message_header *msg_hdr, int response,
98d7b594 349 struct spar_segment_state state);
12e364b9 350
46168810 351
2ee0deec
PB
352static void parser_done(struct parser_context *ctx);
353
46168810 354static struct parser_context *
fbf35536 355parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
46168810
EA
356{
357 int allocbytes = sizeof(struct parser_context) + bytes;
358 struct parser_context *rc = NULL;
359 struct parser_context *ctx = NULL;
46168810
EA
360
361 if (retry)
362 *retry = false;
cc55b5c5
JS
363
364 /*
365 * alloc an 0 extra byte to ensure payload is
366 * '\0'-terminated
367 */
368 allocbytes++;
46168810
EA
369 if ((controlvm_payload_bytes_buffered + bytes)
370 > MAX_CONTROLVM_PAYLOAD_BYTES) {
371 if (retry)
372 *retry = true;
373 rc = NULL;
374 goto cleanup;
375 }
376 ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
377 if (!ctx) {
378 if (retry)
379 *retry = true;
380 rc = NULL;
381 goto cleanup;
382 }
383
384 ctx->allocbytes = allocbytes;
385 ctx->param_bytes = bytes;
386 ctx->curr = NULL;
387 ctx->bytes_remaining = 0;
388 ctx->byte_stream = false;
389 if (local) {
390 void *p;
391
392 if (addr > virt_to_phys(high_memory - 1)) {
393 rc = NULL;
394 goto cleanup;
395 }
396 p = __va((unsigned long) (addr));
397 memcpy(ctx->data, p, bytes);
398 } else {
dd412751
JS
399 void __iomem *mapping;
400
401 if (!request_mem_region(addr, bytes, "visorchipset")) {
46168810
EA
402 rc = NULL;
403 goto cleanup;
404 }
712c03dc 405
dd412751
JS
406 mapping = ioremap_cache(addr, bytes);
407 if (!mapping) {
408 release_mem_region(addr, bytes);
46168810
EA
409 rc = NULL;
410 goto cleanup;
411 }
dd412751
JS
412 memcpy_fromio(ctx->data, mapping, bytes);
413 release_mem_region(addr, bytes);
46168810 414 }
46168810 415
cc55b5c5 416 ctx->byte_stream = true;
46168810
EA
417 rc = ctx;
418cleanup:
46168810
EA
419 if (rc) {
420 controlvm_payload_bytes_buffered += ctx->param_bytes;
421 } else {
422 if (ctx) {
423 parser_done(ctx);
424 ctx = NULL;
425 }
426 }
427 return rc;
428}
429
464129ed 430static uuid_le
46168810
EA
431parser_id_get(struct parser_context *ctx)
432{
433 struct spar_controlvm_parameters_header *phdr = NULL;
434
435 if (ctx == NULL)
436 return NULL_UUID_LE;
437 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
438 return phdr->id;
439}
440
2ee0deec
PB
441/** Describes the state from the perspective of which controlvm messages have
442 * been received for a bus or device.
443 */
444
445enum PARSER_WHICH_STRING {
446 PARSERSTRING_INITIATOR,
447 PARSERSTRING_TARGET,
448 PARSERSTRING_CONNECTION,
449 PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
450};
451
464129ed 452static void
2ee0deec
PB
453parser_param_start(struct parser_context *ctx,
454 enum PARSER_WHICH_STRING which_string)
46168810
EA
455{
456 struct spar_controlvm_parameters_header *phdr = NULL;
457
458 if (ctx == NULL)
459 goto Away;
460 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
461 switch (which_string) {
462 case PARSERSTRING_INITIATOR:
463 ctx->curr = ctx->data + phdr->initiator_offset;
464 ctx->bytes_remaining = phdr->initiator_length;
465 break;
466 case PARSERSTRING_TARGET:
467 ctx->curr = ctx->data + phdr->target_offset;
468 ctx->bytes_remaining = phdr->target_length;
469 break;
470 case PARSERSTRING_CONNECTION:
471 ctx->curr = ctx->data + phdr->connection_offset;
472 ctx->bytes_remaining = phdr->connection_length;
473 break;
474 case PARSERSTRING_NAME:
475 ctx->curr = ctx->data + phdr->name_offset;
476 ctx->bytes_remaining = phdr->name_length;
477 break;
478 default:
479 break;
480 }
481
482Away:
483 return;
484}
485
464129ed 486static void parser_done(struct parser_context *ctx)
46168810
EA
487{
488 if (!ctx)
489 return;
490 controlvm_payload_bytes_buffered -= ctx->param_bytes;
491 kfree(ctx);
492}
493
464129ed 494static void *
46168810
EA
495parser_string_get(struct parser_context *ctx)
496{
497 u8 *pscan;
498 unsigned long nscan;
499 int value_length = -1;
500 void *value = NULL;
501 int i;
502
503 if (!ctx)
504 return NULL;
505 pscan = ctx->curr;
506 nscan = ctx->bytes_remaining;
507 if (nscan == 0)
508 return NULL;
509 if (!pscan)
510 return NULL;
511 for (i = 0, value_length = -1; i < nscan; i++)
512 if (pscan[i] == '\0') {
513 value_length = i;
514 break;
515 }
516 if (value_length < 0) /* '\0' was not included in the length */
517 value_length = nscan;
518 value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
519 if (value == NULL)
520 return NULL;
521 if (value_length > 0)
522 memcpy(value, pscan, value_length);
523 ((u8 *) (value))[value_length] = '\0';
524 return value;
525}
526
527
d746cb55
VB
528static ssize_t toolaction_show(struct device *dev,
529 struct device_attribute *attr,
530 char *buf)
19f6634f 531{
01f4d85a 532 u8 tool_action;
19f6634f 533
c3d9a224 534 visorchannel_read(controlvm_channel,
d19642f6 535 offsetof(struct spar_controlvm_channel_protocol,
8e76e695 536 tool_action), &tool_action, sizeof(u8));
01f4d85a 537 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
538}
539
d746cb55
VB
540static ssize_t toolaction_store(struct device *dev,
541 struct device_attribute *attr,
542 const char *buf, size_t count)
19f6634f 543{
01f4d85a 544 u8 tool_action;
66e24b76 545 int ret;
19f6634f 546
ebec8967 547 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
548 return -EINVAL;
549
c3d9a224 550 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
551 offsetof(struct spar_controlvm_channel_protocol,
552 tool_action),
01f4d85a 553 &tool_action, sizeof(u8));
66e24b76
BR
554
555 if (ret)
556 return ret;
e22a4a0f 557 return count;
19f6634f
BR
558}
559
d746cb55
VB
560static ssize_t boottotool_show(struct device *dev,
561 struct device_attribute *attr,
562 char *buf)
54b31229 563{
365522d9 564 struct efi_spar_indication efi_spar_indication;
54b31229 565
c3d9a224 566 visorchannel_read(controlvm_channel,
8e76e695
BR
567 offsetof(struct spar_controlvm_channel_protocol,
568 efi_spar_ind), &efi_spar_indication,
569 sizeof(struct efi_spar_indication));
54b31229 570 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 571 efi_spar_indication.boot_to_tool);
54b31229
BR
572}
573
d746cb55
VB
574static ssize_t boottotool_store(struct device *dev,
575 struct device_attribute *attr,
576 const char *buf, size_t count)
54b31229 577{
66e24b76 578 int val, ret;
365522d9 579 struct efi_spar_indication efi_spar_indication;
54b31229 580
ebec8967 581 if (kstrtoint(buf, 10, &val))
66e24b76
BR
582 return -EINVAL;
583
365522d9 584 efi_spar_indication.boot_to_tool = val;
c3d9a224 585 ret = visorchannel_write(controlvm_channel,
d19642f6 586 offsetof(struct spar_controlvm_channel_protocol,
8e76e695
BR
587 efi_spar_ind), &(efi_spar_indication),
588 sizeof(struct efi_spar_indication));
66e24b76
BR
589
590 if (ret)
591 return ret;
e22a4a0f 592 return count;
54b31229 593}
422af17c
BR
594
595static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 596 char *buf)
422af17c
BR
597{
598 u32 error;
599
8e76e695
BR
600 visorchannel_read(controlvm_channel,
601 offsetof(struct spar_controlvm_channel_protocol,
602 installation_error),
603 &error, sizeof(u32));
422af17c
BR
604 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
605}
606
607static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 608 const char *buf, size_t count)
422af17c
BR
609{
610 u32 error;
66e24b76 611 int ret;
422af17c 612
ebec8967 613 if (kstrtou32(buf, 10, &error))
66e24b76
BR
614 return -EINVAL;
615
c3d9a224 616 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
617 offsetof(struct spar_controlvm_channel_protocol,
618 installation_error),
619 &error, sizeof(u32));
66e24b76
BR
620 if (ret)
621 return ret;
e22a4a0f 622 return count;
422af17c
BR
623}
624
625static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 626 char *buf)
422af17c 627{
10dbf0e3 628 u32 text_id;
422af17c 629
8e76e695
BR
630 visorchannel_read(controlvm_channel,
631 offsetof(struct spar_controlvm_channel_protocol,
632 installation_text_id),
633 &text_id, sizeof(u32));
10dbf0e3 634 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
635}
636
637static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 638 const char *buf, size_t count)
422af17c 639{
10dbf0e3 640 u32 text_id;
66e24b76 641 int ret;
422af17c 642
ebec8967 643 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
644 return -EINVAL;
645
c3d9a224 646 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
647 offsetof(struct spar_controlvm_channel_protocol,
648 installation_text_id),
649 &text_id, sizeof(u32));
66e24b76
BR
650 if (ret)
651 return ret;
e22a4a0f 652 return count;
422af17c
BR
653}
654
422af17c 655static ssize_t remaining_steps_show(struct device *dev,
8e76e695 656 struct device_attribute *attr, char *buf)
422af17c 657{
ee8da290 658 u16 remaining_steps;
422af17c 659
c3d9a224 660 visorchannel_read(controlvm_channel,
8e76e695
BR
661 offsetof(struct spar_controlvm_channel_protocol,
662 installation_remaining_steps),
663 &remaining_steps, sizeof(u16));
ee8da290 664 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
665}
666
667static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
668 struct device_attribute *attr,
669 const char *buf, size_t count)
422af17c 670{
ee8da290 671 u16 remaining_steps;
66e24b76 672 int ret;
422af17c 673
ebec8967 674 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
675 return -EINVAL;
676
c3d9a224 677 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
678 offsetof(struct spar_controlvm_channel_protocol,
679 installation_remaining_steps),
680 &remaining_steps, sizeof(u16));
66e24b76
BR
681 if (ret)
682 return ret;
e22a4a0f 683 return count;
422af17c
BR
684}
685
ab0592b9
DZ
686struct visor_busdev {
687 u32 bus_no;
688 u32 dev_no;
689};
690
691static int match_visorbus_dev_by_id(struct device *dev, void *data)
692{
693 struct visor_device *vdev = to_visor_device(dev);
694 struct visor_busdev *id = (struct visor_busdev *)data;
695 u32 bus_no = id->bus_no;
696 u32 dev_no = id->dev_no;
697
65bd6e46
DZ
698 if ((vdev->chipset_bus_no == bus_no) &&
699 (vdev->chipset_dev_no == dev_no))
ab0592b9
DZ
700 return 1;
701
702 return 0;
703}
704struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
705 struct visor_device *from)
706{
707 struct device *dev;
708 struct device *dev_start = NULL;
709 struct visor_device *vdev = NULL;
710 struct visor_busdev id = {
711 .bus_no = bus_no,
712 .dev_no = dev_no
713 };
714
715 if (from)
716 dev_start = &from->device;
717 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
718 match_visorbus_dev_by_id);
719 if (dev)
720 vdev = to_visor_device(dev);
721 return vdev;
722}
723EXPORT_SYMBOL(visorbus_get_device_by_id);
724
c242233e 725static u8
12e364b9
KC
726check_chipset_events(void)
727{
728 int i;
c242233e 729 u8 send_msg = 1;
12e364b9
KC
730 /* Check events to determine if response should be sent */
731 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
732 send_msg &= chipset_events[i];
733 return send_msg;
734}
735
736static void
737clear_chipset_events(void)
738{
739 int i;
740 /* Clear chipset_events */
741 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
742 chipset_events[i] = 0;
743}
744
745void
4da3336c 746visorchipset_register_busdev(
fe90d892 747 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 748 struct visorchipset_busdev_responders *responders,
1e7a59c1 749 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 750{
8f1947ac 751 down(&notifier_lock);
38f736e9 752 if (!notifiers) {
4da3336c
DK
753 memset(&busdev_notifiers, 0,
754 sizeof(busdev_notifiers));
755 visorbusregistered = 0; /* clear flag */
12e364b9 756 } else {
4da3336c
DK
757 busdev_notifiers = *notifiers;
758 visorbusregistered = 1; /* set flag */
12e364b9
KC
759 }
760 if (responders)
8e3fedd6 761 *responders = busdev_responders;
1e7a59c1
BR
762 if (driver_info)
763 bus_device_info_init(driver_info, "chipset", "visorchipset",
8e76e695 764 VERSION, NULL);
12e364b9 765
8f1947ac 766 up(&notifier_lock);
12e364b9 767}
4da3336c 768EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
12e364b9 769
12e364b9 770static void
3ab47701 771chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
772{
773 static int chipset_inited;
b9b141e8 774 enum ultra_chipset_feature features = 0;
12e364b9
KC
775 int rc = CONTROLVM_RESP_SUCCESS;
776
777 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
778 if (chipset_inited) {
22ad57ba 779 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 780 goto cleanup;
12e364b9
KC
781 }
782 chipset_inited = 1;
783 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
784
785 /* Set features to indicate we support parahotplug (if Command
786 * also supports it). */
787 features =
2ea5117b 788 inmsg->cmd.init_chipset.
12e364b9
KC
789 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
790
791 /* Set the "reply" bit so Command knows this is a
792 * features-aware driver. */
793 features |= ULTRA_CHIPSET_FEATURE_REPLY;
794
e3199b2e 795cleanup:
98d7b594 796 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
797 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
798}
799
800static void
3ab47701 801controlvm_init_response(struct controlvm_message *msg,
b3168c70 802 struct controlvm_message_header *msg_hdr, int response)
12e364b9 803{
3ab47701 804 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 805 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
806 msg->hdr.payload_bytes = 0;
807 msg->hdr.payload_vm_offset = 0;
808 msg->hdr.payload_max_bytes = 0;
12e364b9 809 if (response < 0) {
98d7b594
BR
810 msg->hdr.flags.failed = 1;
811 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
812 }
813}
814
815static void
b3168c70 816controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 817{
3ab47701 818 struct controlvm_message outmsg;
26eb2c0c 819
b3168c70 820 controlvm_init_response(&outmsg, msg_hdr, response);
2098dbd1 821 if (outmsg.hdr.flags.test_message == 1)
12e364b9 822 return;
2098dbd1 823
c3d9a224 824 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 825 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
826 return;
827 }
828}
829
830static void
b3168c70 831controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 832 int response,
b9b141e8 833 enum ultra_chipset_feature features)
12e364b9 834{
3ab47701 835 struct controlvm_message outmsg;
26eb2c0c 836
b3168c70 837 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 838 outmsg.cmd.init_chipset.features = features;
c3d9a224 839 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 840 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
841 return;
842 }
843}
844
98d7b594 845static void controlvm_respond_physdev_changestate(
b3168c70 846 struct controlvm_message_header *msg_hdr, int response,
98d7b594 847 struct spar_segment_state state)
12e364b9 848{
3ab47701 849 struct controlvm_message outmsg;
26eb2c0c 850
b3168c70 851 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
852 outmsg.cmd.device_change_state.state = state;
853 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 854 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 855 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
856 return;
857 }
858}
859
2ee0deec
PB
860enum crash_obj_type {
861 CRASH_DEV,
862 CRASH_BUS,
863};
864
12e364b9 865static void
0274b5ae
DZ
866bus_responder(enum controlvm_id cmd_id,
867 struct controlvm_message_header *pending_msg_hdr,
3032aedd 868 int response)
12e364b9 869{
0274b5ae
DZ
870 if (pending_msg_hdr == NULL)
871 return; /* no controlvm response needed */
12e364b9 872
0274b5ae 873 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 874 return;
0aca7844 875
0274b5ae 876 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
877}
878
879static void
fbb31f48 880device_changestate_responder(enum controlvm_id cmd_id,
a298bc0b 881 struct visor_device *p, int response,
fbb31f48 882 struct spar_segment_state response_state)
12e364b9 883{
3ab47701 884 struct controlvm_message outmsg;
a298bc0b
DZ
885 u32 bus_no = p->chipset_bus_no;
886 u32 dev_no = p->chipset_dev_no;
12e364b9 887
0274b5ae 888 if (p->pending_msg_hdr == NULL)
12e364b9 889 return; /* no controlvm response needed */
0274b5ae 890 if (p->pending_msg_hdr->id != cmd_id)
12e364b9 891 return;
12e364b9 892
0274b5ae 893 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
12e364b9 894
fbb31f48
BR
895 outmsg.cmd.device_change_state.bus_no = bus_no;
896 outmsg.cmd.device_change_state.dev_no = dev_no;
897 outmsg.cmd.device_change_state.state = response_state;
12e364b9 898
c3d9a224 899 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 900 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 901 return;
12e364b9
KC
902}
903
904static void
0274b5ae
DZ
905device_responder(enum controlvm_id cmd_id,
906 struct controlvm_message_header *pending_msg_hdr,
b4b598fd 907 int response)
12e364b9 908{
0274b5ae 909 if (pending_msg_hdr == NULL)
12e364b9 910 return; /* no controlvm response needed */
0aca7844 911
0274b5ae 912 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 913 return;
0aca7844 914
0274b5ae 915 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
916}
917
/*
 * bus_epilog() - finish processing of a bus create/destroy command
 * @bus_info:      the bus the command refers to (NULL if lookup failed)
 * @cmd:           CONTROLVM_BUS_CREATE or CONTROLVM_BUS_DESTROY
 * @msg_hdr:       header of the incoming controlvm message
 * @response:      CONTROLVM_RESP_* result computed so far by the caller
 * @need_response: true if the sender expects a controlvm response
 *
 * On success the registered busdev notifier is invoked; the notified
 * callback (not this function) is then responsible for sending the
 * response via bus_responder().  On any failure, or when no notifier is
 * registered, the response is sent directly from here.
 */
static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		/* Stash a copy of the request header on the bus so the
		 * eventual (possibly asynchronous) responder can find it.
		 */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	/* Serialize notifier callbacks against other bus/device commands. */
	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		bus_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
985
/*
 * device_epilog() - finish processing of a device create/destroy/changestate
 * @dev_info:      the device the command refers to (NULL if lookup failed)
 * @state:         requested segment state (used only for CHANGESTATE)
 * @cmd:           CONTROLVM_DEVICE_CREATE/CHANGESTATE/DESTROY
 * @msg_hdr:       header of the incoming controlvm message
 * @response:      CONTROLVM_RESP_* result computed so far by the caller
 * @need_response: true if the sender expects a controlvm response
 * @for_visorbus:  presumably selects the visorbus notifier set — currently
 *                 unused here; &busdev_notifiers is always taken (TODO confirm)
 *
 * Mirrors bus_epilog(): on success the matching device notifier runs and
 * is then responsible for the response via device_responder(); otherwise
 * the response is sent directly from here.
 */
static void
device_epilog(struct visor_device *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	notifiers = &busdev_notifiers;

	if (!dev_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		/* Save a copy of the request header for the later responder. */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	/* Serialize notifier callbacks against other bus/device commands. */
	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (dev_info);
					notified = true;
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (dev_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		device_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
1080
/*
 * bus_create() - handle a CONTROLVM_BUS_CREATE message
 * @inmsg: the incoming controlvm message
 *
 * Allocates a visor_device for the new bus, attaches the visorchannel
 * described in the message, and hands off to bus_epilog() which notifies
 * visorbus and/or sends the controlvm response.  On any failure the
 * partially-built bus_info is freed and a NULL bus with an error code is
 * passed to bus_epilog().
 */
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	/* Creating a bus that already exists is an error. */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	/* A bus is addressed as (bus_no, BUS_ROOT_DEVICE). */
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	/* Map the channel memory the back end described in the message. */
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto cleanup;
	}
	bus_info->visorchannel = visorchannel;

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
1131
1132static void
3ab47701 1133bus_destroy(struct controlvm_message *inmsg)
12e364b9 1134{
2ea5117b 1135 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1136 u32 bus_no = cmd->destroy_bus.bus_no;
d32517e3 1137 struct visor_device *bus_info;
12e364b9
KC
1138 int rc = CONTROLVM_RESP_SUCCESS;
1139
d32517e3 1140 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
dff54cd6 1141 if (!bus_info)
22ad57ba 1142 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1143 else if (bus_info->state.created == 0)
22ad57ba 1144 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1145
3032aedd 1146 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1147 rc, inmsg->hdr.flags.response_expected == 1);
d32517e3
DZ
1148
1149 /* bus_info is freed as part of the busdevice_release function */
12e364b9
KC
1150}
1151
1152static void
317d9614
BR
1153bus_configure(struct controlvm_message *inmsg,
1154 struct parser_context *parser_ctx)
12e364b9 1155{
2ea5117b 1156 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e 1157 u32 bus_no;
d32517e3 1158 struct visor_device *bus_info;
12e364b9 1159 int rc = CONTROLVM_RESP_SUCCESS;
12e364b9 1160
654bada0
BR
1161 bus_no = cmd->configure_bus.bus_no;
1162 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1163 POSTCODE_SEVERITY_INFO);
12e364b9 1164
d32517e3 1165 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
654bada0
BR
1166 if (!bus_info) {
1167 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1168 POSTCODE_SEVERITY_ERR);
22ad57ba 1169 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1170 } else if (bus_info->state.created == 0) {
1171 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1172 POSTCODE_SEVERITY_ERR);
22ad57ba 1173 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
0274b5ae 1174 } else if (bus_info->pending_msg_hdr != NULL) {
654bada0 1175 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1176 POSTCODE_SEVERITY_ERR);
22ad57ba 1177 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0 1178 } else {
b32c4997
DZ
1179 visorchannel_set_clientpartition(bus_info->visorchannel,
1180 cmd->configure_bus.guest_handle);
654bada0
BR
1181 bus_info->partition_uuid = parser_id_get(parser_ctx);
1182 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1183 bus_info->name = parser_string_get(parser_ctx);
1184
654bada0
BR
1185 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1186 POSTCODE_SEVERITY_INFO);
12e364b9 1187 }
3032aedd 1188 bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1189 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1190}
1191
/*
 * my_device_create() - handle a CONTROLVM_DEVICE_CREATE message
 * @inmsg: the incoming controlvm message
 *
 * Validates the owning bus, allocates a visor_device for the new device,
 * maps its visorchannel, and hands off to device_epilog() which notifies
 * visorbus and/or sends the controlvm response.  All failure paths reach
 * device_epilog() with dev_info == NULL and a negative response code.
 */
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	/* The device must hang off an existing, created bus. */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	/* Creating a device that already exists is an error. */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	/* Map the channel memory the back end described in the message. */
	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   cmd->create_device.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto cleanup;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
1266
/*
 * my_device_changestate() - handle a CONTROLVM_DEVICE_CHANGESTATE message
 * for a virtual (non-physical) device
 * @inmsg: the incoming controlvm message
 *
 * Looks up the device and, if valid, forwards the requested segment state
 * to device_epilog(), which maps it to a resume/pause notification.  Note
 * that on a lookup failure no epilog runs, so no response is sent here —
 * presumably the sender times out; TODO confirm that is intended.
 */
static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
1292
1293static void
3ab47701 1294my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1295{
2ea5117b 1296 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1297 u32 bus_no = cmd->destroy_device.bus_no;
1298 u32 dev_no = cmd->destroy_device.dev_no;
a298bc0b 1299 struct visor_device *dev_info;
12e364b9
KC
1300 int rc = CONTROLVM_RESP_SUCCESS;
1301
a298bc0b 1302 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
61715c8b 1303 if (!dev_info)
22ad57ba 1304 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1305 else if (dev_info->state.created == 0)
22ad57ba 1306 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1307
61715c8b 1308 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1309 device_epilog(dev_info, segment_state_running,
12e364b9 1310 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
4da3336c 1311 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1312}
1313
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success, or a negative -CONTROLVM_RESP_ERROR_* code on failure.
 * (The mapping is released later by destroy_controlvm_payload_info().)
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	/* A zero offset or size means the channel advertised no payload. */
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		/* Defensive: unmap if we failed after a successful ioremap
		 * (no such path exists today, but keeps the cleanup complete).
		 */
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}
1355
1356static void
c1f834eb 1357destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1358{
597c338f 1359 if (info->ptr) {
12e364b9
KC
1360 iounmap(info->ptr);
1361 info->ptr = NULL;
1362 }
c1f834eb 1363 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1364}
1365
/*
 * initialize_controlvm_payload() - locate and map the controlvm request
 * payload area
 *
 * Reads the payload offset and size fields out of the controlvm channel
 * header and maps that region into controlvm_payload_info.  On a channel
 * read failure a POSTCODE is logged and the payload is simply left
 * uninitialized.
 */
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	/* Fetch request_payload_offset from the channel header. */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	/* Fetch request_payload_bytes from the channel header. */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
1393
1394/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1395 * Returns CONTROLVM_RESP_xxx code.
1396 */
d3368a58 1397static int
12e364b9
KC
1398visorchipset_chipset_ready(void)
1399{
eb34e877 1400 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1401 return CONTROLVM_RESP_SUCCESS;
1402}
12e364b9 1403
d3368a58 1404static int
12e364b9
KC
1405visorchipset_chipset_selftest(void)
1406{
1407 char env_selftest[20];
1408 char *envp[] = { env_selftest, NULL };
26eb2c0c 1409
12e364b9 1410 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1411 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1412 envp);
1413 return CONTROLVM_RESP_SUCCESS;
1414}
12e364b9
KC
1415
1416/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1417 * Returns CONTROLVM_RESP_xxx code.
1418 */
d3368a58 1419static int
12e364b9
KC
1420visorchipset_chipset_notready(void)
1421{
eb34e877 1422 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1423 return CONTROLVM_RESP_SUCCESS;
1424}
12e364b9
KC
1425
/*
 * chipset_ready() - handle a CONTROLVM_CHIPSET_READY message
 * @msg_hdr: header of the incoming message
 *
 * Emits the ONLINE uevent, then either responds immediately or — when
 * the visorchipset_holdchipsetready module option is set — stashes the
 * header in g_chipset_msg_hdr so the response can be deferred until the
 * partition signals that all modules are loaded and disks are mounted.
 */
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	/* CONTROLVM error codes are reported back negated. */
	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}
1442
1443static void
77a0449d 1444chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1445{
1446 int rc = visorchipset_chipset_selftest();
26eb2c0c 1447
12e364b9
KC
1448 if (rc != CONTROLVM_RESP_SUCCESS)
1449 rc = -rc;
77a0449d
BR
1450 if (msg_hdr->flags.response_expected)
1451 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1452}
1453
1454static void
77a0449d 1455chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1456{
1457 int rc = visorchipset_chipset_notready();
26eb2c0c 1458
12e364b9
KC
1459 if (rc != CONTROLVM_RESP_SUCCESS)
1460 rc = -rc;
77a0449d
BR
1461 if (msg_hdr->flags.response_expected)
1462 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1463}
1464
1465/* This is your "one-stop" shop for grabbing the next message from the
1466 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1467 */
f4c11551 1468static bool
3ab47701 1469read_controlvm_event(struct controlvm_message *msg)
12e364b9 1470{
c3d9a224 1471 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1472 CONTROLVM_QUEUE_EVENT, msg)) {
1473 /* got a message */
0aca7844 1474 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1475 return false;
1476 return true;
12e364b9 1477 }
f4c11551 1478 return false;
12e364b9
KC
1479}
1480
1481/*
1482 * The general parahotplug flow works as follows. The visorchipset
1483 * driver receives a DEVICE_CHANGESTATE message from Command
1484 * specifying a physical device to enable or disable. The CONTROLVM
1485 * message handler calls parahotplug_process_message, which then adds
1486 * the message to a global list and kicks off a udev event which
1487 * causes a user level script to enable or disable the specified
1488 * device. The udev script then writes to
1489 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1490 * to get called, at which point the appropriate CONTROLVM message is
1491 * retrieved from the list and responded to.
1492 */
1493
1494#define PARAHOTPLUG_TIMEOUT_MS 2000
1495
/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	/* Monotonic counter; atomic so concurrent callers never collide. */
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
1507
/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
1517
1518/*
1519 * Create a parahotplug_request, which is basically a wrapper for a
1520 * CONTROLVM_MESSAGE that we can stick on a list
1521 */
1522static struct parahotplug_request *
3ab47701 1523parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1524{
ea0dcfcf
QL
1525 struct parahotplug_request *req;
1526
6a55e3c3 1527 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1528 if (!req)
12e364b9
KC
1529 return NULL;
1530
1531 req->id = parahotplug_next_id();
1532 req->expiration = parahotplug_next_expiration();
1533 req->msg = *msg;
1534
1535 return req;
1536}
1537
/*
 * Free a parahotplug_request previously allocated by
 * parahotplug_request_create().
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1546
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	/* Environment strings handed to the udev script; each buffer is
	 * sized generously for its fixed prefix plus a decimal int.
	 */
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	/* dev_no packs device/function: bits 3+ are presumably the PCI
	 * device and the low 3 bits the function — TODO confirm encoding.
	 */
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
1576
1577/*
1578 * Remove any request from the list that's been on there too long and
1579 * respond with an error.
1580 */
1581static void
1582parahotplug_process_list(void)
1583{
e82ba62e
JS
1584 struct list_head *pos;
1585 struct list_head *tmp;
12e364b9 1586
ddf5de53 1587 spin_lock(&parahotplug_request_list_lock);
12e364b9 1588
ddf5de53 1589 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1590 struct parahotplug_request *req =
1591 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1592
1593 if (!time_after_eq(jiffies, req->expiration))
1594 continue;
1595
1596 list_del(pos);
1597 if (req->msg.hdr.flags.response_expected)
1598 controlvm_respond_physdev_changestate(
1599 &req->msg.hdr,
1600 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1601 req->msg.cmd.device_change_state.state);
1602 parahotplug_request_destroy(req);
12e364b9
KC
1603 }
1604
ddf5de53 1605 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1606}
1607
1608/*
1609 * Called from the /proc handler, which means the user script has
1610 * finished the enable/disable. Find the matching identifier, and
1611 * respond to the CONTROLVM message with success.
1612 */
1613static int
b06bdf7d 1614parahotplug_request_complete(int id, u16 active)
12e364b9 1615{
e82ba62e
JS
1616 struct list_head *pos;
1617 struct list_head *tmp;
12e364b9 1618
ddf5de53 1619 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1620
1621 /* Look for a request matching "id". */
ddf5de53 1622 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1623 struct parahotplug_request *req =
1624 list_entry(pos, struct parahotplug_request, list);
1625 if (req->id == id) {
1626 /* Found a match. Remove it from the list and
1627 * respond.
1628 */
1629 list_del(pos);
ddf5de53 1630 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1631 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1632 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1633 controlvm_respond_physdev_changestate(
1634 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1635 req->msg.cmd.device_change_state.state);
12e364b9
KC
1636 parahotplug_request_destroy(req);
1637 return 0;
1638 }
1639 }
1640
ddf5de53 1641 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1642 return -1;
1643}
1644
/*
 * Enables or disables a PCI device by kicking off a udev script
 *
 * Enable requests are acknowledged immediately; disable requests are
 * queued on parahotplug_request_list and answered only after the udev
 * script reports completion (or parahotplug_process_list() times out).
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	/* Allocation failed; the message goes unanswered. */
	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
						CONTROLVM_RESP_SUCCESS,
						inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
1686
12e364b9
KC
1687/* Process a controlvm message.
1688 * Return result:
779d0752 1689 * false - this function will return false only in the case where the
12e364b9
KC
1690 * controlvm message was NOT processed, but processing must be
1691 * retried before reading the next controlvm message; a
1692 * scenario where this can occur is when we need to throttle
1693 * the allocation of memory in which to copy out controlvm
1694 * payload data
f4c11551 1695 * true - processing of the controlvm message completed,
12e364b9
KC
1696 * either successfully or with an error.
1697 */
f4c11551 1698static bool
d5b3f1dc 1699handle_command(struct controlvm_message inmsg, u64 channel_addr)
12e364b9 1700{
2ea5117b 1701 struct controlvm_message_packet *cmd = &inmsg.cmd;
e82ba62e
JS
1702 u64 parm_addr;
1703 u32 parm_bytes;
317d9614 1704 struct parser_context *parser_ctx = NULL;
e82ba62e 1705 bool local_addr;
3ab47701 1706 struct controlvm_message ackmsg;
12e364b9
KC
1707
1708 /* create parsing context if necessary */
818352a8 1709 local_addr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1710 if (channel_addr == 0)
f4c11551 1711 return true;
818352a8
BR
1712 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1713 parm_bytes = inmsg.hdr.payload_bytes;
12e364b9
KC
1714
1715 /* Parameter and channel addresses within test messages actually lie
1716 * within our OS-controlled memory. We need to know that, because it
1717 * makes a difference in how we compute the virtual address.
1718 */
ebec8967 1719 if (parm_addr && parm_bytes) {
f4c11551 1720 bool retry = false;
26eb2c0c 1721
12e364b9 1722 parser_ctx =
818352a8
BR
1723 parser_init_byte_stream(parm_addr, parm_bytes,
1724 local_addr, &retry);
1b08872e 1725 if (!parser_ctx && retry)
f4c11551 1726 return false;
12e364b9
KC
1727 }
1728
818352a8 1729 if (!local_addr) {
12e364b9
KC
1730 controlvm_init_response(&ackmsg, &inmsg.hdr,
1731 CONTROLVM_RESP_SUCCESS);
c3d9a224
BR
1732 if (controlvm_channel)
1733 visorchannel_signalinsert(controlvm_channel,
1b08872e
BR
1734 CONTROLVM_QUEUE_ACK,
1735 &ackmsg);
12e364b9 1736 }
98d7b594 1737 switch (inmsg.hdr.id) {
12e364b9 1738 case CONTROLVM_CHIPSET_INIT:
12e364b9
KC
1739 chipset_init(&inmsg);
1740 break;
1741 case CONTROLVM_BUS_CREATE:
12e364b9
KC
1742 bus_create(&inmsg);
1743 break;
1744 case CONTROLVM_BUS_DESTROY:
12e364b9
KC
1745 bus_destroy(&inmsg);
1746 break;
1747 case CONTROLVM_BUS_CONFIGURE:
12e364b9
KC
1748 bus_configure(&inmsg, parser_ctx);
1749 break;
1750 case CONTROLVM_DEVICE_CREATE:
12e364b9
KC
1751 my_device_create(&inmsg);
1752 break;
1753 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1754 if (cmd->device_change_state.flags.phys_device) {
12e364b9
KC
1755 parahotplug_process_message(&inmsg);
1756 } else {
12e364b9
KC
1757 /* save the hdr and cmd structures for later use */
1758 /* when sending back the response to Command */
1759 my_device_changestate(&inmsg);
4f44b72d 1760 g_devicechangestate_packet = inmsg.cmd;
12e364b9
KC
1761 break;
1762 }
1763 break;
1764 case CONTROLVM_DEVICE_DESTROY:
12e364b9
KC
1765 my_device_destroy(&inmsg);
1766 break;
1767 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1768 /* no op for now, just send a respond that we passed */
98d7b594 1769 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1770 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1771 break;
1772 case CONTROLVM_CHIPSET_READY:
12e364b9
KC
1773 chipset_ready(&inmsg.hdr);
1774 break;
1775 case CONTROLVM_CHIPSET_SELFTEST:
12e364b9
KC
1776 chipset_selftest(&inmsg.hdr);
1777 break;
1778 case CONTROLVM_CHIPSET_STOP:
12e364b9
KC
1779 chipset_notready(&inmsg.hdr);
1780 break;
1781 default:
98d7b594 1782 if (inmsg.hdr.flags.response_expected)
12e364b9 1783 controlvm_respond(&inmsg.hdr,
818352a8 1784 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
12e364b9
KC
1785 break;
1786 }
1787
38f736e9 1788 if (parser_ctx) {
12e364b9
KC
1789 parser_done(parser_ctx);
1790 parser_ctx = NULL;
1791 }
f4c11551 1792 return true;
12e364b9
KC
1793}
1794
5f3a7e36
DK
1795static inline unsigned int
1796issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1797{
1798 struct vmcall_io_controlvm_addr_params params;
1799 int result = VMCALL_SUCCESS;
1800 u64 physaddr;
1801
1802 physaddr = virt_to_phys(&params);
1803 ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1804 if (VMCALL_SUCCESSFUL(result)) {
1805 *control_addr = params.address;
1806 *control_bytes = params.channel_bytes;
1807 }
1808 return result;
1809}
1810
d5b3f1dc 1811static u64 controlvm_get_channel_address(void)
524b0b63 1812{
5fc0229a 1813 u64 addr = 0;
b3c55b13 1814 u32 size = 0;
524b0b63 1815
0aca7844 1816 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1817 return 0;
0aca7844 1818
524b0b63
BR
1819 return addr;
1820}
1821
12e364b9
KC
1822static void
1823controlvm_periodic_work(struct work_struct *work)
1824{
3ab47701 1825 struct controlvm_message inmsg;
f4c11551
JS
1826 bool got_command = false;
1827 bool handle_command_failed = false;
1c1ed292 1828 static u64 poll_count;
12e364b9
KC
1829
1830 /* make sure visorbus server is registered for controlvm callbacks */
4da3336c 1831 if (visorchipset_visorbusregwait && !visorbusregistered)
1c1ed292 1832 goto cleanup;
12e364b9 1833
1c1ed292
BR
1834 poll_count++;
1835 if (poll_count >= 250)
12e364b9
KC
1836 ; /* keep going */
1837 else
1c1ed292 1838 goto cleanup;
12e364b9
KC
1839
1840 /* Check events to determine if response to CHIPSET_READY
1841 * should be sent
1842 */
0639ba67
BR
1843 if (visorchipset_holdchipsetready &&
1844 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1845 if (check_chipset_events() == 1) {
da021f02 1846 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1847 clear_chipset_events();
da021f02 1848 memset(&g_chipset_msg_hdr, 0,
98d7b594 1849 sizeof(struct controlvm_message_header));
12e364b9
KC
1850 }
1851 }
1852
c3d9a224 1853 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1854 CONTROLVM_QUEUE_RESPONSE,
c3d9a224
BR
1855 &inmsg))
1856 ;
1c1ed292 1857 if (!got_command) {
7166ed19 1858 if (controlvm_pending_msg_valid) {
8a1182eb
BR
1859 /* we throttled processing of a prior
1860 * msg, so try to process it again
1861 * rather than reading a new one
1862 */
7166ed19 1863 inmsg = controlvm_pending_msg;
f4c11551 1864 controlvm_pending_msg_valid = false;
1c1ed292 1865 got_command = true;
75c1f8b7 1866 } else {
1c1ed292 1867 got_command = read_controlvm_event(&inmsg);
75c1f8b7 1868 }
8a1182eb 1869 }
12e364b9 1870
f4c11551 1871 handle_command_failed = false;
1c1ed292 1872 while (got_command && (!handle_command_failed)) {
b53e0e93 1873 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1874 if (handle_command(inmsg,
1875 visorchannel_get_physaddr
c3d9a224 1876 (controlvm_channel)))
1c1ed292 1877 got_command = read_controlvm_event(&inmsg);
8a1182eb
BR
1878 else {
1879 /* this is a scenario where throttling
1880 * is required, but probably NOT an
1881 * error...; we stash the current
1882 * controlvm msg so we will attempt to
1883 * reprocess it on our next loop
1884 */
f4c11551 1885 handle_command_failed = true;
7166ed19 1886 controlvm_pending_msg = inmsg;
f4c11551 1887 controlvm_pending_msg_valid = true;
12e364b9
KC
1888 }
1889 }
1890
1891 /* parahotplug_worker */
1892 parahotplug_process_list();
1893
1c1ed292 1894cleanup:
12e364b9
KC
1895
1896 if (time_after(jiffies,
b53e0e93 1897 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1898 /* it's been longer than MIN_IDLE_SECONDS since we
1899 * processed our last controlvm message; slow down the
1900 * polling
1901 */
911e213e
BR
1902 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1903 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1904 } else {
911e213e
BR
1905 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1906 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1907 }
1908
9232d2d6
BR
1909 queue_delayed_work(periodic_controlvm_workqueue,
1910 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1911}
1912
1913static void
1914setup_crash_devices_work_queue(struct work_struct *work)
1915{
e6bdb904
BR
1916 struct controlvm_message local_crash_bus_msg;
1917 struct controlvm_message local_crash_dev_msg;
3ab47701 1918 struct controlvm_message msg;
e6bdb904
BR
1919 u32 local_crash_msg_offset;
1920 u16 local_crash_msg_count;
12e364b9 1921
4da3336c
DK
1922 /* make sure visorbus is registered for controlvm callbacks */
1923 if (visorchipset_visorbusregwait && !visorbusregistered)
e6bdb904 1924 goto cleanup;
12e364b9
KC
1925
1926 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1927
1928 /* send init chipset msg */
98d7b594 1929 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1930 msg.cmd.init_chipset.bus_count = 23;
1931 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1932
1933 chipset_init(&msg);
1934
12e364b9 1935 /* get saved message count */
c3d9a224 1936 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1937 offsetof(struct spar_controlvm_channel_protocol,
1938 saved_crash_message_count),
e6bdb904 1939 &local_crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
1940 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1941 POSTCODE_SEVERITY_ERR);
1942 return;
1943 }
1944
e6bdb904 1945 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 1946 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 1947 local_crash_msg_count,
12e364b9
KC
1948 POSTCODE_SEVERITY_ERR);
1949 return;
1950 }
1951
1952 /* get saved crash message offset */
c3d9a224 1953 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1954 offsetof(struct spar_controlvm_channel_protocol,
1955 saved_crash_message_offset),
e6bdb904 1956 &local_crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
1957 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1958 POSTCODE_SEVERITY_ERR);
1959 return;
1960 }
1961
1962 /* read create device message for storage bus offset */
c3d9a224 1963 if (visorchannel_read(controlvm_channel,
e6bdb904
BR
1964 local_crash_msg_offset,
1965 &local_crash_bus_msg,
3ab47701 1966 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1967 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1968 POSTCODE_SEVERITY_ERR);
1969 return;
1970 }
1971
1972 /* read create device message for storage device */
c3d9a224 1973 if (visorchannel_read(controlvm_channel,
e6bdb904 1974 local_crash_msg_offset +
3ab47701 1975 sizeof(struct controlvm_message),
e6bdb904 1976 &local_crash_dev_msg,
3ab47701 1977 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1978 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1979 POSTCODE_SEVERITY_ERR);
1980 return;
1981 }
1982
1983 /* reuse IOVM create bus message */
ebec8967 1984 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
e6bdb904 1985 bus_create(&local_crash_bus_msg);
75c1f8b7 1986 } else {
12e364b9
KC
1987 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1988 POSTCODE_SEVERITY_ERR);
1989 return;
1990 }
1991
1992 /* reuse create device message for storage device */
ebec8967 1993 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
e6bdb904 1994 my_device_create(&local_crash_dev_msg);
75c1f8b7 1995 } else {
12e364b9
KC
1996 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1997 POSTCODE_SEVERITY_ERR);
1998 return;
1999 }
12e364b9
KC
2000 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2001 return;
2002
e6bdb904 2003cleanup:
12e364b9 2004
911e213e 2005 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 2006
9232d2d6
BR
2007 queue_delayed_work(periodic_controlvm_workqueue,
2008 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
2009}
2010
2011static void
d32517e3 2012bus_create_response(struct visor_device *bus_info, int response)
12e364b9 2013{
0274b5ae
DZ
2014 if (response >= 0) {
2015 bus_info->state.created = 1;
0274b5ae
DZ
2016 }
2017
2018 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
2019 response);
2020
2021 kfree(bus_info->pending_msg_hdr);
2022 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
2023}
2024
2025static void
d32517e3 2026bus_destroy_response(struct visor_device *bus_info, int response)
12e364b9 2027{
0274b5ae
DZ
2028 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
2029 response);
2030
2031 kfree(bus_info->pending_msg_hdr);
2032 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
2033}
2034
2035static void
a298bc0b 2036device_create_response(struct visor_device *dev_info, int response)
12e364b9 2037{
0274b5ae
DZ
2038 if (response >= 0)
2039 dev_info->state.created = 1;
2040
2041 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
2042 response);
2043
2044 kfree(dev_info->pending_msg_hdr);
12e364b9
KC
2045}
2046
2047static void
a298bc0b 2048device_destroy_response(struct visor_device *dev_info, int response)
12e364b9 2049{
0274b5ae
DZ
2050 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
2051 response);
2052
2053 kfree(dev_info->pending_msg_hdr);
2054 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
2055}
2056
d3368a58 2057static void
a298bc0b 2058visorchipset_device_pause_response(struct visor_device *dev_info,
b4b598fd 2059 int response)
12e364b9 2060{
12e364b9 2061 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 2062 dev_info, response,
bd0d2dcc 2063 segment_state_standby);
0274b5ae
DZ
2064
2065 kfree(dev_info->pending_msg_hdr);
2066 dev_info->pending_msg_hdr = NULL;
12e364b9 2067}
12e364b9
KC
2068
2069static void
a298bc0b 2070device_resume_response(struct visor_device *dev_info, int response)
12e364b9
KC
2071{
2072 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 2073 dev_info, response,
bd0d2dcc 2074 segment_state_running);
0274b5ae
DZ
2075
2076 kfree(dev_info->pending_msg_hdr);
2077 dev_info->pending_msg_hdr = NULL;
12e364b9
KC
2078}
2079
18b87ed1 2080static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2081 struct device_attribute *attr,
2082 const char *buf, size_t count)
12e364b9 2083{
18b87ed1 2084 char msgtype[64];
12e364b9 2085
66e24b76
BR
2086 if (sscanf(buf, "%63s", msgtype) != 1)
2087 return -EINVAL;
2088
ebec8967 2089 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2090 chipset_events[0] = 1;
2091 return count;
ebec8967 2092 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2093 chipset_events[1] = 1;
2094 return count;
e22a4a0f
BR
2095 }
2096 return -EINVAL;
12e364b9
KC
2097}
2098
e56fa7cd
BR
2099/* The parahotplug/devicedisabled interface gets called by our support script
2100 * when an SR-IOV device has been shut down. The ID is passed to the script
2101 * and then passed back when the device has been removed.
2102 */
2103static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2104 struct device_attribute *attr,
2105 const char *buf, size_t count)
e56fa7cd 2106{
94217363 2107 unsigned int id;
e56fa7cd 2108
ebec8967 2109 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2110 return -EINVAL;
2111
2112 parahotplug_request_complete(id, 0);
2113 return count;
2114}
2115
2116/* The parahotplug/deviceenabled interface gets called by our support script
2117 * when an SR-IOV device has been recovered. The ID is passed to the script
2118 * and then passed back when the device has been brought back up.
2119 */
2120static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2121 struct device_attribute *attr,
2122 const char *buf, size_t count)
e56fa7cd 2123{
94217363 2124 unsigned int id;
e56fa7cd 2125
ebec8967 2126 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2127 return -EINVAL;
2128
2129 parahotplug_request_complete(id, 1);
2130 return count;
2131}
2132
e3420ed6
EA
2133static int
2134visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2135{
2136 unsigned long physaddr = 0;
2137 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
780fcad3 2138 u64 addr = 0;
e3420ed6
EA
2139
2140 /* sv_enable_dfp(); */
2141 if (offset & (PAGE_SIZE - 1))
2142 return -ENXIO; /* need aligned offsets */
2143
2144 switch (offset) {
2145 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2146 vma->vm_flags |= VM_IO;
2147 if (!*file_controlvm_channel)
2148 return -ENXIO;
2149
2150 visorchannel_read(*file_controlvm_channel,
2151 offsetof(struct spar_controlvm_channel_protocol,
2152 gp_control_channel),
2153 &addr, sizeof(addr));
2154 if (!addr)
2155 return -ENXIO;
2156
2157 physaddr = (unsigned long)addr;
2158 if (remap_pfn_range(vma, vma->vm_start,
2159 physaddr >> PAGE_SHIFT,
2160 vma->vm_end - vma->vm_start,
2161 /*pgprot_noncached */
2162 (vma->vm_page_prot))) {
2163 return -EAGAIN;
2164 }
2165 break;
2166 default:
2167 return -ENXIO;
2168 }
2169 return 0;
2170}
2171
5f3a7e36
DK
2172static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2173{
2174 u64 result = VMCALL_SUCCESS;
2175 u64 physaddr = 0;
2176
2177 ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2178 result);
2179 return result;
2180}
2181
2182static inline int issue_vmcall_update_physical_time(u64 adjustment)
2183{
2184 int result = VMCALL_SUCCESS;
2185
2186 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2187 return result;
2188}
2189
e3420ed6
EA
2190static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2191 unsigned long arg)
2192{
2193 s64 adjustment;
2194 s64 vrtc_offset;
2195
2196 switch (cmd) {
2197 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2198 /* get the physical rtc offset */
2199 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2200 if (copy_to_user((void __user *)arg, &vrtc_offset,
2201 sizeof(vrtc_offset))) {
2202 return -EFAULT;
2203 }
d5b3f1dc 2204 return 0;
e3420ed6
EA
2205 case VMCALL_UPDATE_PHYSICAL_TIME:
2206 if (copy_from_user(&adjustment, (void __user *)arg,
2207 sizeof(adjustment))) {
2208 return -EFAULT;
2209 }
2210 return issue_vmcall_update_physical_time(adjustment);
2211 default:
2212 return -EFAULT;
2213 }
2214}
2215
2216static const struct file_operations visorchipset_fops = {
2217 .owner = THIS_MODULE,
2218 .open = visorchipset_open,
2219 .read = NULL,
2220 .write = NULL,
2221 .unlocked_ioctl = visorchipset_ioctl,
2222 .release = visorchipset_release,
2223 .mmap = visorchipset_mmap,
2224};
2225
0f570fc0 2226static int
e3420ed6
EA
2227visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2228{
2229 int rc = 0;
2230
2231 file_controlvm_channel = controlvm_channel;
2232 cdev_init(&file_cdev, &visorchipset_fops);
2233 file_cdev.owner = THIS_MODULE;
2234 if (MAJOR(major_dev) == 0) {
46168810 2235 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
e3420ed6
EA
2236 /* dynamic major device number registration required */
2237 if (rc < 0)
2238 return rc;
2239 } else {
2240 /* static major device number registration required */
46168810 2241 rc = register_chrdev_region(major_dev, 1, "visorchipset");
e3420ed6
EA
2242 if (rc < 0)
2243 return rc;
2244 }
2245 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2246 if (rc < 0) {
2247 unregister_chrdev_region(major_dev, 1);
2248 return rc;
2249 }
2250 return 0;
2251}
2252
55c67dca
PB
2253static int
2254visorchipset_init(struct acpi_device *acpi_device)
12e364b9 2255{
33078257 2256 int rc = 0;
d5b3f1dc 2257 u64 addr;
d3368a58
JS
2258 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2259 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2260
2261 addr = controlvm_get_channel_address();
2262 if (!addr)
2263 return -ENODEV;
12e364b9 2264
4da3336c 2265 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
84982fbf 2266 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
12e364b9 2267
d3368a58
JS
2268 controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
2269 GFP_KERNEL, uuid);
2270 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2271 visorchannel_get_header(controlvm_channel))) {
2272 initialize_controlvm_payload();
8a1182eb 2273 } else {
d3368a58
JS
2274 visorchannel_destroy(controlvm_channel);
2275 controlvm_channel = NULL;
8a1182eb
BR
2276 return -ENODEV;
2277 }
2278
5aa8ae57
BR
2279 major_dev = MKDEV(visorchipset_major, 0);
2280 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2281 if (rc < 0) {
4cb005a9 2282 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2283 goto cleanup;
4cb005a9 2284 }
9f8d0e8b 2285
da021f02 2286 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2287
4da3336c
DK
2288 /* if booting in a crash kernel */
2289 if (is_kdump_kernel())
2290 INIT_DELAYED_WORK(&periodic_controlvm_work,
2291 setup_crash_devices_work_queue);
2292 else
2293 INIT_DELAYED_WORK(&periodic_controlvm_work,
2294 controlvm_periodic_work);
2295 periodic_controlvm_workqueue =
2296 create_singlethread_workqueue("visorchipset_controlvm");
2297
2298 if (!periodic_controlvm_workqueue) {
2299 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2300 DIAG_SEVERITY_ERR);
2301 rc = -ENOMEM;
2302 goto cleanup;
2303 }
2304 most_recent_message_jiffies = jiffies;
2305 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2306 rc = queue_delayed_work(periodic_controlvm_workqueue,
2307 &periodic_controlvm_work, poll_jiffies);
2308 if (rc < 0) {
2309 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2310 DIAG_SEVERITY_ERR);
2311 goto cleanup;
12e364b9
KC
2312 }
2313
eb34e877
BR
2314 visorchipset_platform_device.dev.devt = major_dev;
2315 if (platform_device_register(&visorchipset_platform_device) < 0) {
4cb005a9
KC
2316 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2317 rc = -1;
a6a3989b 2318 goto cleanup;
4cb005a9 2319 }
12e364b9 2320 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
c79b28f7
PB
2321
2322 rc = visorbus_init();
a6a3989b 2323cleanup:
12e364b9 2324 if (rc) {
12e364b9
KC
2325 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2326 POSTCODE_SEVERITY_ERR);
2327 }
2328 return rc;
2329}
2330
0f570fc0 2331static void
e3420ed6
EA
2332visorchipset_file_cleanup(dev_t major_dev)
2333{
2334 if (file_cdev.ops)
2335 cdev_del(&file_cdev);
2336 file_cdev.ops = NULL;
2337 unregister_chrdev_region(major_dev, 1);
2338}
2339
55c67dca
PB
2340static int
2341visorchipset_exit(struct acpi_device *acpi_device)
12e364b9 2342{
12e364b9
KC
2343 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2344
c79b28f7
PB
2345 visorbus_exit();
2346
4da3336c
DK
2347 cancel_delayed_work(&periodic_controlvm_work);
2348 flush_workqueue(periodic_controlvm_workqueue);
2349 destroy_workqueue(periodic_controlvm_workqueue);
2350 periodic_controlvm_workqueue = NULL;
2351 destroy_controlvm_payload_info(&controlvm_payload_info);
1783319f 2352
da021f02 2353 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2354
c3d9a224 2355 visorchannel_destroy(controlvm_channel);
8a1182eb 2356
addceb12 2357 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
12e364b9 2358 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
55c67dca
PB
2359
2360 return 0;
2361}
2362
2363static const struct acpi_device_id unisys_device_ids[] = {
2364 {"PNP0A07", 0},
2365 {"", 0},
2366};
55c67dca
PB
2367
2368static struct acpi_driver unisys_acpi_driver = {
2369 .name = "unisys_acpi",
2370 .class = "unisys_acpi_class",
2371 .owner = THIS_MODULE,
2372 .ids = unisys_device_ids,
2373 .ops = {
2374 .add = visorchipset_init,
2375 .remove = visorchipset_exit,
2376 },
2377};
d5b3f1dc
EA
2378static __init uint32_t visorutil_spar_detect(void)
2379{
2380 unsigned int eax, ebx, ecx, edx;
2381
2382 if (cpu_has_hypervisor) {
2383 /* check the ID */
2384 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2385 return (ebx == UNISYS_SPAR_ID_EBX) &&
2386 (ecx == UNISYS_SPAR_ID_ECX) &&
2387 (edx == UNISYS_SPAR_ID_EDX);
2388 } else {
2389 return 0;
2390 }
2391}
55c67dca
PB
2392
2393static int init_unisys(void)
2394{
2395 int result;
d5b3f1dc 2396 if (!visorutil_spar_detect())
55c67dca
PB
2397 return -ENODEV;
2398
2399 result = acpi_bus_register_driver(&unisys_acpi_driver);
2400 if (result)
2401 return -ENODEV;
2402
2403 pr_info("Unisys Visorchipset Driver Loaded.\n");
2404 return 0;
2405};
2406
2407static void exit_unisys(void)
2408{
2409 acpi_bus_unregister_driver(&unisys_acpi_driver);
12e364b9
KC
2410}
2411
12e364b9 2412module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2413MODULE_PARM_DESC(visorchipset_major,
2414 "major device number to use for the device node");
4da3336c
DK
2415module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2416MODULE_PARM_DESC(visorchipset_visorbusreqwait,
12e364b9 2417 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2418module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2419 int, S_IRUGO);
2420MODULE_PARM_DESC(visorchipset_holdchipsetready,
2421 "1 to hold response to CHIPSET_READY");
b615d628 2422
55c67dca
PB
2423module_init(init_unisys);
2424module_exit(exit_unisys);
12e364b9
KC
2425
2426MODULE_AUTHOR("Unisys");
2427MODULE_LICENSE("GPL");
2428MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2429 VERSION);
2430MODULE_VERSION(VERSION);
This page took 0.729135 seconds and 5 git commands to generate.