staging: unisys: Convert device creation to use visor_device
[deliverable/linux.git] / drivers / staging / unisys / visorbus / visorchipset.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
55c67dca 18#include <linux/acpi.h>
c0a14641 19#include <linux/cdev.h>
46168810 20#include <linux/ctype.h>
e3420ed6
EA
21#include <linux/fs.h>
22#include <linux/mm.h>
12e364b9
KC
23#include <linux/nls.h>
24#include <linux/netdevice.h>
25#include <linux/platform_device.h>
90addb02 26#include <linux/uuid.h>
1ba00980 27#include <linux/crash_dump.h>
12e364b9 28
5f3a7e36 29#include "channel_guid.h"
55c67dca
PB
30#include "controlvmchannel.h"
31#include "controlvmcompletionstatus.h"
32#include "guestlinuxdebug.h"
33#include "periodic_work.h"
55c67dca
PB
34#include "version.h"
35#include "visorbus.h"
36#include "visorbus_private.h"
5f3a7e36 37#include "vmcallinterface.h"
55c67dca 38
12e364b9 39#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
12e364b9
KC
40
41#define MAX_NAME_SIZE 128
42#define MAX_IP_SIZE 50
43#define MAXOUTSTANDINGCHANNELCOMMAND 256
44#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
46168810 47#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
2ee0deec
PB
48
49#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
50
d5b3f1dc
EA
51
52#define UNISYS_SPAR_LEAF_ID 0x40000000
53
54/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
55#define UNISYS_SPAR_ID_EBX 0x73696e55
56#define UNISYS_SPAR_ID_ECX 0x70537379
57#define UNISYS_SPAR_ID_EDX 0x34367261
58
b615d628
JS
59/*
60 * Module parameters
61 */
b615d628 62static int visorchipset_major;
4da3336c 63static int visorchipset_visorbusregwait = 1; /* default is on */
b615d628 64static int visorchipset_holdchipsetready;
46168810 65static unsigned long controlvm_payload_bytes_buffered;
b615d628 66
e3420ed6
EA
67static int
68visorchipset_open(struct inode *inode, struct file *file)
69{
70 unsigned minor_number = iminor(inode);
71
72 if (minor_number)
73 return -ENODEV;
74 file->private_data = NULL;
75 return 0;
76}
77
/* Release handler for /dev/visorchipset; no per-open state to tear down. */
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
83
12e364b9
KC
84/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
85* we switch to slow polling mode. As soon as we get a controlvm
86* message, we switch back to fast polling mode.
87*/
88#define MIN_IDLE_SECONDS 10
52063eca
JS
89static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
90static unsigned long most_recent_message_jiffies; /* when we got our last
bd5b9b32 91 * controlvm message */
4da3336c 92static int visorbusregistered;
12e364b9
KC
93
94#define MAX_CHIPSET_EVENTS 2
c242233e 95static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 96
46168810
EA
97struct parser_context {
98 unsigned long allocbytes;
99 unsigned long param_bytes;
100 u8 *curr;
101 unsigned long bytes_remaining;
102 bool byte_stream;
103 char data[0];
104};
105
9232d2d6
BR
106static struct delayed_work periodic_controlvm_work;
107static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 108static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 109
e3420ed6
EA
110static struct cdev file_cdev;
111static struct visorchannel **file_controlvm_channel;
da021f02 112static struct controlvm_message_header g_chipset_msg_hdr;
59827f00 113static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 114 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 115/* 0xffffff is an invalid Bus/Device number */
52063eca
JS
116static u32 g_diagpool_bus_no = 0xffffff;
117static u32 g_diagpool_dev_no = 0xffffff;
4f44b72d 118static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9 119
12e364b9 120#define is_diagpool_channel(channel_type_guid) \
59827f00
BR
121 (uuid_le_cmp(channel_type_guid,\
122 spar_diag_pool_channel_protocol_uuid) == 0)
12e364b9 123
1390b88c
BR
124static LIST_HEAD(bus_info_list);
125static LIST_HEAD(dev_info_list);
12e364b9 126
c3d9a224 127static struct visorchannel *controlvm_channel;
12e364b9 128
84982fbf 129/* Manages the request payload in the controlvm channel */
c1f834eb 130struct visor_controlvm_payload_info {
c242233e 131 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 132 u64 offset; /* offset from beginning of controlvm
12e364b9 133 * channel to beginning of payload * pool */
b3c55b13 134 u32 bytes; /* number of bytes in payload pool */
c1f834eb
JS
135};
136
137static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 138
12e364b9
KC
139/* The following globals are used to handle the scenario where we are unable to
140 * offload the payload from a controlvm message due to memory requirements. In
141 * this scenario, we simply stash the controlvm message, then attempt to
142 * process it again the next time controlvm_periodic_work() runs.
143 */
7166ed19 144static struct controlvm_message controlvm_pending_msg;
c79b28f7 145static bool controlvm_pending_msg_valid;
12e364b9 146
12e364b9
KC
147/* This identifies a data buffer that has been received via a controlvm messages
148 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
149 */
150struct putfile_buffer_entry {
151 struct list_head next; /* putfile_buffer_entry list */
317d9614 152 struct parser_context *parser_ctx; /* points to input data buffer */
12e364b9
KC
153};
154
155/* List of struct putfile_request *, via next_putfile_request member.
156 * Each entry in this list identifies an outstanding TRANSMIT_FILE
157 * conversation.
158 */
1eee0011 159static LIST_HEAD(putfile_request_list);
12e364b9
KC
160
161/* This describes a buffer and its current state of transfer (e.g., how many
162 * bytes have already been supplied as putfile data, and how many bytes are
163 * remaining) for a putfile_request.
164 */
165struct putfile_active_buffer {
166 /* a payload from a controlvm message, containing a file data buffer */
317d9614 167 struct parser_context *parser_ctx;
12e364b9
KC
168 /* points within data area of parser_ctx to next byte of data */
169 u8 *pnext;
170 /* # bytes left from <pnext> to the end of this data buffer */
171 size_t bytes_remaining;
172};
173
174#define PUTFILE_REQUEST_SIG 0x0906101302281211
175/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
176 * conversation. Structs of this type are dynamically linked into
177 * <Putfile_request_list>.
178 */
179struct putfile_request {
180 u64 sig; /* PUTFILE_REQUEST_SIG */
181
182 /* header from original TransmitFile request */
98d7b594 183 struct controlvm_message_header controlvm_header;
12e364b9
KC
184 u64 file_request_number; /* from original TransmitFile request */
185
186 /* link to next struct putfile_request */
187 struct list_head next_putfile_request;
188
189 /* most-recent sequence number supplied via a controlvm message */
190 u64 data_sequence_number;
191
192 /* head of putfile_buffer_entry list, which describes the data to be
193 * supplied as putfile data;
194 * - this list is added to when controlvm messages come in that supply
195 * file data
196 * - this list is removed from via the hotplug program that is actually
197 * consuming these buffers to write as file data */
198 struct list_head input_buffer_list;
199 spinlock_t req_list_lock; /* lock for input_buffer_list */
200
201 /* waiters for input_buffer_list to go non-empty */
202 wait_queue_head_t input_buffer_wq;
203
204 /* data not yet read within current putfile_buffer_entry */
205 struct putfile_active_buffer active_buf;
206
207 /* <0 = failed, 0 = in-progress, >0 = successful; */
208 /* note that this must be set with req_list_lock, and if you set <0, */
209 /* it is your responsibility to also free up all of the other objects */
210 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
211 /* before releasing the lock */
212 int completion_status;
213};
214
12e364b9
KC
215struct parahotplug_request {
216 struct list_head list;
217 int id;
218 unsigned long expiration;
3ab47701 219 struct controlvm_message msg;
12e364b9
KC
220};
221
ddf5de53
BR
222static LIST_HEAD(parahotplug_request_list);
223static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
224static void parahotplug_process_list(void);
225
226/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
227 * CONTROLVM_REPORTEVENT.
228 */
4da3336c 229static struct visorchipset_busdev_notifiers busdev_notifiers;
12e364b9 230
d32517e3
DZ
231static void bus_create_response(struct visor_device *p, int response);
232static void bus_destroy_response(struct visor_device *p, int response);
a298bc0b
DZ
233static void device_create_response(struct visor_device *p, int response);
234static void device_destroy_response(struct visor_device *p, int response);
235static void device_resume_response(struct visor_device *p, int response);
12e364b9 236
a298bc0b
DZ
237static void visorchipset_device_pause_response(struct visor_device *p,
238 int response);
2ee0deec 239
8e3fedd6 240static struct visorchipset_busdev_responders busdev_responders = {
12e364b9
KC
241 .bus_create = bus_create_response,
242 .bus_destroy = bus_destroy_response,
243 .device_create = device_create_response,
244 .device_destroy = device_destroy_response,
927c7927 245 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
246 .device_resume = device_resume_response,
247};
248
249/* info for /dev/visorchipset */
5aa8ae57 250static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 251
19f6634f
BR
252/* prototypes for attributes */
253static ssize_t toolaction_show(struct device *dev,
8e76e695 254 struct device_attribute *attr, char *buf);
19f6634f 255static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
256 struct device_attribute *attr,
257 const char *buf, size_t count);
19f6634f
BR
258static DEVICE_ATTR_RW(toolaction);
259
54b31229 260static ssize_t boottotool_show(struct device *dev,
8e76e695 261 struct device_attribute *attr, char *buf);
54b31229 262static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
263 struct device_attribute *attr, const char *buf,
264 size_t count);
54b31229
BR
265static DEVICE_ATTR_RW(boottotool);
266
422af17c 267static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 268 char *buf);
422af17c 269static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 270 const char *buf, size_t count);
422af17c
BR
271static DEVICE_ATTR_RW(error);
272
273static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 274 char *buf);
422af17c 275static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 276 const char *buf, size_t count);
422af17c
BR
277static DEVICE_ATTR_RW(textid);
278
279static ssize_t remaining_steps_show(struct device *dev,
8e76e695 280 struct device_attribute *attr, char *buf);
422af17c 281static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
282 struct device_attribute *attr,
283 const char *buf, size_t count);
422af17c
BR
284static DEVICE_ATTR_RW(remaining_steps);
285
18b87ed1 286static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
287 struct device_attribute *attr,
288 const char *buf, size_t count);
18b87ed1
BR
289static DEVICE_ATTR_WO(chipsetready);
290
e56fa7cd 291static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
292 struct device_attribute *attr,
293 const char *buf, size_t count);
e56fa7cd
BR
294static DEVICE_ATTR_WO(devicedisabled);
295
296static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
297 struct device_attribute *attr,
298 const char *buf, size_t count);
e56fa7cd
BR
299static DEVICE_ATTR_WO(deviceenabled);
300
19f6634f
BR
301static struct attribute *visorchipset_install_attrs[] = {
302 &dev_attr_toolaction.attr,
54b31229 303 &dev_attr_boottotool.attr,
422af17c
BR
304 &dev_attr_error.attr,
305 &dev_attr_textid.attr,
306 &dev_attr_remaining_steps.attr,
19f6634f
BR
307 NULL
308};
309
310static struct attribute_group visorchipset_install_group = {
311 .name = "install",
312 .attrs = visorchipset_install_attrs
313};
314
18b87ed1
BR
315static struct attribute *visorchipset_guest_attrs[] = {
316 &dev_attr_chipsetready.attr,
317 NULL
318};
319
320static struct attribute_group visorchipset_guest_group = {
321 .name = "guest",
322 .attrs = visorchipset_guest_attrs
323};
324
e56fa7cd
BR
325static struct attribute *visorchipset_parahotplug_attrs[] = {
326 &dev_attr_devicedisabled.attr,
327 &dev_attr_deviceenabled.attr,
328 NULL
329};
330
331static struct attribute_group visorchipset_parahotplug_group = {
332 .name = "parahotplug",
333 .attrs = visorchipset_parahotplug_attrs
334};
335
19f6634f
BR
336static const struct attribute_group *visorchipset_dev_groups[] = {
337 &visorchipset_install_group,
18b87ed1 338 &visorchipset_guest_group,
e56fa7cd 339 &visorchipset_parahotplug_group,
19f6634f
BR
340 NULL
341};
342
12e364b9 343/* /sys/devices/platform/visorchipset */
eb34e877 344static struct platform_device visorchipset_platform_device = {
12e364b9
KC
345 .name = "visorchipset",
346 .id = -1,
19f6634f 347 .dev.groups = visorchipset_dev_groups,
12e364b9
KC
348};
349
350/* Function prototypes */
b3168c70 351static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
352 int response);
353static void controlvm_respond_chipset_init(
b3168c70 354 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
355 enum ultra_chipset_feature features);
356static void controlvm_respond_physdev_changestate(
b3168c70 357 struct controlvm_message_header *msg_hdr, int response,
98d7b594 358 struct spar_segment_state state);
12e364b9 359
46168810 360
2ee0deec
PB
361static void parser_done(struct parser_context *ctx);
362
46168810 363static struct parser_context *
fbf35536 364parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
46168810
EA
365{
366 int allocbytes = sizeof(struct parser_context) + bytes;
367 struct parser_context *rc = NULL;
368 struct parser_context *ctx = NULL;
46168810
EA
369
370 if (retry)
371 *retry = false;
cc55b5c5
JS
372
373 /*
374 * alloc an 0 extra byte to ensure payload is
375 * '\0'-terminated
376 */
377 allocbytes++;
46168810
EA
378 if ((controlvm_payload_bytes_buffered + bytes)
379 > MAX_CONTROLVM_PAYLOAD_BYTES) {
380 if (retry)
381 *retry = true;
382 rc = NULL;
383 goto cleanup;
384 }
385 ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
386 if (!ctx) {
387 if (retry)
388 *retry = true;
389 rc = NULL;
390 goto cleanup;
391 }
392
393 ctx->allocbytes = allocbytes;
394 ctx->param_bytes = bytes;
395 ctx->curr = NULL;
396 ctx->bytes_remaining = 0;
397 ctx->byte_stream = false;
398 if (local) {
399 void *p;
400
401 if (addr > virt_to_phys(high_memory - 1)) {
402 rc = NULL;
403 goto cleanup;
404 }
405 p = __va((unsigned long) (addr));
406 memcpy(ctx->data, p, bytes);
407 } else {
dd412751
JS
408 void __iomem *mapping;
409
410 if (!request_mem_region(addr, bytes, "visorchipset")) {
46168810
EA
411 rc = NULL;
412 goto cleanup;
413 }
712c03dc 414
dd412751
JS
415 mapping = ioremap_cache(addr, bytes);
416 if (!mapping) {
417 release_mem_region(addr, bytes);
46168810
EA
418 rc = NULL;
419 goto cleanup;
420 }
dd412751
JS
421 memcpy_fromio(ctx->data, mapping, bytes);
422 release_mem_region(addr, bytes);
46168810 423 }
46168810 424
cc55b5c5 425 ctx->byte_stream = true;
46168810
EA
426 rc = ctx;
427cleanup:
46168810
EA
428 if (rc) {
429 controlvm_payload_bytes_buffered += ctx->param_bytes;
430 } else {
431 if (ctx) {
432 parser_done(ctx);
433 ctx = NULL;
434 }
435 }
436 return rc;
437}
438
464129ed 439static uuid_le
46168810
EA
440parser_id_get(struct parser_context *ctx)
441{
442 struct spar_controlvm_parameters_header *phdr = NULL;
443
444 if (ctx == NULL)
445 return NULL_UUID_LE;
446 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
447 return phdr->id;
448}
449
2ee0deec
PB
450/** Describes the state from the perspective of which controlvm messages have
451 * been received for a bus or device.
452 */
453
454enum PARSER_WHICH_STRING {
455 PARSERSTRING_INITIATOR,
456 PARSERSTRING_TARGET,
457 PARSERSTRING_CONNECTION,
458 PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
459};
460
464129ed 461static void
2ee0deec
PB
462parser_param_start(struct parser_context *ctx,
463 enum PARSER_WHICH_STRING which_string)
46168810
EA
464{
465 struct spar_controlvm_parameters_header *phdr = NULL;
466
467 if (ctx == NULL)
468 goto Away;
469 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
470 switch (which_string) {
471 case PARSERSTRING_INITIATOR:
472 ctx->curr = ctx->data + phdr->initiator_offset;
473 ctx->bytes_remaining = phdr->initiator_length;
474 break;
475 case PARSERSTRING_TARGET:
476 ctx->curr = ctx->data + phdr->target_offset;
477 ctx->bytes_remaining = phdr->target_length;
478 break;
479 case PARSERSTRING_CONNECTION:
480 ctx->curr = ctx->data + phdr->connection_offset;
481 ctx->bytes_remaining = phdr->connection_length;
482 break;
483 case PARSERSTRING_NAME:
484 ctx->curr = ctx->data + phdr->name_offset;
485 ctx->bytes_remaining = phdr->name_length;
486 break;
487 default:
488 break;
489 }
490
491Away:
492 return;
493}
494
464129ed 495static void parser_done(struct parser_context *ctx)
46168810
EA
496{
497 if (!ctx)
498 return;
499 controlvm_payload_bytes_buffered -= ctx->param_bytes;
500 kfree(ctx);
501}
502
464129ed 503static void *
46168810
EA
504parser_string_get(struct parser_context *ctx)
505{
506 u8 *pscan;
507 unsigned long nscan;
508 int value_length = -1;
509 void *value = NULL;
510 int i;
511
512 if (!ctx)
513 return NULL;
514 pscan = ctx->curr;
515 nscan = ctx->bytes_remaining;
516 if (nscan == 0)
517 return NULL;
518 if (!pscan)
519 return NULL;
520 for (i = 0, value_length = -1; i < nscan; i++)
521 if (pscan[i] == '\0') {
522 value_length = i;
523 break;
524 }
525 if (value_length < 0) /* '\0' was not included in the length */
526 value_length = nscan;
527 value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
528 if (value == NULL)
529 return NULL;
530 if (value_length > 0)
531 memcpy(value, pscan, value_length);
532 ((u8 *) (value))[value_length] = '\0';
533 return value;
534}
535
536
d746cb55
VB
537static ssize_t toolaction_show(struct device *dev,
538 struct device_attribute *attr,
539 char *buf)
19f6634f 540{
01f4d85a 541 u8 tool_action;
19f6634f 542
c3d9a224 543 visorchannel_read(controlvm_channel,
d19642f6 544 offsetof(struct spar_controlvm_channel_protocol,
8e76e695 545 tool_action), &tool_action, sizeof(u8));
01f4d85a 546 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
547}
548
d746cb55
VB
549static ssize_t toolaction_store(struct device *dev,
550 struct device_attribute *attr,
551 const char *buf, size_t count)
19f6634f 552{
01f4d85a 553 u8 tool_action;
66e24b76 554 int ret;
19f6634f 555
ebec8967 556 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
557 return -EINVAL;
558
c3d9a224 559 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
560 offsetof(struct spar_controlvm_channel_protocol,
561 tool_action),
01f4d85a 562 &tool_action, sizeof(u8));
66e24b76
BR
563
564 if (ret)
565 return ret;
e22a4a0f 566 return count;
19f6634f
BR
567}
568
d746cb55
VB
569static ssize_t boottotool_show(struct device *dev,
570 struct device_attribute *attr,
571 char *buf)
54b31229 572{
365522d9 573 struct efi_spar_indication efi_spar_indication;
54b31229 574
c3d9a224 575 visorchannel_read(controlvm_channel,
8e76e695
BR
576 offsetof(struct spar_controlvm_channel_protocol,
577 efi_spar_ind), &efi_spar_indication,
578 sizeof(struct efi_spar_indication));
54b31229 579 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 580 efi_spar_indication.boot_to_tool);
54b31229
BR
581}
582
d746cb55
VB
583static ssize_t boottotool_store(struct device *dev,
584 struct device_attribute *attr,
585 const char *buf, size_t count)
54b31229 586{
66e24b76 587 int val, ret;
365522d9 588 struct efi_spar_indication efi_spar_indication;
54b31229 589
ebec8967 590 if (kstrtoint(buf, 10, &val))
66e24b76
BR
591 return -EINVAL;
592
365522d9 593 efi_spar_indication.boot_to_tool = val;
c3d9a224 594 ret = visorchannel_write(controlvm_channel,
d19642f6 595 offsetof(struct spar_controlvm_channel_protocol,
8e76e695
BR
596 efi_spar_ind), &(efi_spar_indication),
597 sizeof(struct efi_spar_indication));
66e24b76
BR
598
599 if (ret)
600 return ret;
e22a4a0f 601 return count;
54b31229 602}
422af17c
BR
603
604static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 605 char *buf)
422af17c
BR
606{
607 u32 error;
608
8e76e695
BR
609 visorchannel_read(controlvm_channel,
610 offsetof(struct spar_controlvm_channel_protocol,
611 installation_error),
612 &error, sizeof(u32));
422af17c
BR
613 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
614}
615
616static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 617 const char *buf, size_t count)
422af17c
BR
618{
619 u32 error;
66e24b76 620 int ret;
422af17c 621
ebec8967 622 if (kstrtou32(buf, 10, &error))
66e24b76
BR
623 return -EINVAL;
624
c3d9a224 625 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
626 offsetof(struct spar_controlvm_channel_protocol,
627 installation_error),
628 &error, sizeof(u32));
66e24b76
BR
629 if (ret)
630 return ret;
e22a4a0f 631 return count;
422af17c
BR
632}
633
634static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 635 char *buf)
422af17c 636{
10dbf0e3 637 u32 text_id;
422af17c 638
8e76e695
BR
639 visorchannel_read(controlvm_channel,
640 offsetof(struct spar_controlvm_channel_protocol,
641 installation_text_id),
642 &text_id, sizeof(u32));
10dbf0e3 643 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
644}
645
646static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 647 const char *buf, size_t count)
422af17c 648{
10dbf0e3 649 u32 text_id;
66e24b76 650 int ret;
422af17c 651
ebec8967 652 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
653 return -EINVAL;
654
c3d9a224 655 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
656 offsetof(struct spar_controlvm_channel_protocol,
657 installation_text_id),
658 &text_id, sizeof(u32));
66e24b76
BR
659 if (ret)
660 return ret;
e22a4a0f 661 return count;
422af17c
BR
662}
663
422af17c 664static ssize_t remaining_steps_show(struct device *dev,
8e76e695 665 struct device_attribute *attr, char *buf)
422af17c 666{
ee8da290 667 u16 remaining_steps;
422af17c 668
c3d9a224 669 visorchannel_read(controlvm_channel,
8e76e695
BR
670 offsetof(struct spar_controlvm_channel_protocol,
671 installation_remaining_steps),
672 &remaining_steps, sizeof(u16));
ee8da290 673 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
674}
675
676static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
677 struct device_attribute *attr,
678 const char *buf, size_t count)
422af17c 679{
ee8da290 680 u16 remaining_steps;
66e24b76 681 int ret;
422af17c 682
ebec8967 683 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
684 return -EINVAL;
685
c3d9a224 686 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
687 offsetof(struct spar_controlvm_channel_protocol,
688 installation_remaining_steps),
689 &remaining_steps, sizeof(u16));
66e24b76
BR
690 if (ret)
691 return ret;
e22a4a0f 692 return count;
422af17c
BR
693}
694
ab0592b9
DZ
695struct visor_busdev {
696 u32 bus_no;
697 u32 dev_no;
698};
699
700static int match_visorbus_dev_by_id(struct device *dev, void *data)
701{
702 struct visor_device *vdev = to_visor_device(dev);
703 struct visor_busdev *id = (struct visor_busdev *)data;
704 u32 bus_no = id->bus_no;
705 u32 dev_no = id->dev_no;
706
65bd6e46
DZ
707 if ((vdev->chipset_bus_no == bus_no) &&
708 (vdev->chipset_dev_no == dev_no))
ab0592b9
DZ
709 return 1;
710
711 return 0;
712}
/**
 * visorbus_get_device_by_id() - find the visor_device matching the given
 *                               chipset bus/device numbers
 * @bus_no: chipset bus number to match
 * @dev_no: chipset device number to match
 * @from:   previously-found device to resume the search after, or NULL to
 *          search from the start of the bus list
 *
 * Walks visorbus_type with bus_find_device(), matching via
 * match_visorbus_dev_by_id().
 *
 * NOTE(review): bus_find_device() normally takes a reference on the
 * returned device — confirm that callers drop it when done.
 *
 * Return: the matching visor_device, or NULL if none was found.
 */
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
733
c242233e 734static u8
12e364b9
KC
735check_chipset_events(void)
736{
737 int i;
c242233e 738 u8 send_msg = 1;
12e364b9
KC
739 /* Check events to determine if response should be sent */
740 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
741 send_msg &= chipset_events[i];
742 return send_msg;
743}
744
745static void
746clear_chipset_events(void)
747{
748 int i;
749 /* Clear chipset_events */
750 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
751 chipset_events[i] = 0;
752}
753
/* Register (notifiers != NULL) or unregister (notifiers == NULL) the
 * visorbus driver's bus/device notifier callbacks with this driver,
 * under notifier_lock.
 * @notifiers:   callbacks we will invoke on bus/device events; NULL clears
 *               the stored set and the visorbusregistered flag
 * @responders:  out-param filled with our responder functions, if non-NULL
 * @driver_info: if non-NULL, populated with this chipset driver's
 *               name/version info
 */
void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		/* clear the stored callbacks and mark visorbus unregistered */
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
12e364b9 778
12e364b9 779static void
3ab47701 780chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
781{
782 static int chipset_inited;
b9b141e8 783 enum ultra_chipset_feature features = 0;
12e364b9
KC
784 int rc = CONTROLVM_RESP_SUCCESS;
785
786 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
787 if (chipset_inited) {
22ad57ba 788 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 789 goto cleanup;
12e364b9
KC
790 }
791 chipset_inited = 1;
792 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
793
794 /* Set features to indicate we support parahotplug (if Command
795 * also supports it). */
796 features =
2ea5117b 797 inmsg->cmd.init_chipset.
12e364b9
KC
798 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
799
800 /* Set the "reply" bit so Command knows this is a
801 * features-aware driver. */
802 features |= ULTRA_CHIPSET_FEATURE_REPLY;
803
e3199b2e 804cleanup:
98d7b594 805 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
806 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
807}
808
809static void
3ab47701 810controlvm_init_response(struct controlvm_message *msg,
b3168c70 811 struct controlvm_message_header *msg_hdr, int response)
12e364b9 812{
3ab47701 813 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 814 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
815 msg->hdr.payload_bytes = 0;
816 msg->hdr.payload_vm_offset = 0;
817 msg->hdr.payload_max_bytes = 0;
12e364b9 818 if (response < 0) {
98d7b594
BR
819 msg->hdr.flags.failed = 1;
820 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
821 }
822}
823
/* Send a response for @msg_hdr back on the controlvm request queue.
 * Test messages are dropped without responding, and a failed
 * signalinsert is silently ignored (NOTE(review): confirm dropping the
 * response on queue-full is acceptable to the Command side).
 */
static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet. */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
846
847static void
b3168c70 848controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 849 int response,
b9b141e8 850 enum ultra_chipset_feature features)
12e364b9 851{
3ab47701 852 struct controlvm_message outmsg;
26eb2c0c 853
b3168c70 854 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 855 outmsg.cmd.init_chipset.features = features;
c3d9a224 856 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 857 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
858 return;
859 }
860}
861
98d7b594 862static void controlvm_respond_physdev_changestate(
b3168c70 863 struct controlvm_message_header *msg_hdr, int response,
98d7b594 864 struct spar_segment_state state)
12e364b9 865{
3ab47701 866 struct controlvm_message outmsg;
26eb2c0c 867
b3168c70 868 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
869 outmsg.cmd.device_change_state.state = state;
870 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 871 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 872 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
873 return;
874 }
875}
876
2ee0deec
PB
877enum crash_obj_type {
878 CRASH_DEV,
879 CRASH_BUS,
880};
881
12e364b9 882static void
0274b5ae
DZ
883bus_responder(enum controlvm_id cmd_id,
884 struct controlvm_message_header *pending_msg_hdr,
3032aedd 885 int response)
12e364b9 886{
0274b5ae
DZ
887 if (pending_msg_hdr == NULL)
888 return; /* no controlvm response needed */
12e364b9 889
0274b5ae 890 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 891 return;
0aca7844 892
0274b5ae 893 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
894}
895
/* Send a DEVICE_CHANGESTATE response for device @p, reporting
 * @response_state along with the device's chipset bus/dev numbers.
 * Nothing is sent when no response is pending on @p, or when the pending
 * header's id does not match @cmd_id. A failed signalinsert is silently
 * ignored, consistent with the other responders.
 */
static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visor_device *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->chipset_bus_no;
	u32 dev_no = p->chipset_dev_no;

	if (p->pending_msg_hdr == NULL)
		return;	/* no controlvm response needed */
	if (p->pending_msg_hdr->id != cmd_id)
		return;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;
}
920
921static void
0274b5ae
DZ
922device_responder(enum controlvm_id cmd_id,
923 struct controlvm_message_header *pending_msg_hdr,
b4b598fd 924 int response)
12e364b9 925{
0274b5ae 926 if (pending_msg_hdr == NULL)
12e364b9 927 return; /* no controlvm response needed */
0aca7844 928
0274b5ae 929 if (pending_msg_hdr->id != (u32)cmd_id)
12e364b9 930 return;
0aca7844 931
0274b5ae 932 controlvm_respond(pending_msg_hdr, response);
12e364b9
KC
933}
934
935static void
d32517e3 936bus_epilog(struct visor_device *bus_info,
2836c6a8 937 u32 cmd, struct controlvm_message_header *msg_hdr,
f4c11551 938 int response, bool need_response)
12e364b9 939{
f4c11551 940 bool notified = false;
0274b5ae 941 struct controlvm_message_header *pmsg_hdr = NULL;
12e364b9 942
0274b5ae
DZ
943 if (!bus_info) {
944 /* relying on a valid passed in response code */
945 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
946 pmsg_hdr = msg_hdr;
947 goto away;
948 }
949
950 if (bus_info->pending_msg_hdr) {
951 /* only non-NULL if dev is still waiting on a response */
952 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
953 pmsg_hdr = bus_info->pending_msg_hdr;
954 goto away;
955 }
0aca7844 956
2836c6a8 957 if (need_response) {
0274b5ae
DZ
958 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
959 if (!pmsg_hdr) {
960 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
961 goto away;
962 }
963
964 memcpy(pmsg_hdr, msg_hdr,
98d7b594 965 sizeof(struct controlvm_message_header));
0274b5ae 966 bus_info->pending_msg_hdr = pmsg_hdr;
75c1f8b7 967 }
12e364b9 968
8f1947ac 969 down(&notifier_lock);
12e364b9
KC
970 if (response == CONTROLVM_RESP_SUCCESS) {
971 switch (cmd) {
972 case CONTROLVM_BUS_CREATE:
4da3336c 973 if (busdev_notifiers.bus_create) {
3032aedd 974 (*busdev_notifiers.bus_create) (bus_info);
f4c11551 975 notified = true;
12e364b9
KC
976 }
977 break;
978 case CONTROLVM_BUS_DESTROY:
4da3336c 979 if (busdev_notifiers.bus_destroy) {
3032aedd 980 (*busdev_notifiers.bus_destroy) (bus_info);
f4c11551 981 notified = true;
12e364b9
KC
982 }
983 break;
984 }
985 }
0274b5ae 986away:
12e364b9
KC
987 if (notified)
988 /* The callback function just called above is responsible
929aa8ae 989 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
990 * function, which will call bus_responder()
991 */
992 ;
993 else
0274b5ae
DZ
994 /*
995 * Do not kfree(pmsg_hdr) as this is the failure path.
996 * The success path ('notified') will call the responder
997 * directly and kfree() there.
998 */
999 bus_responder(cmd, pmsg_hdr, response);
8f1947ac 1000 up(&notifier_lock);
12e364b9
KC
1001}
1002
1003static void
a298bc0b 1004device_epilog(struct visor_device *dev_info,
b4b598fd 1005 struct spar_segment_state state, u32 cmd,
2836c6a8 1006 struct controlvm_message_header *msg_hdr, int response,
f4c11551 1007 bool need_response, bool for_visorbus)
12e364b9 1008{
e82ba62e 1009 struct visorchipset_busdev_notifiers *notifiers;
f4c11551 1010 bool notified = false;
a298bc0b
DZ
1011 u32 bus_no = dev_info->chipset_bus_no;
1012 u32 dev_no = dev_info->chipset_dev_no;
0274b5ae 1013 struct controlvm_message_header *pmsg_hdr = NULL;
12e364b9 1014
12e364b9
KC
1015 char *envp[] = {
1016 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
1017 NULL
1018 };
1019
4da3336c
DK
1020 notifiers = &busdev_notifiers;
1021
0274b5ae
DZ
1022 if (!dev_info) {
1023 /* relying on a valid passed in response code */
1024 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
1025 pmsg_hdr = msg_hdr;
1026 goto away;
1027 }
1028
1029 if (dev_info->pending_msg_hdr) {
1030 /* only non-NULL if dev is still waiting on a response */
1031 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1032 pmsg_hdr = dev_info->pending_msg_hdr;
1033 goto away;
1034 }
1035
2836c6a8 1036 if (need_response) {
0274b5ae
DZ
1037 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1038 if (!pmsg_hdr) {
1039 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1040 goto away;
1041 }
1042
1043 memcpy(pmsg_hdr, msg_hdr,
98d7b594 1044 sizeof(struct controlvm_message_header));
0274b5ae 1045 dev_info->pending_msg_hdr = pmsg_hdr;
75c1f8b7 1046 }
12e364b9 1047
8f1947ac 1048 down(&notifier_lock);
12e364b9
KC
1049 if (response >= 0) {
1050 switch (cmd) {
1051 case CONTROLVM_DEVICE_CREATE:
1052 if (notifiers->device_create) {
b4b598fd 1053 (*notifiers->device_create) (dev_info);
f4c11551 1054 notified = true;
12e364b9
KC
1055 }
1056 break;
1057 case CONTROLVM_DEVICE_CHANGESTATE:
1058 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
1059 if (state.alive == segment_state_running.alive &&
1060 state.operating ==
1061 segment_state_running.operating) {
12e364b9 1062 if (notifiers->device_resume) {
b4b598fd 1063 (*notifiers->device_resume) (dev_info);
f4c11551 1064 notified = true;
12e364b9
KC
1065 }
1066 }
1067 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 1068 else if (state.alive == segment_state_standby.alive &&
3f833b54 1069 state.operating ==
bd0d2dcc 1070 segment_state_standby.operating) {
12e364b9
KC
1071 /* technically this is standby case
1072 * where server is lost
1073 */
1074 if (notifiers->device_pause) {
b4b598fd 1075 (*notifiers->device_pause) (dev_info);
f4c11551 1076 notified = true;
12e364b9 1077 }
bd0d2dcc 1078 } else if (state.alive == segment_state_paused.alive &&
3f833b54 1079 state.operating ==
bd0d2dcc 1080 segment_state_paused.operating) {
12e364b9
KC
1081 /* this is lite pause where channel is
1082 * still valid just 'pause' of it
1083 */
2836c6a8
BR
1084 if (bus_no == g_diagpool_bus_no &&
1085 dev_no == g_diagpool_dev_no) {
12e364b9
KC
1086 /* this will trigger the
1087 * diag_shutdown.sh script in
1088 * the visorchipset hotplug */
1089 kobject_uevent_env
eb34e877 1090 (&visorchipset_platform_device.dev.
12e364b9
KC
1091 kobj, KOBJ_ONLINE, envp);
1092 }
1093 }
1094 break;
1095 case CONTROLVM_DEVICE_DESTROY:
1096 if (notifiers->device_destroy) {
b4b598fd 1097 (*notifiers->device_destroy) (dev_info);
f4c11551 1098 notified = true;
12e364b9
KC
1099 }
1100 break;
1101 }
1102 }
0274b5ae 1103away:
12e364b9
KC
1104 if (notified)
1105 /* The callback function just called above is responsible
929aa8ae 1106 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1107 * function, which will call device_responder()
1108 */
1109 ;
1110 else
0274b5ae
DZ
1111 /*
1112 * Do not kfree(pmsg_hdr) as this is the failure path.
1113 * The success path ('notified') will call the responder
1114 * directly and kfree() there.
1115 */
1116 device_responder(cmd, pmsg_hdr, response);
8f1947ac 1117 up(&notifier_lock);
12e364b9
KC
1118}
1119
/* Handle a CONTROLVM_BUS_CREATE message: allocate a visor_device for the
 * new bus, attach its controlvm-described channel, and let bus_epilog()
 * notify the bus driver and/or send the response.  On any failure, rc
 * carries a negative CONTROLVM_RESP_ERROR_* code into bus_epilog().
 */
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	/* creating a bus that already exists is an error */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	/* a bus is addressed as (bus_no, BUS_ROOT_DEVICE) */
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	/* map the channel memory described by the controlvm message */
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;	/* epilog must see the failure */
		goto cleanup;
	}
	bus_info->visorchannel = visorchannel;

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	/* epilog runs on success AND failure; rc tells it which */
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
1170
1171static void
3ab47701 1172bus_destroy(struct controlvm_message *inmsg)
12e364b9 1173{
2ea5117b 1174 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1175 u32 bus_no = cmd->destroy_bus.bus_no;
d32517e3 1176 struct visor_device *bus_info;
12e364b9
KC
1177 int rc = CONTROLVM_RESP_SUCCESS;
1178
d32517e3 1179 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
dff54cd6 1180 if (!bus_info)
22ad57ba 1181 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1182 else if (bus_info->state.created == 0)
22ad57ba 1183 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1184
3032aedd 1185 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1186 rc, inmsg->hdr.flags.response_expected == 1);
d32517e3
DZ
1187
1188 /* bus_info is freed as part of the busdevice_release function */
12e364b9
KC
1189}
1190
/* Handle a CONTROLVM_BUS_CONFIGURE message: attach the guest partition
 * handle, partition uuid and name (parsed from the payload) to an
 * already-created bus, then let bus_epilog() send the response.
 */
static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		/* unknown bus */
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		/* bus exists but was never created */
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr != NULL) {
		/* a previous command on this bus is still awaiting response */
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		visorchannel_set_clientpartition(bus_info->visorchannel,
				cmd->configure_bus.guest_handle);
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
1230
/* Handle a CONTROLVM_DEVICE_CREATE message: validate the owning bus,
 * allocate a visor_device for the new device, attach its channel, record
 * the DiagPool coordinates when applicable, and finish via
 * device_epilog() (which notifies the bus driver and/or responds).
 */
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	/* the owning bus must exist ... */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	/* ... and must have completed creation */
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	/* creating a device that already exists is an error */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	/* map the device's channel memory described in the message */
	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   cmd->create_device.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;	/* epilog must see the failure */
		goto cleanup;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(cmd->create_device.data_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
1311
1312static void
3ab47701 1313my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1314{
2ea5117b 1315 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1316 u32 bus_no = cmd->device_change_state.bus_no;
1317 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1318 struct spar_segment_state state = cmd->device_change_state.state;
a298bc0b 1319 struct visor_device *dev_info;
12e364b9
KC
1320 int rc = CONTROLVM_RESP_SUCCESS;
1321
a298bc0b 1322 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
0278a905
BR
1323 if (!dev_info) {
1324 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1325 POSTCODE_SEVERITY_ERR);
22ad57ba 1326 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1327 } else if (dev_info->state.created == 0) {
1328 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1329 POSTCODE_SEVERITY_ERR);
22ad57ba 1330 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1331 }
0278a905 1332 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1333 device_epilog(dev_info, state,
0278a905 1334 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
4da3336c 1335 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1336}
1337
1338static void
3ab47701 1339my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1340{
2ea5117b 1341 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1342 u32 bus_no = cmd->destroy_device.bus_no;
1343 u32 dev_no = cmd->destroy_device.dev_no;
a298bc0b 1344 struct visor_device *dev_info;
12e364b9
KC
1345 int rc = CONTROLVM_RESP_SUCCESS;
1346
a298bc0b 1347 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
61715c8b 1348 if (!dev_info)
22ad57ba 1349 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1350 else if (dev_info->state.created == 0)
22ad57ba 1351 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1352
61715c8b 1353 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
b4b598fd 1354 device_epilog(dev_info, segment_state_running,
12e364b9 1355 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
4da3336c 1356 inmsg->hdr.flags.response_expected == 1, 1);
12e364b9
KC
1357}
1358
1359/* When provided with the physical address of the controlvm channel
1360 * (phys_addr), the offset to the payload area we need to manage
1361 * (offset), and the size of this payload area (bytes), fills in the
f4c11551 1362 * controlvm_payload_info struct. Returns true for success or false
12e364b9
KC
1363 * for failure.
1364 */
1365static int
d5b3f1dc 1366initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
c1f834eb 1367 struct visor_controlvm_payload_info *info)
12e364b9 1368{
c242233e 1369 u8 __iomem *payload = NULL;
12e364b9
KC
1370 int rc = CONTROLVM_RESP_SUCCESS;
1371
38f736e9 1372 if (!info) {
22ad57ba 1373 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1374 goto cleanup;
12e364b9 1375 }
c1f834eb 1376 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9 1377 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1378 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1379 goto cleanup;
12e364b9
KC
1380 }
1381 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1382 if (!payload) {
22ad57ba 1383 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1384 goto cleanup;
12e364b9
KC
1385 }
1386
1387 info->offset = offset;
1388 info->bytes = bytes;
1389 info->ptr = payload;
12e364b9 1390
f118a39b 1391cleanup:
12e364b9 1392 if (rc < 0) {
f118a39b 1393 if (payload) {
12e364b9
KC
1394 iounmap(payload);
1395 payload = NULL;
1396 }
1397 }
1398 return rc;
1399}
1400
1401static void
c1f834eb 1402destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1403{
597c338f 1404 if (info->ptr) {
12e364b9
KC
1405 iounmap(info->ptr);
1406 info->ptr = NULL;
1407 }
c1f834eb 1408 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1409}
1410
/* Read the request-payload offset and size out of the controlvm channel
 * header and map that payload area into controlvm_payload_info.  Posts an
 * error postcode and bails on any channel-read failure.
 */
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	/* offset of the payload area, relative to the channel start */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	/* size of the payload area in bytes */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	/* note: the mapping result is not checked here; errors are only
	 * visible via the rc of initialize_controlvm_payload_info()
	 */
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
1438
/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	/* kobject_uevent()'s return value is ignored; always succeeds */
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
12e364b9 1448
d3368a58 1449static int
12e364b9
KC
1450visorchipset_chipset_selftest(void)
1451{
1452 char env_selftest[20];
1453 char *envp[] = { env_selftest, NULL };
26eb2c0c 1454
12e364b9 1455 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1456 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1457 envp);
1458 return CONTROLVM_RESP_SUCCESS;
1459}
12e364b9
KC
1460
/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	/* kobject_uevent()'s return value is ignored; always succeeds */
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
12e364b9
KC
1470
1471static void
77a0449d 1472chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1473{
1474 int rc = visorchipset_chipset_ready();
26eb2c0c 1475
12e364b9
KC
1476 if (rc != CONTROLVM_RESP_SUCCESS)
1477 rc = -rc;
77a0449d
BR
1478 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1479 controlvm_respond(msg_hdr, rc);
1480 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1481 /* Send CHIPSET_READY response when all modules have been loaded
1482 * and disks mounted for the partition
1483 */
77a0449d 1484 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1485 }
1486}
1487
1488static void
77a0449d 1489chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1490{
1491 int rc = visorchipset_chipset_selftest();
26eb2c0c 1492
12e364b9
KC
1493 if (rc != CONTROLVM_RESP_SUCCESS)
1494 rc = -rc;
77a0449d
BR
1495 if (msg_hdr->flags.response_expected)
1496 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1497}
1498
1499static void
77a0449d 1500chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1501{
1502 int rc = visorchipset_chipset_notready();
26eb2c0c 1503
12e364b9
KC
1504 if (rc != CONTROLVM_RESP_SUCCESS)
1505 rc = -rc;
77a0449d
BR
1506 if (msg_hdr->flags.response_expected)
1507 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1508}
1509
1510/* This is your "one-stop" shop for grabbing the next message from the
1511 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1512 */
f4c11551 1513static bool
3ab47701 1514read_controlvm_event(struct controlvm_message *msg)
12e364b9 1515{
c3d9a224 1516 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1517 CONTROLVM_QUEUE_EVENT, msg)) {
1518 /* got a message */
0aca7844 1519 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1520 return false;
1521 return true;
12e364b9 1522 }
f4c11551 1523 return false;
12e364b9
KC
1524}
1525
1526/*
1527 * The general parahotplug flow works as follows. The visorchipset
1528 * driver receives a DEVICE_CHANGESTATE message from Command
1529 * specifying a physical device to enable or disable. The CONTROLVM
1530 * message handler calls parahotplug_process_message, which then adds
1531 * the message to a global list and kicks off a udev event which
1532 * causes a user level script to enable or disable the specified
1533 * device. The udev script then writes to
1534 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1535 * to get called, at which point the appropriate CONTROLVM message is
1536 * retrieved from the list and responded to.
1537 */
1538
1539#define PARAHOTPLUG_TIMEOUT_MS 2000
1540
/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	/* atomic increment keeps ids unique across concurrent callers */
	return atomic_inc_return(&id);
}
1552
/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	/* current jiffies plus the timeout converted to jiffies */
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
1562
1563/*
1564 * Create a parahotplug_request, which is basically a wrapper for a
1565 * CONTROLVM_MESSAGE that we can stick on a list
1566 */
1567static struct parahotplug_request *
3ab47701 1568parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1569{
ea0dcfcf
QL
1570 struct parahotplug_request *req;
1571
6a55e3c3 1572 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1573 if (!req)
12e364b9
KC
1574 return NULL;
1575
1576 req->id = parahotplug_next_id();
1577 req->expiration = parahotplug_next_expiration();
1578 req->msg = *msg;
1579
1580 return req;
1581}
1582
/*
 * Free a parahotplug_request.  The wrapper owns no other resources:
 * the controlvm message is stored by value.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1591
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	/* env strings handed to the udev script; each buffer must hold
	 * its prefix plus a formatted u32/u16 (fits well within 40)
	 */
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	/* the id lets the script's reply be matched back to this request */
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	/* dev_no encodes PCI device (upper bits) and function (low 3) */
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
1621
1622/*
1623 * Remove any request from the list that's been on there too long and
1624 * respond with an error.
1625 */
1626static void
1627parahotplug_process_list(void)
1628{
e82ba62e
JS
1629 struct list_head *pos;
1630 struct list_head *tmp;
12e364b9 1631
ddf5de53 1632 spin_lock(&parahotplug_request_list_lock);
12e364b9 1633
ddf5de53 1634 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1635 struct parahotplug_request *req =
1636 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1637
1638 if (!time_after_eq(jiffies, req->expiration))
1639 continue;
1640
1641 list_del(pos);
1642 if (req->msg.hdr.flags.response_expected)
1643 controlvm_respond_physdev_changestate(
1644 &req->msg.hdr,
1645 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1646 req->msg.cmd.device_change_state.state);
1647 parahotplug_request_destroy(req);
12e364b9
KC
1648 }
1649
ddf5de53 1650 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1651}
1652
/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 * Returns 0 when a matching request was found, -1 otherwise.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			/* unlock before responding; the request is already
			 * off the list so no one else can reach it
			 */
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}
1689
/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	/* allocation failure: silently drop the message */
	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
						      CONTROLVM_RESP_SUCCESS,
						      inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
1731
12e364b9
KC
/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *	controlvm message was NOT processed, but processing must be
 *	retried before reading the next controlvm message; a
 *	scenario where this can occur is when we need to throttle
 *	the allocation of memory in which to copy out controlvm
 *	payload data
 *    true - processing of the controlvm message completed,
 *	either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;	/* no channel: nothing to do, don't retry */
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		/* allocation throttled: ask the caller to retry later */
		if (!parser_ctx && retry)
			return false;
	}

	/* ACK real (non-test) messages before dispatching them */
	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		/* physical devices go through the parahotplug/udev path;
		 * virtual devices are handled in-driver
		 */
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		/* unknown command ids are rejected, not retried */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
					  -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
1839
5f3a7e36
DK
/* Ask the hypervisor (via an IO vmcall) where the controlvm channel lives.
 * On success, *control_addr receives the guest-physical address of the
 * channel and *control_bytes its size in bytes.
 *
 * Returns the raw vmcall result code; callers test it with
 * VMCALL_SUCCESSFUL().  NOTE(review): the params struct lives on the stack
 * and its physical address is handed to the hypervisor — assumes the stack
 * is physically contiguous/addressable here (true for kernel stacks).
 */
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		/* hypervisor filled in params on success */
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
1855
d5b3f1dc 1856static u64 controlvm_get_channel_address(void)
524b0b63 1857{
5fc0229a 1858 u64 addr = 0;
b3c55b13 1859 u32 size = 0;
524b0b63 1860
0aca7844 1861 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1862 return 0;
0aca7844 1863
524b0b63
BR
1864 return addr;
1865}
1866
12e364b9
KC
/* Periodic (delayed-work) poller for the controlvm channel.
 *
 * Responsibilities, in order:
 *  1. Optionally wait until visorbus has registered for callbacks.
 *  2. Rate-limit real work to every 250th invocation via a static counter.
 *  3. If we are holding the CHIPSET_READY response, send it once the
 *     required chipset events have been observed.
 *  4. Drain (and discard) everything on the RESPONSE queue.
 *  5. Process either a previously-throttled pending message or new events,
 *     stashing the current message again if handle_command() asks for
 *     throttling.
 *  6. Re-queue itself, polling slowly when idle and quickly when busy.
 */
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;	/* persists across invocations */

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	poll_count++;
	if (poll_count >= 250)
		;	/* keep going */
	else
		goto cleanup;

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			/* forget the held header so this fires only once */
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	/* drain and discard anything sitting on the response queue */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	/* NOTE(review): got_command is always false here (initialized
	 * above), so this condition is currently a no-op guard.
	 */
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	/* reschedule ourselves at the chosen polling rate */
	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
1957
/* Work handler used when booting a kdump (crash) kernel.  Instead of the
 * normal controlvm polling loop, this replays the bus-create and
 * device-create controlvm messages that were saved in the controlvm
 * channel, so the crash kernel can bring up its boot/storage device and
 * write out the dump.
 *
 * NOTE(review): the error paths below return without re-queueing any work,
 * so a read failure permanently stops controlvm processing — presumably
 * acceptable in a crash kernel; confirm.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	/* NOTE(review): 23 is a magic bus count — source of this value is
	 * not visible here; confirm against firmware expectations.
	 */
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* expect exactly the fixed number of saved crash messages */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:
	/* visorbus not registered yet: retry later at the slow poll rate */
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
2055
2056static void
d32517e3 2057bus_create_response(struct visor_device *bus_info, int response)
12e364b9 2058{
0274b5ae
DZ
2059 if (response >= 0) {
2060 bus_info->state.created = 1;
0274b5ae
DZ
2061 }
2062
2063 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
2064 response);
2065
2066 kfree(bus_info->pending_msg_hdr);
2067 bus_info->pending_msg_hdr = NULL;
12e364b9
KC
2068}
2069
/* Complete a CONTROLVM_BUS_DESTROY request: send the queued response and
 * release the saved message header.
 */
static void
bus_destroy_response(struct visor_device *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}
2079
2080static void
a298bc0b 2081device_create_response(struct visor_device *dev_info, int response)
12e364b9 2082{
0274b5ae
DZ
2083 if (response >= 0)
2084 dev_info->state.created = 1;
2085
2086 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
2087 response);
2088
2089 kfree(dev_info->pending_msg_hdr);
12e364b9
KC
2090}
2091
/* Complete a CONTROLVM_DEVICE_DESTROY request: send the queued response
 * and release the saved message header.
 */
static void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
2101
d3368a58 2102static void
a298bc0b 2103visorchipset_device_pause_response(struct visor_device *dev_info,
b4b598fd 2104 int response)
12e364b9 2105{
12e364b9 2106 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
b4b598fd 2107 dev_info, response,
bd0d2dcc 2108 segment_state_standby);
0274b5ae
DZ
2109
2110 kfree(dev_info->pending_msg_hdr);
2111 dev_info->pending_msg_hdr = NULL;
12e364b9 2112}
12e364b9
KC
2113
/* Complete a CONTROLVM_DEVICE_CHANGESTATE (resume) request: report the new
 * segment state as running, then release the saved message header.
 */
static void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
2124
18b87ed1 2125static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2126 struct device_attribute *attr,
2127 const char *buf, size_t count)
12e364b9 2128{
18b87ed1 2129 char msgtype[64];
12e364b9 2130
66e24b76
BR
2131 if (sscanf(buf, "%63s", msgtype) != 1)
2132 return -EINVAL;
2133
ebec8967 2134 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2135 chipset_events[0] = 1;
2136 return count;
ebec8967 2137 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2138 chipset_events[1] = 1;
2139 return count;
e22a4a0f
BR
2140 }
2141 return -EINVAL;
12e364b9
KC
2142}
2143
e56fa7cd
BR
2144/* The parahotplug/devicedisabled interface gets called by our support script
2145 * when an SR-IOV device has been shut down. The ID is passed to the script
2146 * and then passed back when the device has been removed.
2147 */
2148static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2149 struct device_attribute *attr,
2150 const char *buf, size_t count)
e56fa7cd 2151{
94217363 2152 unsigned int id;
e56fa7cd 2153
ebec8967 2154 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2155 return -EINVAL;
2156
2157 parahotplug_request_complete(id, 0);
2158 return count;
2159}
2160
2161/* The parahotplug/deviceenabled interface gets called by our support script
2162 * when an SR-IOV device has been recovered. The ID is passed to the script
2163 * and then passed back when the device has been brought back up.
2164 */
2165static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2166 struct device_attribute *attr,
2167 const char *buf, size_t count)
e56fa7cd 2168{
94217363 2169 unsigned int id;
e56fa7cd 2170
ebec8967 2171 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2172 return -EINVAL;
2173
2174 parahotplug_request_complete(id, 1);
2175 return count;
2176}
2177
e3420ed6
EA
/* mmap handler for the visorchipset character device.  Only page offset 0
 * (VISORCHIPSET_MMAP_CONTROLCHANOFFSET) is supported: it reads the
 * gp_control_channel physical address out of the controlvm channel header
 * and maps that region into the caller's address space.
 */
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		/* fetch the physical address of the gp control channel
		 * from the controlvm channel header
		 */
		visorchannel_read(*file_controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 gp_control_channel),
			&addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
2216
5f3a7e36
DK
/* Query the hypervisor for the guest's virtual time offset.
 * NOTE(review): result is declared u64 but returned as s64, and on this
 * vmcall the "result" slot carries the offset itself rather than a status
 * code — confirm against the ISSUE_IO_VMCALL definition.
 */
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;	/* this vmcall takes no parameter block */

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}
2226
/* Ask the hypervisor to adjust physical time by 'adjustment'.
 * Returns the raw vmcall result code (VMCALL_SUCCESS on success).
 */
static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}
2234
e3420ed6
EA
2235static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2236 unsigned long arg)
2237{
2238 s64 adjustment;
2239 s64 vrtc_offset;
2240
2241 switch (cmd) {
2242 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2243 /* get the physical rtc offset */
2244 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2245 if (copy_to_user((void __user *)arg, &vrtc_offset,
2246 sizeof(vrtc_offset))) {
2247 return -EFAULT;
2248 }
d5b3f1dc 2249 return 0;
e3420ed6
EA
2250 case VMCALL_UPDATE_PHYSICAL_TIME:
2251 if (copy_from_user(&adjustment, (void __user *)arg,
2252 sizeof(adjustment))) {
2253 return -EFAULT;
2254 }
2255 return issue_vmcall_update_physical_time(adjustment);
2256 default:
2257 return -EFAULT;
2258 }
2259}
2260
/* File operations for the visorchipset character device: time-offset
 * ioctls plus mmap of the gp control channel; no read/write support.
 */
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.read = NULL,
	.write = NULL,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};
2270
0f570fc0 2271static int
e3420ed6
EA
2272visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2273{
2274 int rc = 0;
2275
2276 file_controlvm_channel = controlvm_channel;
2277 cdev_init(&file_cdev, &visorchipset_fops);
2278 file_cdev.owner = THIS_MODULE;
2279 if (MAJOR(major_dev) == 0) {
46168810 2280 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
e3420ed6
EA
2281 /* dynamic major device number registration required */
2282 if (rc < 0)
2283 return rc;
2284 } else {
2285 /* static major device number registration required */
46168810 2286 rc = register_chrdev_region(major_dev, 1, "visorchipset");
e3420ed6
EA
2287 if (rc < 0)
2288 return rc;
2289 }
2290 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2291 if (rc < 0) {
2292 unregister_chrdev_region(major_dev, 1);
2293 return rc;
2294 }
2295 return 0;
2296}
2297
55c67dca
PB
2298static int
2299visorchipset_init(struct acpi_device *acpi_device)
12e364b9 2300{
33078257 2301 int rc = 0;
d5b3f1dc 2302 u64 addr;
d3368a58
JS
2303 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2304 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2305
2306 addr = controlvm_get_channel_address();
2307 if (!addr)
2308 return -ENODEV;
12e364b9 2309
4da3336c 2310 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
84982fbf 2311 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
12e364b9 2312
d3368a58
JS
2313 controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
2314 GFP_KERNEL, uuid);
2315 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2316 visorchannel_get_header(controlvm_channel))) {
2317 initialize_controlvm_payload();
8a1182eb 2318 } else {
d3368a58
JS
2319 visorchannel_destroy(controlvm_channel);
2320 controlvm_channel = NULL;
8a1182eb
BR
2321 return -ENODEV;
2322 }
2323
5aa8ae57
BR
2324 major_dev = MKDEV(visorchipset_major, 0);
2325 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2326 if (rc < 0) {
4cb005a9 2327 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2328 goto cleanup;
4cb005a9 2329 }
9f8d0e8b 2330
da021f02 2331 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2332
4da3336c
DK
2333 /* if booting in a crash kernel */
2334 if (is_kdump_kernel())
2335 INIT_DELAYED_WORK(&periodic_controlvm_work,
2336 setup_crash_devices_work_queue);
2337 else
2338 INIT_DELAYED_WORK(&periodic_controlvm_work,
2339 controlvm_periodic_work);
2340 periodic_controlvm_workqueue =
2341 create_singlethread_workqueue("visorchipset_controlvm");
2342
2343 if (!periodic_controlvm_workqueue) {
2344 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2345 DIAG_SEVERITY_ERR);
2346 rc = -ENOMEM;
2347 goto cleanup;
2348 }
2349 most_recent_message_jiffies = jiffies;
2350 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2351 rc = queue_delayed_work(periodic_controlvm_workqueue,
2352 &periodic_controlvm_work, poll_jiffies);
2353 if (rc < 0) {
2354 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2355 DIAG_SEVERITY_ERR);
2356 goto cleanup;
12e364b9
KC
2357 }
2358
eb34e877
BR
2359 visorchipset_platform_device.dev.devt = major_dev;
2360 if (platform_device_register(&visorchipset_platform_device) < 0) {
4cb005a9
KC
2361 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2362 rc = -1;
a6a3989b 2363 goto cleanup;
4cb005a9 2364 }
12e364b9 2365 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
c79b28f7
PB
2366
2367 rc = visorbus_init();
a6a3989b 2368cleanup:
12e364b9 2369 if (rc) {
12e364b9
KC
2370 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2371 POSTCODE_SEVERITY_ERR);
2372 }
2373 return rc;
2374}
2375
0f570fc0 2376static void
e3420ed6
EA
2377visorchipset_file_cleanup(dev_t major_dev)
2378{
2379 if (file_cdev.ops)
2380 cdev_del(&file_cdev);
2381 file_cdev.ops = NULL;
2382 unregister_chrdev_region(major_dev, 1);
2383}
2384
55c67dca
PB
/* ACPI remove callback: tear down what visorchipset_init() set up —
 * visorbus first, then the periodic controlvm work, the payload area, the
 * controlvm channel, and finally the character device.
 *
 * NOTE(review): visorchipset_init() registers visorchipset_platform_device
 * but no platform_device_unregister() appears here — confirm whether that
 * teardown happens elsewhere.
 */
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	/* stop and drain the periodic polling before freeing its queue */
	cancel_delayed_work(&periodic_controlvm_work);
	flush_workqueue(periodic_controlvm_workqueue);
	destroy_workqueue(periodic_controlvm_workqueue);
	periodic_controlvm_workqueue = NULL;
	destroy_controlvm_payload_info(&controlvm_payload_info);

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
2407
/* ACPI IDs this driver binds to; the empty entry terminates the table.
 * NOTE(review): no MODULE_DEVICE_TABLE(acpi, ...) is visible for this
 * table, so no module alias is generated for autoloading — confirm this
 * is intentional (the driver is loaded explicitly via init_unisys()).
 */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

/* ACPI driver glue: add/remove map to visorchipset init/exit. */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
d5b3f1dc
EA
2423static __init uint32_t visorutil_spar_detect(void)
2424{
2425 unsigned int eax, ebx, ecx, edx;
2426
2427 if (cpu_has_hypervisor) {
2428 /* check the ID */
2429 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2430 return (ebx == UNISYS_SPAR_ID_EBX) &&
2431 (ecx == UNISYS_SPAR_ID_ECX) &&
2432 (edx == UNISYS_SPAR_ID_EDX);
2433 } else {
2434 return 0;
2435 }
2436}
55c67dca
PB
2437
2438static int init_unisys(void)
2439{
2440 int result;
d5b3f1dc 2441 if (!visorutil_spar_detect())
55c67dca
PB
2442 return -ENODEV;
2443
2444 result = acpi_bus_register_driver(&unisys_acpi_driver);
2445 if (result)
2446 return -ENODEV;
2447
2448 pr_info("Unisys Visorchipset Driver Loaded.\n");
2449 return 0;
2450};
2451
/* Module exit: unregister the ACPI driver, which invokes
 * visorchipset_exit() for any bound device.
 */
static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
2456
12e364b9 2457module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2458MODULE_PARM_DESC(visorchipset_major,
2459 "major device number to use for the device node");
4da3336c
DK
2460module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2461MODULE_PARM_DESC(visorchipset_visorbusreqwait,
12e364b9 2462 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2463module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2464 int, S_IRUGO);
2465MODULE_PARM_DESC(visorchipset_holdchipsetready,
2466 "1 to hold response to CHIPSET_READY");
b615d628 2467
55c67dca
PB
2468module_init(init_unisys);
2469module_exit(exit_unisys);
12e364b9
KC
2470
2471MODULE_AUTHOR("Unisys");
2472MODULE_LICENSE("GPL");
2473MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2474 VERSION);
2475MODULE_VERSION(VERSION);
This page took 0.528833 seconds and 5 git commands to generate.