staging: unisys: move visorchipset files to visorbus
[deliverable/linux.git] / drivers / staging / unisys / visorbus / visorchipset.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
46168810
EA
18#include "memregion.h"
19#include "controlvmchannel.h"
7023638c 20#include "version.h"
12e364b9 21#include "procobjecttree.h"
f6439218 22#include "visorbus.h"
12e364b9 23#include "periodic_work.h"
12e364b9 24#include "uisutils.h"
12e364b9
KC
25#include "controlvmcompletionstatus.h"
26#include "guestlinuxdebug.h"
c79b28f7 27#include "visorbus_private.h"
12e364b9 28
46168810
EA
29
30#include <linux/ctype.h>
e3420ed6
EA
31#include <linux/fs.h>
32#include <linux/mm.h>
12e364b9
KC
33#include <linux/nls.h>
34#include <linux/netdevice.h>
35#include <linux/platform_device.h>
90addb02 36#include <linux/uuid.h>
1ba00980 37#include <linux/crash_dump.h>
12e364b9
KC
38
39#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
40#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
41 * vnic loopback test */
42#define TEST_VNIC_SWITCHNO 1
43#define TEST_VNIC_BUSNO 9
44
45#define MAX_NAME_SIZE 128
46#define MAX_IP_SIZE 50
47#define MAXOUTSTANDINGCHANNELCOMMAND 256
48#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
49#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
50
46168810 51#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
b615d628
JS
52/*
53 * Module parameters
54 */
55static int visorchipset_testvnic;
56static int visorchipset_testvnicclient;
57static int visorchipset_testmsg;
58static int visorchipset_major;
59static int visorchipset_serverregwait;
60static int visorchipset_clientregwait = 1; /* default is on */
61static int visorchipset_testteardown;
62static int visorchipset_disable_controlvm;
63static int visorchipset_holdchipsetready;
46168810 64static unsigned long controlvm_payload_bytes_buffered;
b615d628 65
e3420ed6
EA
66static int
67visorchipset_open(struct inode *inode, struct file *file)
68{
69 unsigned minor_number = iminor(inode);
70
71 if (minor_number)
72 return -ENODEV;
73 file->private_data = NULL;
74 return 0;
75}
76
/* Release handler for /dev/visorchipset; no per-open state to tear down. */
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
82
12e364b9
KC
83/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
84* we switch to slow polling mode. As soon as we get a controlvm
85* message, we switch back to fast polling mode.
86*/
87#define MIN_IDLE_SECONDS 10
52063eca
JS
88static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
89static unsigned long most_recent_message_jiffies; /* when we got our last
bd5b9b32 90 * controlvm message */
12e364b9
KC
91static int serverregistered;
92static int clientregistered;
93
94#define MAX_CHIPSET_EVENTS 2
c242233e 95static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 96
46168810
EA
/* Holds a buffered copy of a controlvm message payload while it is being
 * parsed; created by parser_init()/parser_init_byte_stream(), freed by
 * parser_done().
 */
struct parser_context {
	unsigned long allocbytes;	/* total bytes allocated for this ctx */
	unsigned long param_bytes;	/* payload bytes copied into data[] */
	u8 *curr;			/* parse cursor within data[] */
	unsigned long bytes_remaining;	/* bytes left from curr to end */
	bool byte_stream;		/* true = raw bytes, no std header */
	char data[0];			/* payload copy (flexible array) */
};
105
9232d2d6
BR
106static struct delayed_work periodic_controlvm_work;
107static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 108static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 109
e3420ed6
EA
110static struct cdev file_cdev;
111static struct visorchannel **file_controlvm_channel;
da021f02 112static struct controlvm_message_header g_chipset_msg_hdr;
59827f00 113static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 114 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 115/* 0xffffff is an invalid Bus/Device number */
52063eca
JS
116static u32 g_diagpool_bus_no = 0xffffff;
117static u32 g_diagpool_dev_no = 0xffffff;
4f44b72d 118static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9
KC
119
120/* Only VNIC and VHBA channels are sent to visorclientbus (aka
121 * "visorhackbus")
122 */
123#define FOR_VISORHACKBUS(channel_type_guid) \
9eee5d1f 124 (((uuid_le_cmp(channel_type_guid,\
0639ba67
BR
125 spar_vnic_channel_protocol_uuid) == 0) ||\
126 (uuid_le_cmp(channel_type_guid,\
9eee5d1f 127 spar_vhba_channel_protocol_uuid) == 0)))
12e364b9
KC
128#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
129
130#define is_diagpool_channel(channel_type_guid) \
59827f00
BR
131 (uuid_le_cmp(channel_type_guid,\
132 spar_diag_pool_channel_protocol_uuid) == 0)
12e364b9 133
1390b88c
BR
134static LIST_HEAD(bus_info_list);
135static LIST_HEAD(dev_info_list);
12e364b9 136
c3d9a224 137static struct visorchannel *controlvm_channel;
12e364b9 138
84982fbf 139/* Manages the request payload in the controlvm channel */
c1f834eb 140struct visor_controlvm_payload_info {
c242233e 141 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 142 u64 offset; /* offset from beginning of controlvm
12e364b9 143 * channel to beginning of payload * pool */
b3c55b13 144 u32 bytes; /* number of bytes in payload pool */
c1f834eb
JS
145};
146
147static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 148
ea33b4ee
BR
149/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
150 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
151 */
c1f834eb 152struct visor_livedump_info {
ea33b4ee
BR
153 struct controlvm_message_header dumpcapture_header;
154 struct controlvm_message_header gettextdump_header;
155 struct controlvm_message_header dumpcomplete_header;
f4c11551 156 bool gettextdump_outstanding;
12e364b9 157 u32 crc32;
52063eca 158 unsigned long length;
12e364b9 159 atomic_t buffers_in_use;
52063eca 160 unsigned long destination;
c1f834eb
JS
161};
162
163static struct visor_livedump_info livedump_info;
12e364b9
KC
164
165/* The following globals are used to handle the scenario where we are unable to
166 * offload the payload from a controlvm message due to memory requirements. In
167 * this scenario, we simply stash the controlvm message, then attempt to
168 * process it again the next time controlvm_periodic_work() runs.
169 */
7166ed19 170static struct controlvm_message controlvm_pending_msg;
c79b28f7 171static bool controlvm_pending_msg_valid;
12e364b9 172
12e364b9
KC
173/* This identifies a data buffer that has been received via a controlvm messages
174 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
175 */
176struct putfile_buffer_entry {
177 struct list_head next; /* putfile_buffer_entry list */
317d9614 178 struct parser_context *parser_ctx; /* points to input data buffer */
12e364b9
KC
179};
180
181/* List of struct putfile_request *, via next_putfile_request member.
182 * Each entry in this list identifies an outstanding TRANSMIT_FILE
183 * conversation.
184 */
1eee0011 185static LIST_HEAD(putfile_request_list);
12e364b9
KC
186
187/* This describes a buffer and its current state of transfer (e.g., how many
188 * bytes have already been supplied as putfile data, and how many bytes are
189 * remaining) for a putfile_request.
190 */
191struct putfile_active_buffer {
192 /* a payload from a controlvm message, containing a file data buffer */
317d9614 193 struct parser_context *parser_ctx;
12e364b9
KC
194 /* points within data area of parser_ctx to next byte of data */
195 u8 *pnext;
196 /* # bytes left from <pnext> to the end of this data buffer */
197 size_t bytes_remaining;
198};
199
200#define PUTFILE_REQUEST_SIG 0x0906101302281211
201/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
202 * conversation. Structs of this type are dynamically linked into
203 * <Putfile_request_list>.
204 */
205struct putfile_request {
206 u64 sig; /* PUTFILE_REQUEST_SIG */
207
208 /* header from original TransmitFile request */
98d7b594 209 struct controlvm_message_header controlvm_header;
12e364b9
KC
210 u64 file_request_number; /* from original TransmitFile request */
211
212 /* link to next struct putfile_request */
213 struct list_head next_putfile_request;
214
215 /* most-recent sequence number supplied via a controlvm message */
216 u64 data_sequence_number;
217
218 /* head of putfile_buffer_entry list, which describes the data to be
219 * supplied as putfile data;
220 * - this list is added to when controlvm messages come in that supply
221 * file data
222 * - this list is removed from via the hotplug program that is actually
223 * consuming these buffers to write as file data */
224 struct list_head input_buffer_list;
225 spinlock_t req_list_lock; /* lock for input_buffer_list */
226
227 /* waiters for input_buffer_list to go non-empty */
228 wait_queue_head_t input_buffer_wq;
229
230 /* data not yet read within current putfile_buffer_entry */
231 struct putfile_active_buffer active_buf;
232
233 /* <0 = failed, 0 = in-progress, >0 = successful; */
234 /* note that this must be set with req_list_lock, and if you set <0, */
235 /* it is your responsibility to also free up all of the other objects */
236 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
237 /* before releasing the lock */
238 int completion_status;
239};
240
12e364b9
KC
241struct parahotplug_request {
242 struct list_head list;
243 int id;
244 unsigned long expiration;
3ab47701 245 struct controlvm_message msg;
12e364b9
KC
246};
247
ddf5de53
BR
248static LIST_HEAD(parahotplug_request_list);
249static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
250static void parahotplug_process_list(void);
251
252/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
253 * CONTROLVM_REPORTEVENT.
254 */
6fe345af
BR
255static struct visorchipset_busdev_notifiers busdev_server_notifiers;
256static struct visorchipset_busdev_notifiers busdev_client_notifiers;
12e364b9 257
52063eca
JS
258static void bus_create_response(u32 bus_no, int response);
259static void bus_destroy_response(u32 bus_no, int response);
260static void device_create_response(u32 bus_no, u32 dev_no, int response);
261static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
262static void device_resume_response(u32 bus_no, u32 dev_no, int response);
12e364b9 263
8e3fedd6 264static struct visorchipset_busdev_responders busdev_responders = {
12e364b9
KC
265 .bus_create = bus_create_response,
266 .bus_destroy = bus_destroy_response,
267 .device_create = device_create_response,
268 .device_destroy = device_destroy_response,
927c7927 269 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
270 .device_resume = device_resume_response,
271};
272
273/* info for /dev/visorchipset */
5aa8ae57 274static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 275
19f6634f
BR
276/* prototypes for attributes */
277static ssize_t toolaction_show(struct device *dev,
8e76e695 278 struct device_attribute *attr, char *buf);
19f6634f 279static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
280 struct device_attribute *attr,
281 const char *buf, size_t count);
19f6634f
BR
282static DEVICE_ATTR_RW(toolaction);
283
54b31229 284static ssize_t boottotool_show(struct device *dev,
8e76e695 285 struct device_attribute *attr, char *buf);
54b31229 286static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
287 struct device_attribute *attr, const char *buf,
288 size_t count);
54b31229
BR
289static DEVICE_ATTR_RW(boottotool);
290
422af17c 291static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 292 char *buf);
422af17c 293static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 294 const char *buf, size_t count);
422af17c
BR
295static DEVICE_ATTR_RW(error);
296
297static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 298 char *buf);
422af17c 299static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 300 const char *buf, size_t count);
422af17c
BR
301static DEVICE_ATTR_RW(textid);
302
303static ssize_t remaining_steps_show(struct device *dev,
8e76e695 304 struct device_attribute *attr, char *buf);
422af17c 305static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
306 struct device_attribute *attr,
307 const char *buf, size_t count);
422af17c
BR
308static DEVICE_ATTR_RW(remaining_steps);
309
18b87ed1 310static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
311 struct device_attribute *attr,
312 const char *buf, size_t count);
18b87ed1
BR
313static DEVICE_ATTR_WO(chipsetready);
314
e56fa7cd 315static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
316 struct device_attribute *attr,
317 const char *buf, size_t count);
e56fa7cd
BR
318static DEVICE_ATTR_WO(devicedisabled);
319
320static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
321 struct device_attribute *attr,
322 const char *buf, size_t count);
e56fa7cd
BR
323static DEVICE_ATTR_WO(deviceenabled);
324
19f6634f
BR
325static struct attribute *visorchipset_install_attrs[] = {
326 &dev_attr_toolaction.attr,
54b31229 327 &dev_attr_boottotool.attr,
422af17c
BR
328 &dev_attr_error.attr,
329 &dev_attr_textid.attr,
330 &dev_attr_remaining_steps.attr,
19f6634f
BR
331 NULL
332};
333
334static struct attribute_group visorchipset_install_group = {
335 .name = "install",
336 .attrs = visorchipset_install_attrs
337};
338
18b87ed1
BR
339static struct attribute *visorchipset_guest_attrs[] = {
340 &dev_attr_chipsetready.attr,
341 NULL
342};
343
344static struct attribute_group visorchipset_guest_group = {
345 .name = "guest",
346 .attrs = visorchipset_guest_attrs
347};
348
e56fa7cd
BR
349static struct attribute *visorchipset_parahotplug_attrs[] = {
350 &dev_attr_devicedisabled.attr,
351 &dev_attr_deviceenabled.attr,
352 NULL
353};
354
355static struct attribute_group visorchipset_parahotplug_group = {
356 .name = "parahotplug",
357 .attrs = visorchipset_parahotplug_attrs
358};
359
19f6634f
BR
360static const struct attribute_group *visorchipset_dev_groups[] = {
361 &visorchipset_install_group,
18b87ed1 362 &visorchipset_guest_group,
e56fa7cd 363 &visorchipset_parahotplug_group,
19f6634f
BR
364 NULL
365};
366
12e364b9 367/* /sys/devices/platform/visorchipset */
eb34e877 368static struct platform_device visorchipset_platform_device = {
12e364b9
KC
369 .name = "visorchipset",
370 .id = -1,
19f6634f 371 .dev.groups = visorchipset_dev_groups,
12e364b9
KC
372};
373
374/* Function prototypes */
b3168c70 375static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
376 int response);
377static void controlvm_respond_chipset_init(
b3168c70 378 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
379 enum ultra_chipset_feature features);
380static void controlvm_respond_physdev_changestate(
b3168c70 381 struct controlvm_message_header *msg_hdr, int response,
98d7b594 382 struct spar_segment_state state);
12e364b9 383
46168810
EA
384
385static struct parser_context *
386parser_init_guts(u64 addr, u32 bytes, bool local,
387 bool standard_payload_header, bool *retry)
388{
389 int allocbytes = sizeof(struct parser_context) + bytes;
390 struct parser_context *rc = NULL;
391 struct parser_context *ctx = NULL;
392 struct memregion *rgn = NULL;
393 struct spar_controlvm_parameters_header *phdr = NULL;
394
395 if (retry)
396 *retry = false;
397 if (!standard_payload_header)
398 /* alloc and 0 extra byte to ensure payload is
399 * '\0'-terminated
400 */
401 allocbytes++;
402 if ((controlvm_payload_bytes_buffered + bytes)
403 > MAX_CONTROLVM_PAYLOAD_BYTES) {
404 if (retry)
405 *retry = true;
406 rc = NULL;
407 goto cleanup;
408 }
409 ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
410 if (!ctx) {
411 if (retry)
412 *retry = true;
413 rc = NULL;
414 goto cleanup;
415 }
416
417 ctx->allocbytes = allocbytes;
418 ctx->param_bytes = bytes;
419 ctx->curr = NULL;
420 ctx->bytes_remaining = 0;
421 ctx->byte_stream = false;
422 if (local) {
423 void *p;
424
425 if (addr > virt_to_phys(high_memory - 1)) {
426 rc = NULL;
427 goto cleanup;
428 }
429 p = __va((unsigned long) (addr));
430 memcpy(ctx->data, p, bytes);
431 } else {
432 rgn = visor_memregion_create(addr, bytes);
433 if (!rgn) {
434 rc = NULL;
435 goto cleanup;
436 }
437 if (visor_memregion_read(rgn, 0, ctx->data, bytes) < 0) {
438 rc = NULL;
439 goto cleanup;
440 }
441 }
442 if (!standard_payload_header) {
443 ctx->byte_stream = true;
444 rc = ctx;
445 goto cleanup;
446 }
447 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
448 if (phdr->total_length != bytes) {
449 rc = NULL;
450 goto cleanup;
451 }
452 if (phdr->total_length < phdr->header_length) {
453 rc = NULL;
454 goto cleanup;
455 }
456 if (phdr->header_length <
457 sizeof(struct spar_controlvm_parameters_header)) {
458 rc = NULL;
459 goto cleanup;
460 }
461
462 rc = ctx;
463cleanup:
464 if (rgn) {
465 visor_memregion_destroy(rgn);
466 rgn = NULL;
467 }
468 if (rc) {
469 controlvm_payload_bytes_buffered += ctx->param_bytes;
470 } else {
471 if (ctx) {
472 parser_done(ctx);
473 ctx = NULL;
474 }
475 }
476 return rc;
477}
478
479struct parser_context *
480parser_init(u64 addr, u32 bytes, bool local, bool *retry)
481{
482 return parser_init_guts(addr, bytes, local, true, retry);
483}
484
485/* Call this instead of parser_init() if the payload area consists of just
486 * a sequence of bytes, rather than a struct spar_controlvm_parameters_header
487 * structures. Afterwards, you can call parser_simpleString_get() or
488 * parser_byteStream_get() to obtain the data.
489 */
490struct parser_context *
491parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
492{
493 return parser_init_guts(addr, bytes, local, false, retry);
494}
495
496/* Obtain '\0'-terminated copy of string in payload area.
497 */
498char *
499parser_simpleString_get(struct parser_context *ctx)
500{
501 if (!ctx->byte_stream)
502 return NULL;
503 return ctx->data; /* note this IS '\0'-terminated, because of
504 * the num of bytes we alloc+clear in
505 * parser_init_byteStream() */
506}
507
508/* Obtain a copy of the buffer in the payload area.
509 */
510void *parser_byte_stream_get(struct parser_context *ctx, unsigned long *nbytes)
511{
512 if (!ctx->byte_stream)
513 return NULL;
514 if (nbytes)
515 *nbytes = ctx->param_bytes;
516 return (void *)ctx->data;
517}
518
519uuid_le
520parser_id_get(struct parser_context *ctx)
521{
522 struct spar_controlvm_parameters_header *phdr = NULL;
523
524 if (ctx == NULL)
525 return NULL_UUID_LE;
526 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
527 return phdr->id;
528}
529
530void
531parser_param_start(struct parser_context *ctx, PARSER_WHICH_STRING which_string)
532{
533 struct spar_controlvm_parameters_header *phdr = NULL;
534
535 if (ctx == NULL)
536 goto Away;
537 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
538 switch (which_string) {
539 case PARSERSTRING_INITIATOR:
540 ctx->curr = ctx->data + phdr->initiator_offset;
541 ctx->bytes_remaining = phdr->initiator_length;
542 break;
543 case PARSERSTRING_TARGET:
544 ctx->curr = ctx->data + phdr->target_offset;
545 ctx->bytes_remaining = phdr->target_length;
546 break;
547 case PARSERSTRING_CONNECTION:
548 ctx->curr = ctx->data + phdr->connection_offset;
549 ctx->bytes_remaining = phdr->connection_length;
550 break;
551 case PARSERSTRING_NAME:
552 ctx->curr = ctx->data + phdr->name_offset;
553 ctx->bytes_remaining = phdr->name_length;
554 break;
555 default:
556 break;
557 }
558
559Away:
560 return;
561}
562
563void
564parser_done(struct parser_context *ctx)
565{
566 if (!ctx)
567 return;
568 controlvm_payload_bytes_buffered -= ctx->param_bytes;
569 kfree(ctx);
570}
571
/** Return the length of the first @len chars of @s, not counting
 * trailing whitespace (per isspace()). The buffer is never written,
 * hence the const qualifier (backward-compatible tightening).
 */
static int
string_length_no_trail(const char *s, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (!isspace(s[i]))
			return i + 1;
	}
	return 0;
}
585
586/** Grab the next name and value out of the parameter buffer.
587 * The entire parameter buffer looks like this:
588 * <name>=<value>\0
589 * <name>=<value>\0
590 * ...
591 * \0
592 * If successful, the next <name> value is returned within the supplied
593 * <nam> buffer (the value is always upper-cased), and the corresponding
594 * <value> is returned within a kmalloc()ed buffer, whose pointer is
595 * provided as the return value of this function.
596 * (The total number of bytes allocated is strlen(<value>)+1.)
597 *
598 * NULL is returned to indicate failure, which can occur for several reasons:
599 * - all <name>=<value> pairs have already been processed
600 * - bad parameter
601 * - parameter buffer ends prematurely (couldn't find an '=' or '\0' within
602 * the confines of the parameter buffer)
603 * - the <nam> buffer is not large enough to hold the <name> of the next
604 * parameter
605 */
/* Pull the next "<name>:<value>" pair off the parse cursor set up by
 * parser_param_start(). The upper-cased, space-trimmed name is written
 * into @nam (capacity @namesize); the value is returned in a kmalloc()ed
 * buffer the caller must kfree(). Values may be quoted with ' or ", and
 * pairs are separated by ',' or ';'. Returns NULL when all pairs are
 * consumed or the buffer is malformed.
 * NOTE(review): the block comment above this function says "<name>=<value>";
 * the code actually splits on ':' — the comment appears stale.
 */
void *
parser_param_get(struct parser_context *ctx, char *nam, int namesize)
{
	u8 *pscan, *pnam = nam;
	unsigned long nscan;
	int value_length = -1, orig_value_length = -1;
	void *value = NULL;
	int i;
	int closing_quote = 0;	/* 0, or the quote char to match */

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (*pscan == '\0')
		/* This is the normal return point after you have processed
		 * all of the <name>=<value> pairs in a syntactically-valid
		 * parameter buffer.
		 */
		return NULL;

	/* skip whitespace */
	while (isspace(*pscan)) {
		pscan++;
		nscan--;
		if (nscan == 0)
			return NULL;
	}

	/* copy the name, upper-cased, up to the ':' separator */
	while (*pscan != ':') {
		if (namesize <= 0)
			return NULL;	/* name too long for caller's buf */
		*pnam = toupper(*pscan);
		pnam++;
		namesize--;
		pscan++;
		nscan--;
		if (nscan == 0)
			return NULL;	/* ran out of data before ':' */
	}
	if (namesize <= 0)
		return NULL;		/* no room left for the NUL */
	*pnam = '\0';
	/* trim trailing whitespace from the copied name */
	nam[string_length_no_trail(nam, strlen(nam))] = '\0';

	/* point to char immediately after ":" in "<name>:<value>" */
	pscan++;
	nscan--;
	/* skip whitespace */
	while (isspace(*pscan)) {
		pscan++;
		nscan--;
		if (nscan == 0)
			return NULL;
	}
	if (nscan == 0)
		return NULL;
	if (*pscan == '\'' || *pscan == '"') {
		/* quoted value: remember which quote must close it */
		closing_quote = *pscan;
		pscan++;
		nscan--;
		if (nscan == 0)
			return NULL;
	}

	/* look for a separator character, terminator character, or
	 * end of data
	 */
	for (i = 0, value_length = -1; i < nscan; i++) {
		if (closing_quote) {
			if (pscan[i] == '\0')
				return NULL;	/* unterminated quote */
			if (pscan[i] == closing_quote) {
				value_length = i;
				break;
			}
		} else
			if (pscan[i] == ',' || pscan[i] == ';'
			    || pscan[i] == '\0') {
				value_length = i;
				break;
			}
	}
	if (value_length < 0) {
		if (closing_quote)
			return NULL;	/* quote never closed */
		value_length = nscan;	/* unquoted value runs to end */
	}
	orig_value_length = value_length;
	/* unquoted values also lose trailing whitespace */
	if (closing_quote == 0)
		value_length = string_length_no_trail(pscan, orig_value_length);
	value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
	if (value == NULL)
		return NULL;
	memcpy(value, pscan, value_length);
	((u8 *) (value))[value_length] = '\0';

	/* advance past the (untrimmed) value text */
	pscan += orig_value_length;
	nscan -= orig_value_length;

	/* skip past separator or closing quote */
	if (nscan > 0) {
		if (*pscan != '\0') {
			pscan++;
			nscan--;
		}
	}

	if (closing_quote && (nscan > 0)) {
		/* we still need to skip around the real separator if present */
		/* first, skip whitespace */
		while (isspace(*pscan)) {
			pscan++;
			nscan--;
			if (nscan == 0)
				break;
		}
		if (nscan > 0) {
			if (*pscan == ',' || *pscan == ';') {
				pscan++;
				nscan--;
			} else if (*pscan != '\0') {
				/* garbage after closing quote: fail */
				kfree(value);
				value = NULL;
				return NULL;
			}
		}
	}
	/* commit the advanced cursor back to the context */
	ctx->curr = pscan;
	ctx->bytes_remaining = nscan;
	return value;
}
740
741void *
742parser_string_get(struct parser_context *ctx)
743{
744 u8 *pscan;
745 unsigned long nscan;
746 int value_length = -1;
747 void *value = NULL;
748 int i;
749
750 if (!ctx)
751 return NULL;
752 pscan = ctx->curr;
753 nscan = ctx->bytes_remaining;
754 if (nscan == 0)
755 return NULL;
756 if (!pscan)
757 return NULL;
758 for (i = 0, value_length = -1; i < nscan; i++)
759 if (pscan[i] == '\0') {
760 value_length = i;
761 break;
762 }
763 if (value_length < 0) /* '\0' was not included in the length */
764 value_length = nscan;
765 value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
766 if (value == NULL)
767 return NULL;
768 if (value_length > 0)
769 memcpy(value, pscan, value_length);
770 ((u8 *) (value))[value_length] = '\0';
771 return value;
772}
773
774
d746cb55
VB
775static ssize_t toolaction_show(struct device *dev,
776 struct device_attribute *attr,
777 char *buf)
19f6634f 778{
01f4d85a 779 u8 tool_action;
19f6634f 780
c3d9a224 781 visorchannel_read(controlvm_channel,
d19642f6 782 offsetof(struct spar_controlvm_channel_protocol,
8e76e695 783 tool_action), &tool_action, sizeof(u8));
01f4d85a 784 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
785}
786
d746cb55
VB
787static ssize_t toolaction_store(struct device *dev,
788 struct device_attribute *attr,
789 const char *buf, size_t count)
19f6634f 790{
01f4d85a 791 u8 tool_action;
66e24b76 792 int ret;
19f6634f 793
ebec8967 794 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
795 return -EINVAL;
796
c3d9a224 797 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
798 offsetof(struct spar_controlvm_channel_protocol,
799 tool_action),
01f4d85a 800 &tool_action, sizeof(u8));
66e24b76
BR
801
802 if (ret)
803 return ret;
e22a4a0f 804 return count;
19f6634f
BR
805}
806
d746cb55
VB
807static ssize_t boottotool_show(struct device *dev,
808 struct device_attribute *attr,
809 char *buf)
54b31229 810{
365522d9 811 struct efi_spar_indication efi_spar_indication;
54b31229 812
c3d9a224 813 visorchannel_read(controlvm_channel,
8e76e695
BR
814 offsetof(struct spar_controlvm_channel_protocol,
815 efi_spar_ind), &efi_spar_indication,
816 sizeof(struct efi_spar_indication));
54b31229 817 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 818 efi_spar_indication.boot_to_tool);
54b31229
BR
819}
820
d746cb55
VB
821static ssize_t boottotool_store(struct device *dev,
822 struct device_attribute *attr,
823 const char *buf, size_t count)
54b31229 824{
66e24b76 825 int val, ret;
365522d9 826 struct efi_spar_indication efi_spar_indication;
54b31229 827
ebec8967 828 if (kstrtoint(buf, 10, &val))
66e24b76
BR
829 return -EINVAL;
830
365522d9 831 efi_spar_indication.boot_to_tool = val;
c3d9a224 832 ret = visorchannel_write(controlvm_channel,
d19642f6 833 offsetof(struct spar_controlvm_channel_protocol,
8e76e695
BR
834 efi_spar_ind), &(efi_spar_indication),
835 sizeof(struct efi_spar_indication));
66e24b76
BR
836
837 if (ret)
838 return ret;
e22a4a0f 839 return count;
54b31229 840}
422af17c
BR
841
842static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 843 char *buf)
422af17c
BR
844{
845 u32 error;
846
8e76e695
BR
847 visorchannel_read(controlvm_channel,
848 offsetof(struct spar_controlvm_channel_protocol,
849 installation_error),
850 &error, sizeof(u32));
422af17c
BR
851 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
852}
853
854static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 855 const char *buf, size_t count)
422af17c
BR
856{
857 u32 error;
66e24b76 858 int ret;
422af17c 859
ebec8967 860 if (kstrtou32(buf, 10, &error))
66e24b76
BR
861 return -EINVAL;
862
c3d9a224 863 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
864 offsetof(struct spar_controlvm_channel_protocol,
865 installation_error),
866 &error, sizeof(u32));
66e24b76
BR
867 if (ret)
868 return ret;
e22a4a0f 869 return count;
422af17c
BR
870}
871
872static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 873 char *buf)
422af17c 874{
10dbf0e3 875 u32 text_id;
422af17c 876
8e76e695
BR
877 visorchannel_read(controlvm_channel,
878 offsetof(struct spar_controlvm_channel_protocol,
879 installation_text_id),
880 &text_id, sizeof(u32));
10dbf0e3 881 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
882}
883
884static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 885 const char *buf, size_t count)
422af17c 886{
10dbf0e3 887 u32 text_id;
66e24b76 888 int ret;
422af17c 889
ebec8967 890 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
891 return -EINVAL;
892
c3d9a224 893 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
894 offsetof(struct spar_controlvm_channel_protocol,
895 installation_text_id),
896 &text_id, sizeof(u32));
66e24b76
BR
897 if (ret)
898 return ret;
e22a4a0f 899 return count;
422af17c
BR
900}
901
422af17c 902static ssize_t remaining_steps_show(struct device *dev,
8e76e695 903 struct device_attribute *attr, char *buf)
422af17c 904{
ee8da290 905 u16 remaining_steps;
422af17c 906
c3d9a224 907 visorchannel_read(controlvm_channel,
8e76e695
BR
908 offsetof(struct spar_controlvm_channel_protocol,
909 installation_remaining_steps),
910 &remaining_steps, sizeof(u16));
ee8da290 911 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
912}
913
914static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
915 struct device_attribute *attr,
916 const char *buf, size_t count)
422af17c 917{
ee8da290 918 u16 remaining_steps;
66e24b76 919 int ret;
422af17c 920
ebec8967 921 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
922 return -EINVAL;
923
c3d9a224 924 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
925 offsetof(struct spar_controlvm_channel_protocol,
926 installation_remaining_steps),
927 &remaining_steps, sizeof(u16));
66e24b76
BR
928 if (ret)
929 return ret;
e22a4a0f 930 return count;
422af17c
BR
931}
932
12e364b9 933static void
9b989a98 934bus_info_clear(void *v)
12e364b9 935{
bbd4be30 936 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;
12e364b9 937
12e364b9 938 kfree(p->name);
12e364b9 939 kfree(p->description);
33192fa1 940 memset(p, 0, sizeof(struct visorchipset_bus_info));
12e364b9
KC
941}
942
943static void
9b989a98 944dev_info_clear(void *v)
12e364b9 945{
246e0cd0 946 struct visorchipset_device_info *p =
bbd4be30 947 (struct visorchipset_device_info *) v;
26eb2c0c 948
246e0cd0 949 memset(p, 0, sizeof(struct visorchipset_device_info));
12e364b9
KC
950}
951
4f66520b
JS
952static struct visorchipset_bus_info *
953bus_find(struct list_head *list, u32 bus_no)
954{
955 struct visorchipset_bus_info *p;
956
957 list_for_each_entry(p, list, entry) {
958 if (p->bus_no == bus_no)
959 return p;
960 }
961
962 return NULL;
963}
964
d480f6a2
JS
965static struct visorchipset_device_info *
966device_find(struct list_head *list, u32 bus_no, u32 dev_no)
967{
968 struct visorchipset_device_info *p;
969
970 list_for_each_entry(p, list, entry) {
971 if (p->bus_no == bus_no && p->dev_no == dev_no)
972 return p;
973 }
974
975 return NULL;
976}
977
28723521
JS
978static void busdevices_del(struct list_head *list, u32 bus_no)
979{
980 struct visorchipset_device_info *p, *tmp;
981
982 list_for_each_entry_safe(p, tmp, list, entry) {
983 if (p->bus_no == bus_no) {
984 list_del(&p->entry);
985 kfree(p);
986 }
987 }
988}
989
c242233e 990static u8
12e364b9
KC
991check_chipset_events(void)
992{
993 int i;
c242233e 994 u8 send_msg = 1;
12e364b9
KC
995 /* Check events to determine if response should be sent */
996 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
997 send_msg &= chipset_events[i];
998 return send_msg;
999}
1000
1001static void
1002clear_chipset_events(void)
1003{
1004 int i;
1005 /* Clear chipset_events */
1006 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
1007 chipset_events[i] = 0;
1008}
1009
1010void
fe90d892
BR
1011visorchipset_register_busdev_server(
1012 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 1013 struct visorchipset_busdev_responders *responders,
1e7a59c1 1014 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 1015{
8f1947ac 1016 down(&notifier_lock);
38f736e9 1017 if (!notifiers) {
6fe345af
BR
1018 memset(&busdev_server_notifiers, 0,
1019 sizeof(busdev_server_notifiers));
12e364b9
KC
1020 serverregistered = 0; /* clear flag */
1021 } else {
6fe345af 1022 busdev_server_notifiers = *notifiers;
12e364b9
KC
1023 serverregistered = 1; /* set flag */
1024 }
1025 if (responders)
8e3fedd6 1026 *responders = busdev_responders;
1e7a59c1
BR
1027 if (driver_info)
1028 bus_device_info_init(driver_info, "chipset", "visorchipset",
8e76e695 1029 VERSION, NULL);
12e364b9 1030
8f1947ac 1031 up(&notifier_lock);
12e364b9
KC
1032}
1033EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
1034
1035void
fe90d892
BR
1036visorchipset_register_busdev_client(
1037 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 1038 struct visorchipset_busdev_responders *responders,
43fce019 1039 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 1040{
8f1947ac 1041 down(&notifier_lock);
38f736e9 1042 if (!notifiers) {
6fe345af
BR
1043 memset(&busdev_client_notifiers, 0,
1044 sizeof(busdev_client_notifiers));
12e364b9
KC
1045 clientregistered = 0; /* clear flag */
1046 } else {
6fe345af 1047 busdev_client_notifiers = *notifiers;
12e364b9
KC
1048 clientregistered = 1; /* set flag */
1049 }
1050 if (responders)
8e3fedd6 1051 *responders = busdev_responders;
43fce019
BR
1052 if (driver_info)
1053 bus_device_info_init(driver_info, "chipset(bolts)",
1054 "visorchipset", VERSION, NULL);
8f1947ac 1055 up(&notifier_lock);
12e364b9
KC
1056}
1057EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
1058
1059static void
1060cleanup_controlvm_structures(void)
1061{
33192fa1 1062 struct visorchipset_bus_info *bi, *tmp_bi;
246e0cd0 1063 struct visorchipset_device_info *di, *tmp_di;
12e364b9 1064
1390b88c 1065 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
9b989a98 1066 bus_info_clear(bi);
12e364b9
KC
1067 list_del(&bi->entry);
1068 kfree(bi);
1069 }
1070
1390b88c 1071 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
9b989a98 1072 dev_info_clear(di);
12e364b9
KC
1073 list_del(&di->entry);
1074 kfree(di);
1075 }
1076}
1077
1078static void
3ab47701 1079chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
1080{
1081 static int chipset_inited;
b9b141e8 1082 enum ultra_chipset_feature features = 0;
12e364b9
KC
1083 int rc = CONTROLVM_RESP_SUCCESS;
1084
1085 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1086 if (chipset_inited) {
22ad57ba 1087 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 1088 goto cleanup;
12e364b9
KC
1089 }
1090 chipset_inited = 1;
1091 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
1092
1093 /* Set features to indicate we support parahotplug (if Command
1094 * also supports it). */
1095 features =
2ea5117b 1096 inmsg->cmd.init_chipset.
12e364b9
KC
1097 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
1098
1099 /* Set the "reply" bit so Command knows this is a
1100 * features-aware driver. */
1101 features |= ULTRA_CHIPSET_FEATURE_REPLY;
1102
e3199b2e 1103cleanup:
12e364b9
KC
1104 if (rc < 0)
1105 cleanup_controlvm_structures();
98d7b594 1106 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
1107 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
1108}
1109
1110static void
3ab47701 1111controlvm_init_response(struct controlvm_message *msg,
b3168c70 1112 struct controlvm_message_header *msg_hdr, int response)
12e364b9 1113{
3ab47701 1114 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 1115 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
1116 msg->hdr.payload_bytes = 0;
1117 msg->hdr.payload_vm_offset = 0;
1118 msg->hdr.payload_max_bytes = 0;
12e364b9 1119 if (response < 0) {
98d7b594
BR
1120 msg->hdr.flags.failed = 1;
1121 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
1122 }
1123}
1124
1125static void
b3168c70 1126controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 1127{
3ab47701 1128 struct controlvm_message outmsg;
26eb2c0c 1129
b3168c70 1130 controlvm_init_response(&outmsg, msg_hdr, response);
12e364b9
KC
1131 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
1132 * back the deviceChangeState structure in the packet. */
b3168c70 1133 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
0639ba67
BR
1134 g_devicechangestate_packet.device_change_state.bus_no ==
1135 g_diagpool_bus_no &&
1136 g_devicechangestate_packet.device_change_state.dev_no ==
83d48905 1137 g_diagpool_dev_no)
4f44b72d 1138 outmsg.cmd = g_devicechangestate_packet;
2098dbd1 1139 if (outmsg.hdr.flags.test_message == 1)
12e364b9 1140 return;
2098dbd1 1141
c3d9a224 1142 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 1143 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
1144 return;
1145 }
1146}
1147
1148static void
b3168c70 1149controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 1150 int response,
b9b141e8 1151 enum ultra_chipset_feature features)
12e364b9 1152{
3ab47701 1153 struct controlvm_message outmsg;
26eb2c0c 1154
b3168c70 1155 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 1156 outmsg.cmd.init_chipset.features = features;
c3d9a224 1157 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 1158 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
1159 return;
1160 }
1161}
1162
98d7b594 1163static void controlvm_respond_physdev_changestate(
b3168c70 1164 struct controlvm_message_header *msg_hdr, int response,
98d7b594 1165 struct spar_segment_state state)
12e364b9 1166{
3ab47701 1167 struct controlvm_message outmsg;
26eb2c0c 1168
b3168c70 1169 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
1170 outmsg.cmd.device_change_state.state = state;
1171 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 1172 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 1173 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
1174 return;
1175 }
1176}
1177
1178void
2c683cde
BR
1179visorchipset_save_message(struct controlvm_message *msg,
1180 enum crash_obj_type type)
12e364b9 1181{
4577225d
BR
1182 u32 crash_msg_offset;
1183 u16 crash_msg_count;
12e364b9
KC
1184
1185 /* get saved message count */
c3d9a224 1186 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1187 offsetof(struct spar_controlvm_channel_protocol,
1188 saved_crash_message_count),
4577225d 1189 &crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
1190 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1191 POSTCODE_SEVERITY_ERR);
1192 return;
1193 }
1194
4577225d 1195 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 1196 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
4577225d 1197 crash_msg_count,
12e364b9
KC
1198 POSTCODE_SEVERITY_ERR);
1199 return;
1200 }
1201
1202 /* get saved crash message offset */
c3d9a224 1203 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1204 offsetof(struct spar_controlvm_channel_protocol,
1205 saved_crash_message_offset),
4577225d 1206 &crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
1207 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1208 POSTCODE_SEVERITY_ERR);
1209 return;
1210 }
1211
2c683cde 1212 if (type == CRASH_BUS) {
c3d9a224 1213 if (visorchannel_write(controlvm_channel,
4577225d 1214 crash_msg_offset,
3ab47701
BR
1215 msg,
1216 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1217 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
1218 POSTCODE_SEVERITY_ERR);
1219 return;
1220 }
1221 } else {
c3d9a224 1222 if (visorchannel_write(controlvm_channel,
4577225d 1223 crash_msg_offset +
3ab47701
BR
1224 sizeof(struct controlvm_message), msg,
1225 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1226 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
1227 POSTCODE_SEVERITY_ERR);
1228 return;
1229 }
1230 }
1231}
1232EXPORT_SYMBOL_GPL(visorchipset_save_message);
1233
1234static void
52063eca 1235bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
12e364b9 1236{
e82ba62e 1237 struct visorchipset_bus_info *p;
f4c11551 1238 bool need_clear = false;
12e364b9 1239
4f66520b 1240 p = bus_find(&bus_info_list, bus_no);
0aca7844 1241 if (!p)
12e364b9 1242 return;
0aca7844 1243
12e364b9 1244 if (response < 0) {
fbb31f48 1245 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
12e364b9
KC
1246 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
1247 /* undo the row we just created... */
28723521 1248 busdevices_del(&dev_info_list, bus_no);
12e364b9 1249 } else {
fbb31f48 1250 if (cmd_id == CONTROLVM_BUS_CREATE)
12e364b9 1251 p->state.created = 1;
fbb31f48 1252 if (cmd_id == CONTROLVM_BUS_DESTROY)
f4c11551 1253 need_clear = true;
12e364b9
KC
1254 }
1255
0aca7844 1256 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 1257 return; /* no controlvm response needed */
6b59b31d 1258 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 1259 return;
33192fa1
BR
1260 controlvm_respond(&p->pending_msg_hdr, response);
1261 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 1262 if (need_clear) {
9b989a98 1263 bus_info_clear(p);
28723521 1264 busdevices_del(&dev_info_list, bus_no);
12e364b9
KC
1265 }
1266}
1267
1268static void
fbb31f48 1269device_changestate_responder(enum controlvm_id cmd_id,
52063eca 1270 u32 bus_no, u32 dev_no, int response,
fbb31f48 1271 struct spar_segment_state response_state)
12e364b9 1272{
e82ba62e 1273 struct visorchipset_device_info *p;
3ab47701 1274 struct controlvm_message outmsg;
12e364b9 1275
d480f6a2 1276 p = device_find(&dev_info_list, bus_no, dev_no);
0aca7844 1277 if (!p)
12e364b9 1278 return;
0aca7844 1279 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 1280 return; /* no controlvm response needed */
fbb31f48 1281 if (p->pending_msg_hdr.id != cmd_id)
12e364b9 1282 return;
12e364b9 1283
246e0cd0 1284 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
12e364b9 1285
fbb31f48
BR
1286 outmsg.cmd.device_change_state.bus_no = bus_no;
1287 outmsg.cmd.device_change_state.dev_no = dev_no;
1288 outmsg.cmd.device_change_state.state = response_state;
12e364b9 1289
c3d9a224 1290 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 1291 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 1292 return;
12e364b9 1293
246e0cd0 1294 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9
KC
1295}
1296
1297static void
52063eca 1298device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
12e364b9 1299{
e82ba62e 1300 struct visorchipset_device_info *p;
f4c11551 1301 bool need_clear = false;
12e364b9 1302
d480f6a2 1303 p = device_find(&dev_info_list, bus_no, dev_no);
0aca7844 1304 if (!p)
12e364b9 1305 return;
12e364b9 1306 if (response >= 0) {
fbb31f48 1307 if (cmd_id == CONTROLVM_DEVICE_CREATE)
12e364b9 1308 p->state.created = 1;
fbb31f48 1309 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
f4c11551 1310 need_clear = true;
12e364b9
KC
1311 }
1312
0aca7844 1313 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 1314 return; /* no controlvm response needed */
0aca7844 1315
6b59b31d 1316 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 1317 return;
0aca7844 1318
246e0cd0
BR
1319 controlvm_respond(&p->pending_msg_hdr, response);
1320 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 1321 if (need_clear)
9b989a98 1322 dev_info_clear(p);
12e364b9
KC
1323}
1324
1325static void
2836c6a8
BR
1326bus_epilog(u32 bus_no,
1327 u32 cmd, struct controlvm_message_header *msg_hdr,
f4c11551 1328 int response, bool need_response)
12e364b9 1329{
4f66520b 1330 struct visorchipset_bus_info *bus_info;
f4c11551 1331 bool notified = false;
12e364b9 1332
4f66520b 1333 bus_info = bus_find(&bus_info_list, bus_no);
12e364b9 1334
2836c6a8 1335 if (!bus_info)
12e364b9 1336 return;
0aca7844 1337
2836c6a8
BR
1338 if (need_response) {
1339 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
98d7b594 1340 sizeof(struct controlvm_message_header));
75c1f8b7 1341 } else {
2836c6a8 1342 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 1343 }
12e364b9 1344
8f1947ac 1345 down(&notifier_lock);
12e364b9
KC
1346 if (response == CONTROLVM_RESP_SUCCESS) {
1347 switch (cmd) {
1348 case CONTROLVM_BUS_CREATE:
1349 /* We can't tell from the bus_create
1350 * information which of our 2 bus flavors the
1351 * devices on this bus will ultimately end up.
1352 * FORTUNATELY, it turns out it is harmless to
1353 * send the bus_create to both of them. We can
1354 * narrow things down a little bit, though,
1355 * because we know: - BusDev_Server can handle
1356 * either server or client devices
1357 * - BusDev_Client can handle ONLY client
1358 * devices */
6fe345af
BR
1359 if (busdev_server_notifiers.bus_create) {
1360 (*busdev_server_notifiers.bus_create) (bus_no);
f4c11551 1361 notified = true;
12e364b9 1362 }
2836c6a8 1363 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
1364 busdev_client_notifiers.bus_create) {
1365 (*busdev_client_notifiers.bus_create) (bus_no);
f4c11551 1366 notified = true;
12e364b9
KC
1367 }
1368 break;
1369 case CONTROLVM_BUS_DESTROY:
6fe345af
BR
1370 if (busdev_server_notifiers.bus_destroy) {
1371 (*busdev_server_notifiers.bus_destroy) (bus_no);
f4c11551 1372 notified = true;
12e364b9 1373 }
2836c6a8 1374 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
1375 busdev_client_notifiers.bus_destroy) {
1376 (*busdev_client_notifiers.bus_destroy) (bus_no);
f4c11551 1377 notified = true;
12e364b9
KC
1378 }
1379 break;
1380 }
1381 }
1382 if (notified)
1383 /* The callback function just called above is responsible
929aa8ae 1384 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1385 * function, which will call bus_responder()
1386 */
1387 ;
1388 else
2836c6a8 1389 bus_responder(cmd, bus_no, response);
8f1947ac 1390 up(&notifier_lock);
12e364b9
KC
1391}
1392
1393static void
2836c6a8
BR
1394device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
1395 struct controlvm_message_header *msg_hdr, int response,
f4c11551 1396 bool need_response, bool for_visorbus)
12e364b9 1397{
e82ba62e 1398 struct visorchipset_busdev_notifiers *notifiers;
f4c11551 1399 bool notified = false;
12e364b9 1400
2836c6a8 1401 struct visorchipset_device_info *dev_info =
d480f6a2 1402 device_find(&dev_info_list, bus_no, dev_no);
12e364b9
KC
1403 char *envp[] = {
1404 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
1405 NULL
1406 };
1407
2836c6a8 1408 if (!dev_info)
12e364b9 1409 return;
0aca7844 1410
12e364b9 1411 if (for_visorbus)
6fe345af 1412 notifiers = &busdev_server_notifiers;
12e364b9 1413 else
6fe345af 1414 notifiers = &busdev_client_notifiers;
2836c6a8
BR
1415 if (need_response) {
1416 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
98d7b594 1417 sizeof(struct controlvm_message_header));
75c1f8b7 1418 } else {
2836c6a8 1419 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 1420 }
12e364b9 1421
8f1947ac 1422 down(&notifier_lock);
12e364b9
KC
1423 if (response >= 0) {
1424 switch (cmd) {
1425 case CONTROLVM_DEVICE_CREATE:
1426 if (notifiers->device_create) {
2836c6a8 1427 (*notifiers->device_create) (bus_no, dev_no);
f4c11551 1428 notified = true;
12e364b9
KC
1429 }
1430 break;
1431 case CONTROLVM_DEVICE_CHANGESTATE:
1432 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
1433 if (state.alive == segment_state_running.alive &&
1434 state.operating ==
1435 segment_state_running.operating) {
12e364b9 1436 if (notifiers->device_resume) {
2836c6a8
BR
1437 (*notifiers->device_resume) (bus_no,
1438 dev_no);
f4c11551 1439 notified = true;
12e364b9
KC
1440 }
1441 }
1442 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 1443 else if (state.alive == segment_state_standby.alive &&
3f833b54 1444 state.operating ==
bd0d2dcc 1445 segment_state_standby.operating) {
12e364b9
KC
1446 /* technically this is standby case
1447 * where server is lost
1448 */
1449 if (notifiers->device_pause) {
2836c6a8
BR
1450 (*notifiers->device_pause) (bus_no,
1451 dev_no);
f4c11551 1452 notified = true;
12e364b9 1453 }
bd0d2dcc 1454 } else if (state.alive == segment_state_paused.alive &&
3f833b54 1455 state.operating ==
bd0d2dcc 1456 segment_state_paused.operating) {
12e364b9
KC
1457 /* this is lite pause where channel is
1458 * still valid just 'pause' of it
1459 */
2836c6a8
BR
1460 if (bus_no == g_diagpool_bus_no &&
1461 dev_no == g_diagpool_dev_no) {
12e364b9
KC
1462 /* this will trigger the
1463 * diag_shutdown.sh script in
1464 * the visorchipset hotplug */
1465 kobject_uevent_env
eb34e877 1466 (&visorchipset_platform_device.dev.
12e364b9
KC
1467 kobj, KOBJ_ONLINE, envp);
1468 }
1469 }
1470 break;
1471 case CONTROLVM_DEVICE_DESTROY:
1472 if (notifiers->device_destroy) {
2836c6a8 1473 (*notifiers->device_destroy) (bus_no, dev_no);
f4c11551 1474 notified = true;
12e364b9
KC
1475 }
1476 break;
1477 }
1478 }
1479 if (notified)
1480 /* The callback function just called above is responsible
929aa8ae 1481 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1482 * function, which will call device_responder()
1483 */
1484 ;
1485 else
2836c6a8 1486 device_responder(cmd, bus_no, dev_no, response);
8f1947ac 1487 up(&notifier_lock);
12e364b9
KC
1488}
1489
1490static void
3ab47701 1491bus_create(struct controlvm_message *inmsg)
12e364b9 1492{
2ea5117b 1493 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1494 u32 bus_no = cmd->create_bus.bus_no;
12e364b9 1495 int rc = CONTROLVM_RESP_SUCCESS;
e82ba62e 1496 struct visorchipset_bus_info *bus_info;
12e364b9 1497
4f66520b 1498 bus_info = bus_find(&bus_info_list, bus_no);
6c5fed35
BR
1499 if (bus_info && (bus_info->state.created == 1)) {
1500 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1501 POSTCODE_SEVERITY_ERR);
22ad57ba 1502 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
6c5fed35 1503 goto cleanup;
12e364b9 1504 }
6c5fed35
BR
1505 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1506 if (!bus_info) {
1507 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1508 POSTCODE_SEVERITY_ERR);
22ad57ba 1509 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
6c5fed35 1510 goto cleanup;
12e364b9
KC
1511 }
1512
6c5fed35
BR
1513 INIT_LIST_HEAD(&bus_info->entry);
1514 bus_info->bus_no = bus_no;
12e364b9 1515
6c5fed35 1516 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1517
98d7b594 1518 if (inmsg->hdr.flags.test_message == 1)
6c5fed35 1519 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1520 else
6c5fed35 1521 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
12e364b9 1522
6c5fed35
BR
1523 bus_info->flags.server = inmsg->hdr.flags.server;
1524 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1525 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1526 bus_info->chan_info.channel_type_uuid =
9b1caee7 1527 cmd->create_bus.bus_data_type_uuid;
6c5fed35 1528 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
12e364b9 1529
6c5fed35 1530 list_add(&bus_info->entry, &bus_info_list);
12e364b9 1531
6c5fed35 1532 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1533
6c5fed35
BR
1534cleanup:
1535 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1536 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1537}
1538
1539static void
3ab47701 1540bus_destroy(struct controlvm_message *inmsg)
12e364b9 1541{
2ea5117b 1542 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1543 u32 bus_no = cmd->destroy_bus.bus_no;
dff54cd6 1544 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1545 int rc = CONTROLVM_RESP_SUCCESS;
1546
4f66520b 1547 bus_info = bus_find(&bus_info_list, bus_no);
dff54cd6 1548 if (!bus_info)
22ad57ba 1549 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1550 else if (bus_info->state.created == 0)
22ad57ba 1551 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1552
dff54cd6 1553 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1554 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1555}
1556
1557static void
317d9614
BR
1558bus_configure(struct controlvm_message *inmsg,
1559 struct parser_context *parser_ctx)
12e364b9 1560{
2ea5117b 1561 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e
JS
1562 u32 bus_no;
1563 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1564 int rc = CONTROLVM_RESP_SUCCESS;
1565 char s[99];
1566
654bada0
BR
1567 bus_no = cmd->configure_bus.bus_no;
1568 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1569 POSTCODE_SEVERITY_INFO);
12e364b9 1570
4f66520b 1571 bus_info = bus_find(&bus_info_list, bus_no);
654bada0
BR
1572 if (!bus_info) {
1573 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1574 POSTCODE_SEVERITY_ERR);
22ad57ba 1575 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1576 } else if (bus_info->state.created == 0) {
1577 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1578 POSTCODE_SEVERITY_ERR);
22ad57ba 1579 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1580 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1581 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1582 POSTCODE_SEVERITY_ERR);
22ad57ba 1583 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0
BR
1584 } else {
1585 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1586 bus_info->partition_uuid = parser_id_get(parser_ctx);
1587 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1588 bus_info->name = parser_string_get(parser_ctx);
1589
1590 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1591 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1592 POSTCODE_SEVERITY_INFO);
12e364b9 1593 }
654bada0 1594 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1595 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1596}
1597
1598static void
3ab47701 1599my_device_create(struct controlvm_message *inmsg)
12e364b9 1600{
2ea5117b 1601 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1602 u32 bus_no = cmd->create_device.bus_no;
1603 u32 dev_no = cmd->create_device.dev_no;
e82ba62e
JS
1604 struct visorchipset_device_info *dev_info;
1605 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1606 int rc = CONTROLVM_RESP_SUCCESS;
1607
d480f6a2 1608 dev_info = device_find(&dev_info_list, bus_no, dev_no);
c60c8e26
BR
1609 if (dev_info && (dev_info->state.created == 1)) {
1610 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1611 POSTCODE_SEVERITY_ERR);
22ad57ba 1612 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
c60c8e26 1613 goto cleanup;
12e364b9 1614 }
4f66520b 1615 bus_info = bus_find(&bus_info_list, bus_no);
c60c8e26
BR
1616 if (!bus_info) {
1617 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1618 POSTCODE_SEVERITY_ERR);
22ad57ba 1619 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1620 goto cleanup;
12e364b9 1621 }
c60c8e26
BR
1622 if (bus_info->state.created == 0) {
1623 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1624 POSTCODE_SEVERITY_ERR);
22ad57ba 1625 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1626 goto cleanup;
12e364b9 1627 }
c60c8e26
BR
1628 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1629 if (!dev_info) {
1630 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1631 POSTCODE_SEVERITY_ERR);
22ad57ba 1632 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
c60c8e26 1633 goto cleanup;
12e364b9 1634 }
97a84f12 1635
c60c8e26
BR
1636 INIT_LIST_HEAD(&dev_info->entry);
1637 dev_info->bus_no = bus_no;
1638 dev_info->dev_no = dev_no;
1639 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1640 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
12e364b9
KC
1641 POSTCODE_SEVERITY_INFO);
1642
98d7b594 1643 if (inmsg->hdr.flags.test_message == 1)
c60c8e26 1644 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1645 else
c60c8e26
BR
1646 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1647 dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1648 dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1649 dev_info->chan_info.channel_type_uuid =
9b1caee7 1650 cmd->create_device.data_type_uuid;
c60c8e26
BR
1651 dev_info->chan_info.intr = cmd->create_device.intr;
1652 list_add(&dev_info->entry, &dev_info_list);
1653 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
12e364b9 1654 POSTCODE_SEVERITY_INFO);
c60c8e26 1655cleanup:
12e364b9 1656 /* get the bus and devNo for DiagPool channel */
c60c8e26
BR
1657 if (dev_info &&
1658 is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1659 g_diagpool_bus_no = bus_no;
1660 g_diagpool_dev_no = dev_no;
12e364b9 1661 }
c60c8e26 1662 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1663 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
98d7b594 1664 inmsg->hdr.flags.response_expected == 1,
c60c8e26 1665 FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1666}
1667
1668static void
3ab47701 1669my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1670{
2ea5117b 1671 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1672 u32 bus_no = cmd->device_change_state.bus_no;
1673 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1674 struct spar_segment_state state = cmd->device_change_state.state;
e82ba62e 1675 struct visorchipset_device_info *dev_info;
12e364b9
KC
1676 int rc = CONTROLVM_RESP_SUCCESS;
1677
d480f6a2 1678 dev_info = device_find(&dev_info_list, bus_no, dev_no);
0278a905
BR
1679 if (!dev_info) {
1680 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1681 POSTCODE_SEVERITY_ERR);
22ad57ba 1682 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1683 } else if (dev_info->state.created == 0) {
1684 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1685 POSTCODE_SEVERITY_ERR);
22ad57ba 1686 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1687 }
0278a905
BR
1688 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1689 device_epilog(bus_no, dev_no, state,
1690 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
98d7b594 1691 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1692 FOR_VISORBUS(
0278a905 1693 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1694}
1695
1696static void
3ab47701 1697my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1698{
2ea5117b 1699 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1700 u32 bus_no = cmd->destroy_device.bus_no;
1701 u32 dev_no = cmd->destroy_device.dev_no;
e82ba62e 1702 struct visorchipset_device_info *dev_info;
12e364b9
KC
1703 int rc = CONTROLVM_RESP_SUCCESS;
1704
d480f6a2 1705 dev_info = device_find(&dev_info_list, bus_no, dev_no);
61715c8b 1706 if (!dev_info)
22ad57ba 1707 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1708 else if (dev_info->state.created == 0)
22ad57ba 1709 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1710
61715c8b
BR
1711 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1712 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1713 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
98d7b594 1714 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1715 FOR_VISORBUS(
61715c8b 1716 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1717}
1718
1719/* When provided with the physical address of the controlvm channel
1720 * (phys_addr), the offset to the payload area we need to manage
1721 * (offset), and the size of this payload area (bytes), fills in the
f4c11551 1722 * controlvm_payload_info struct. Returns true for success or false
12e364b9
KC
1723 * for failure.
1724 */
1725static int
5fc0229a 1726initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
c1f834eb 1727 struct visor_controlvm_payload_info *info)
12e364b9 1728{
c242233e 1729 u8 __iomem *payload = NULL;
12e364b9
KC
1730 int rc = CONTROLVM_RESP_SUCCESS;
1731
38f736e9 1732 if (!info) {
22ad57ba 1733 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1734 goto cleanup;
12e364b9 1735 }
c1f834eb 1736 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9 1737 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1738 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1739 goto cleanup;
12e364b9
KC
1740 }
1741 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1742 if (!payload) {
22ad57ba 1743 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1744 goto cleanup;
12e364b9
KC
1745 }
1746
1747 info->offset = offset;
1748 info->bytes = bytes;
1749 info->ptr = payload;
12e364b9 1750
f118a39b 1751cleanup:
12e364b9 1752 if (rc < 0) {
f118a39b 1753 if (payload) {
12e364b9
KC
1754 iounmap(payload);
1755 payload = NULL;
1756 }
1757 }
1758 return rc;
1759}
1760
1761static void
c1f834eb 1762destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1763{
597c338f 1764 if (info->ptr) {
12e364b9
KC
1765 iounmap(info->ptr);
1766 info->ptr = NULL;
1767 }
c1f834eb 1768 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1769}
1770
1771static void
1772initialize_controlvm_payload(void)
1773{
c3d9a224 1774 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
cafefc0c
BR
1775 u64 payload_offset = 0;
1776 u32 payload_bytes = 0;
26eb2c0c 1777
c3d9a224 1778 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1779 offsetof(struct spar_controlvm_channel_protocol,
1780 request_payload_offset),
cafefc0c 1781 &payload_offset, sizeof(payload_offset)) < 0) {
12e364b9
KC
1782 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1783 POSTCODE_SEVERITY_ERR);
1784 return;
1785 }
c3d9a224 1786 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1787 offsetof(struct spar_controlvm_channel_protocol,
1788 request_payload_bytes),
cafefc0c 1789 &payload_bytes, sizeof(payload_bytes)) < 0) {
12e364b9
KC
1790 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1791 POSTCODE_SEVERITY_ERR);
1792 return;
1793 }
1794 initialize_controlvm_payload_info(phys_addr,
cafefc0c 1795 payload_offset, payload_bytes,
84982fbf 1796 &controlvm_payload_info);
12e364b9
KC
1797}
1798
1799/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1800 * Returns CONTROLVM_RESP_xxx code.
1801 */
1802int
1803visorchipset_chipset_ready(void)
1804{
eb34e877 1805 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1806 return CONTROLVM_RESP_SUCCESS;
1807}
1808EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1809
1810int
1811visorchipset_chipset_selftest(void)
1812{
1813 char env_selftest[20];
1814 char *envp[] = { env_selftest, NULL };
26eb2c0c 1815
12e364b9 1816 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1817 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1818 envp);
1819 return CONTROLVM_RESP_SUCCESS;
1820}
1821EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1822
1823/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1824 * Returns CONTROLVM_RESP_xxx code.
1825 */
1826int
1827visorchipset_chipset_notready(void)
1828{
eb34e877 1829 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1830 return CONTROLVM_RESP_SUCCESS;
1831}
1832EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1833
1834static void
77a0449d 1835chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1836{
1837 int rc = visorchipset_chipset_ready();
26eb2c0c 1838
12e364b9
KC
1839 if (rc != CONTROLVM_RESP_SUCCESS)
1840 rc = -rc;
77a0449d
BR
1841 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1842 controlvm_respond(msg_hdr, rc);
1843 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1844 /* Send CHIPSET_READY response when all modules have been loaded
1845 * and disks mounted for the partition
1846 */
77a0449d 1847 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1848 }
1849}
1850
1851static void
77a0449d 1852chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1853{
1854 int rc = visorchipset_chipset_selftest();
26eb2c0c 1855
12e364b9
KC
1856 if (rc != CONTROLVM_RESP_SUCCESS)
1857 rc = -rc;
77a0449d
BR
1858 if (msg_hdr->flags.response_expected)
1859 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1860}
1861
1862static void
77a0449d 1863chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1864{
1865 int rc = visorchipset_chipset_notready();
26eb2c0c 1866
12e364b9
KC
1867 if (rc != CONTROLVM_RESP_SUCCESS)
1868 rc = -rc;
77a0449d
BR
1869 if (msg_hdr->flags.response_expected)
1870 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1871}
1872
1873/* This is your "one-stop" shop for grabbing the next message from the
1874 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1875 */
f4c11551 1876static bool
3ab47701 1877read_controlvm_event(struct controlvm_message *msg)
12e364b9 1878{
c3d9a224 1879 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1880 CONTROLVM_QUEUE_EVENT, msg)) {
1881 /* got a message */
0aca7844 1882 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1883 return false;
1884 return true;
12e364b9 1885 }
f4c11551 1886 return false;
12e364b9
KC
1887}
1888
1889/*
1890 * The general parahotplug flow works as follows. The visorchipset
1891 * driver receives a DEVICE_CHANGESTATE message from Command
1892 * specifying a physical device to enable or disable. The CONTROLVM
1893 * message handler calls parahotplug_process_message, which then adds
1894 * the message to a global list and kicks off a udev event which
1895 * causes a user level script to enable or disable the specified
1896 * device. The udev script then writes to
1897 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1898 * to get called, at which point the appropriate CONTROLVM message is
1899 * retrieved from the list and responded to.
1900 */
1901
1902#define PARAHOTPLUG_TIMEOUT_MS 2000
1903
/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	/* atomic increment keeps ids unique even if requests are created
	 * concurrently; the first id handed out is 1
	 */
	return atomic_inc_return(&id);
}
1915
/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future.  Compared with
 * time_after_eq() in parahotplug_process_list(), so jiffies wraparound
 * is handled correctly.
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
1925
1926/*
1927 * Create a parahotplug_request, which is basically a wrapper for a
1928 * CONTROLVM_MESSAGE that we can stick on a list
1929 */
1930static struct parahotplug_request *
3ab47701 1931parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1932{
ea0dcfcf
QL
1933 struct parahotplug_request *req;
1934
6a55e3c3 1935 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1936 if (!req)
12e364b9
KC
1937 return NULL;
1938
1939 req->id = parahotplug_next_id();
1940 req->expiration = parahotplug_next_expiration();
1941 req->msg = *msg;
1942
1943 return req;
1944}
1945
/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	/* the controlvm message is embedded by value, so one kfree
	 * releases everything
	 */
	kfree(req);
}
1954
1955/*
1956 * Cause uevent to run the user level script to do the disable/enable
1957 * specified in (the CONTROLVM message in) the specified
1958 * parahotplug_request
1959 */
1960static void
1961parahotplug_request_kickoff(struct parahotplug_request *req)
1962{
2ea5117b 1963 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1964 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1965 env_func[40];
1966 char *envp[] = {
1967 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1968 };
1969
1970 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1971 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1972 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1973 cmd->device_change_state.state.active);
12e364b9 1974 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1975 cmd->device_change_state.bus_no);
12e364b9 1976 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1977 cmd->device_change_state.dev_no >> 3);
12e364b9 1978 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1979 cmd->device_change_state.dev_no & 0x7);
12e364b9 1980
eb34e877 1981 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1982 envp);
1983}
1984
1985/*
1986 * Remove any request from the list that's been on there too long and
1987 * respond with an error.
1988 */
1989static void
1990parahotplug_process_list(void)
1991{
e82ba62e
JS
1992 struct list_head *pos;
1993 struct list_head *tmp;
12e364b9 1994
ddf5de53 1995 spin_lock(&parahotplug_request_list_lock);
12e364b9 1996
ddf5de53 1997 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1998 struct parahotplug_request *req =
1999 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
2000
2001 if (!time_after_eq(jiffies, req->expiration))
2002 continue;
2003
2004 list_del(pos);
2005 if (req->msg.hdr.flags.response_expected)
2006 controlvm_respond_physdev_changestate(
2007 &req->msg.hdr,
2008 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
2009 req->msg.cmd.device_change_state.state);
2010 parahotplug_request_destroy(req);
12e364b9
KC
2011 }
2012
ddf5de53 2013 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
2014}
2015
2016/*
2017 * Called from the /proc handler, which means the user script has
2018 * finished the enable/disable. Find the matching identifier, and
2019 * respond to the CONTROLVM message with success.
2020 */
2021static int
b06bdf7d 2022parahotplug_request_complete(int id, u16 active)
12e364b9 2023{
e82ba62e
JS
2024 struct list_head *pos;
2025 struct list_head *tmp;
12e364b9 2026
ddf5de53 2027 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
2028
2029 /* Look for a request matching "id". */
ddf5de53 2030 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
2031 struct parahotplug_request *req =
2032 list_entry(pos, struct parahotplug_request, list);
2033 if (req->id == id) {
2034 /* Found a match. Remove it from the list and
2035 * respond.
2036 */
2037 list_del(pos);
ddf5de53 2038 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 2039 req->msg.cmd.device_change_state.state.active = active;
98d7b594 2040 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
2041 controlvm_respond_physdev_changestate(
2042 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 2043 req->msg.cmd.device_change_state.state);
12e364b9
KC
2044 parahotplug_request_destroy(req);
2045 return 0;
2046 }
2047 }
2048
ddf5de53 2049 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
2050 return -1;
2051}
2052
/*
 * Enables or disables a PCI device by kicking off a udev script.
 * Enable requests are answered immediately; disable requests are parked
 * on parahotplug_request_list and answered later from
 * parahotplug_request_complete() (or time out in
 * parahotplug_process_list()).
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	/* NOTE(review): on allocation failure the message is silently
	 * dropped and no response is sent - confirm the sender tolerates
	 * a missing response here.
	 */
	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
					CONTROLVM_RESP_SUCCESS,
					inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
2094
12e364b9
KC
/* Process a controlvm message.
 * Return result:
 *    false - this function will return FALSE only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error.
 *
 * Note: inmsg is deliberately passed BY VALUE - the caller may stash and
 * replay the same message if we return false.
 */
static bool
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		/* retry == true means a transient allocation failure:
		 * report "not processed" so the caller replays this message
		 */
		if (!parser_ctx && retry)
			return false;
	}

	/* ACK the message BEFORE dispatching it (real messages only) */
	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			/* physical (SR-IOV) device: goes through the
			 * parahotplug/udev path
			 */
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
2202
d746cb55 2203static HOSTADDRESS controlvm_get_channel_address(void)
524b0b63 2204{
5fc0229a 2205 u64 addr = 0;
b3c55b13 2206 u32 size = 0;
524b0b63 2207
0aca7844 2208 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 2209 return 0;
0aca7844 2210
524b0b63
BR
2211 return addr;
2212}
2213
12e364b9
KC
/* Periodic work item: polls the controlvm channel, dispatches incoming
 * event messages via handle_command(), implements throttled replay of a
 * stashed message, expires parahotplug requests, and re-queues itself
 * with a fast or slow poll interval depending on recent activity.
 */
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;	/* persists across invocations */

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;
	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	/* skip the first 250 invocations entirely (startup grace period) */
	poll_count++;
	if (poll_count >= 250)
		;	/* keep going */
	else
		goto cleanup;

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			/* user space signalled readiness: send the deferred
			 * CHIPSET_READY response saved by chipset_ready()
			 */
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	/* drain and discard everything on the RESPONSE queue */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	/* NOTE(review): got_command is always false at this point, so this
	 * condition is vacuously true - likely left over from an earlier
	 * structure of the loop.
	 */
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	/* reschedule ourselves */
	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
2309
/* One-shot work item used when booting a kdump (crash) kernel: instead of
 * processing live controlvm traffic, replay the bus-create and
 * device-create messages that were saved in the controlvm channel before
 * the crash, so the crash kernel regains access to its boot disk.
 */
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;

	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg
	 * NOTE(review): msg is stack-allocated and only hdr.id plus the
	 * init_chipset fields are set; remaining header bytes (flags etc.)
	 * are uninitialized - confirm chipset_init() never reads them.
	 */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device; it is saved
	 * immediately after the bus message
	 */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:
	/* registration not complete yet: retry later at the slow rate */
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
2413
/* busdev notifier callback: forward BUS_CREATE completion status back to
 * the controlvm channel.
 */
static void
bus_create_response(u32 bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
}
2419
/* busdev notifier callback: forward BUS_DESTROY completion status back to
 * the controlvm channel.
 */
static void
bus_destroy_response(u32 bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
}
2425
/* busdev notifier callback: forward DEVICE_CREATE completion status back
 * to the controlvm channel.
 */
static void
device_create_response(u32 bus_no, u32 dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
}
2431
/* busdev notifier callback: forward DEVICE_DESTROY completion status back
 * to the controlvm channel.
 */
static void
device_destroy_response(u32 bus_no, u32 dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
}
2437
/* Report completion of a device pause: responds to the saved
 * DEVICE_CHANGESTATE message with the "standby" segment state.
 * Exported for use by the visorbus driver.
 */
void
visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
12e364b9
KC
2446
/* Report completion of a device resume: responds to the saved
 * DEVICE_CHANGESTATE message with the "running" segment state.
 */
static void
device_resume_response(u32 bus_no, u32 dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_running);
}
2454
f4c11551 2455bool
52063eca 2456visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2457{
4f66520b 2458 void *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2459
0aca7844 2460 if (!p)
f4c11551 2461 return false;
77db7127 2462 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
f4c11551 2463 return true;
12e364b9
KC
2464}
2465EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2466
f4c11551 2467bool
52063eca 2468visorchipset_set_bus_context(u32 bus_no, void *context)
12e364b9 2469{
4f66520b 2470 struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2471
0aca7844 2472 if (!p)
f4c11551 2473 return false;
12e364b9 2474 p->bus_driver_context = context;
f4c11551 2475 return true;
12e364b9
KC
2476}
2477EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2478
f4c11551 2479bool
52063eca 2480visorchipset_get_device_info(u32 bus_no, u32 dev_no,
b486df19 2481 struct visorchipset_device_info *dev_info)
12e364b9 2482{
d480f6a2 2483 void *p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2484
0aca7844 2485 if (!p)
f4c11551 2486 return false;
b486df19 2487 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
f4c11551 2488 return true;
12e364b9
KC
2489}
2490EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2491
f4c11551 2492bool
52063eca 2493visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
12e364b9 2494{
d480f6a2
JS
2495 struct visorchipset_device_info *p;
2496
2497 p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2498
0aca7844 2499 if (!p)
f4c11551 2500 return false;
12e364b9 2501 p->bus_driver_context = context;
f4c11551 2502 return true;
12e364b9
KC
2503}
2504EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2505
2506/* Generic wrapper function for allocating memory from a kmem_cache pool.
2507 */
2508void *
f4c11551 2509visorchipset_cache_alloc(struct kmem_cache *pool, bool ok_to_block,
12e364b9
KC
2510 char *fn, int ln)
2511{
2512 gfp_t gfp;
2513 void *p;
2514
2515 if (ok_to_block)
2516 gfp = GFP_KERNEL;
2517 else
2518 gfp = GFP_ATOMIC;
2519 /* __GFP_NORETRY means "ok to fail", meaning
2520 * kmem_cache_alloc() can return NULL, implying the caller CAN
2521 * cope with failure. If you do NOT specify __GFP_NORETRY,
2522 * Linux will go to extreme measures to get memory for you
2523 * (like, invoke oom killer), which will probably cripple the
2524 * system.
2525 */
2526 gfp |= __GFP_NORETRY;
2527 p = kmem_cache_alloc(pool, gfp);
0aca7844 2528 if (!p)
12e364b9 2529 return NULL;
0aca7844 2530
12e364b9
KC
2531 return p;
2532}
2533
/* Generic wrapper function for freeing memory from a kmem_cache pool.
 * @fn, @ln: caller's file/line, currently unused (kept for ABI)
 */
void
visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
{
	/* NULL guard kept deliberately: unlike kfree(), kmem_cache_free()
	 * is presumably not NULL-safe on all supported kernels - confirm
	 * before removing.
	 */
	if (!p)
		return;

	kmem_cache_free(pool, p);
}
2544
18b87ed1 2545static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2546 struct device_attribute *attr,
2547 const char *buf, size_t count)
12e364b9 2548{
18b87ed1 2549 char msgtype[64];
12e364b9 2550
66e24b76
BR
2551 if (sscanf(buf, "%63s", msgtype) != 1)
2552 return -EINVAL;
2553
ebec8967 2554 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2555 chipset_events[0] = 1;
2556 return count;
ebec8967 2557 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2558 chipset_events[1] = 1;
2559 return count;
e22a4a0f
BR
2560 }
2561 return -EINVAL;
12e364b9
KC
2562}
2563
e56fa7cd
BR
2564/* The parahotplug/devicedisabled interface gets called by our support script
2565 * when an SR-IOV device has been shut down. The ID is passed to the script
2566 * and then passed back when the device has been removed.
2567 */
2568static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2569 struct device_attribute *attr,
2570 const char *buf, size_t count)
e56fa7cd 2571{
94217363 2572 unsigned int id;
e56fa7cd 2573
ebec8967 2574 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2575 return -EINVAL;
2576
2577 parahotplug_request_complete(id, 0);
2578 return count;
2579}
2580
2581/* The parahotplug/deviceenabled interface gets called by our support script
2582 * when an SR-IOV device has been recovered. The ID is passed to the script
2583 * and then passed back when the device has been brought back up.
2584 */
2585static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2586 struct device_attribute *attr,
2587 const char *buf, size_t count)
e56fa7cd 2588{
94217363 2589 unsigned int id;
e56fa7cd 2590
ebec8967 2591 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2592 return -EINVAL;
2593
2594 parahotplug_request_complete(id, 1);
2595 return count;
2596}
2597
e3420ed6
EA
/* mmap handler for the visorchipset character device.  The only supported
 * offset is VISORCHIPSET_MMAP_CONTROLCHANOFFSET, which maps the GP control
 * channel (whose physical address is read out of the controlvm channel)
 * into the caller's address space.
 */
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	GUEST_PHYSICAL_ADDRESS addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		/* fetch the guest-physical address of the GP control
		 * channel out of the controlvm channel header
		 */
		visorchannel_read(*file_controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 gp_control_channel),
			&addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
2636
/* ioctl handler: exposes the hypervisor's virtual-time-offset query and
 * physical-time-update vmcalls to user space.
 */
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	s64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return SUCCESS;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		/* NOTE(review): kernel convention is -ENOTTY for an unknown
		 * ioctl; -EFAULT is kept here to preserve the existing ABI.
		 */
		return -EFAULT;
	}
}
2662
2663static const struct file_operations visorchipset_fops = {
2664 .owner = THIS_MODULE,
2665 .open = visorchipset_open,
2666 .read = NULL,
2667 .write = NULL,
2668 .unlocked_ioctl = visorchipset_ioctl,
2669 .release = visorchipset_release,
2670 .mmap = visorchipset_mmap,
2671};
2672
/* Register the visorchipset character device.
 * @major_dev:         requested dev_t; a major of 0 requests dynamic
 *                     allocation
 * @controlvm_channel: address of the driver's controlvm channel pointer,
 *                     saved for use by mmap
 * Returns 0 on success or a negative errno.
 */
int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		/* dynamic major device number registration required */
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}
2699
2700
2701
12e364b9
KC
/* Module init: locate and validate the controlvm channel, register the
 * character device, start the periodic controlvm worker (or the one-shot
 * crash-device worker in a kdump kernel), register the platform device,
 * and initialize visorbus.
 */
static int __init
visorchipset_init(void)
{
	int rc = 0, x = 0;
	HOSTADDRESS addr;

	if (!unisys_spar_platform)
		return -ENODEV;

	memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
	memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
	memset(&livedump_info, 0, sizeof(livedump_info));
	atomic_set(&livedump_info.buffers_in_use, 0);

	if (visorchipset_testvnic) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
		/* NOTE(review): x is always 0 here, so this failure path
		 * actually returns success - looks like a latent bug from
		 * removed code; confirm and return a real errno.
		 */
		rc = x;
		goto cleanup;
	}

	addr = controlvm_get_channel_address();
	if (addr) {
		controlvm_channel =
		    visorchannel_create_with_lock
		    (addr,
		     sizeof(struct spar_controlvm_channel_protocol),
		     spar_controlvm_channel_protocol_uuid);
		if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
				visorchannel_get_header(controlvm_channel))) {
			initialize_controlvm_payload();
		} else {
			/* channel header failed validation */
			visorchannel_destroy(controlvm_channel);
			controlvm_channel = NULL;
			return -ENODEV;
		}
	} else {
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	if (!visorchipset_disable_controlvm) {
		/* if booting in a crash kernel */
		if (is_kdump_kernel())
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  setup_crash_devices_work_queue);
		else
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  controlvm_periodic_work);
		periodic_controlvm_workqueue =
		    create_singlethread_workqueue("visorchipset_controlvm");

		if (!periodic_controlvm_workqueue) {
			POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
					 DIAG_SEVERITY_ERR);
			rc = -ENOMEM;
			goto cleanup;
		}
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
		/* NOTE(review): queue_delayed_work() returns bool, so
		 * "rc < 0" below can never be true; rc is overwritten
		 * before it matters, but the check is dead code.
		 */
		rc = queue_delayed_work(periodic_controlvm_workqueue,
					&periodic_controlvm_work, poll_jiffies);
		if (rc < 0) {
			POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
					 DIAG_SEVERITY_ERR);
			goto cleanup;
		}
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -1;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	rc = visorbus_init();
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}
2795
/*
 * visorchipset_file_cleanup() - undo visorchipset_file_init()
 * @major_dev: dev_t whose single-minor chrdev region was reserved at init
 *
 * Deletes the chipset character device, but only if it was actually
 * added (file_cdev.ops doubles as the "registered" flag here), clears
 * that flag so a repeat call is harmless, then releases the chrdev
 * region.  Statement order matters: cdev_del() must run before the
 * ops pointer is cleared.
 */
void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}
2804
12e364b9
KC
2805static void
2806visorchipset_exit(void)
2807{
12e364b9
KC
2808 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2809
c79b28f7
PB
2810 visorbus_exit();
2811
12e364b9
KC
2812 if (visorchipset_disable_controlvm) {
2813 ;
2814 } else {
9232d2d6
BR
2815 cancel_delayed_work(&periodic_controlvm_work);
2816 flush_workqueue(periodic_controlvm_workqueue);
2817 destroy_workqueue(periodic_controlvm_workqueue);
2818 periodic_controlvm_workqueue = NULL;
84982fbf 2819 destroy_controlvm_payload_info(&controlvm_payload_info);
12e364b9 2820 }
1783319f 2821
12e364b9
KC
2822 cleanup_controlvm_structures();
2823
da021f02 2824 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2825
c3d9a224 2826 visorchannel_destroy(controlvm_channel);
8a1182eb 2827
addceb12 2828 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
12e364b9 2829 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
12e364b9
KC
2830}
2831
2832module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2833MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
12e364b9
KC
2834module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2835MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
12e364b9
KC
2836module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2837MODULE_PARM_DESC(visorchipset_testmsg,
2838 "1 to manufacture the chipset, bus, and switch messages");
12e364b9 2839module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2840MODULE_PARM_DESC(visorchipset_major,
2841 "major device number to use for the device node");
12e364b9
KC
2842module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2843MODULE_PARM_DESC(visorchipset_serverreqwait,
2844 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2845module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2846MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
12e364b9
KC
2847module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2848MODULE_PARM_DESC(visorchipset_testteardown,
2849 "1 to test teardown of the chipset, bus, and switch");
12e364b9
KC
2850module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2851 S_IRUGO);
2852MODULE_PARM_DESC(visorchipset_disable_controlvm,
2853 "1 to disable polling of controlVm channel");
12e364b9
KC
2854module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2855 int, S_IRUGO);
2856MODULE_PARM_DESC(visorchipset_holdchipsetready,
2857 "1 to hold response to CHIPSET_READY");
b615d628 2858
12e364b9
KC
2859module_init(visorchipset_init);
2860module_exit(visorchipset_exit);
2861
2862MODULE_AUTHOR("Unisys");
2863MODULE_LICENSE("GPL");
2864MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2865 VERSION);
2866MODULE_VERSION(VERSION);
This page took 0.445777 seconds and 5 git commands to generate.