staging: unisys: remove BOOL,TRUE,FALSE definitions
drivers/staging/unisys/visorbus/visorchipset.c
1/* visorchipset.c
2 *
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18#include "controlvmchannel.h"
19#include "version.h"
20#include "procobjecttree.h"
21#include "visorbus.h"
22#include "periodic_work.h"
23#include "uisutils.h"
24#include "controlvmcompletionstatus.h"
25#include "guestlinuxdebug.h"
26#include "visorbus_private.h"
27
28#include <linux/ctype.h>
29#include <linux/fs.h>
30#include <linux/mm.h>
31#include <linux/nls.h>
32#include <linux/netdevice.h>
33#include <linux/platform_device.h>
34#include <linux/uuid.h>
35#include <linux/crash_dump.h>
36
37#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
38
39#define MAX_NAME_SIZE 128
40#define MAX_IP_SIZE 50
41#define MAXOUTSTANDINGCHANNELCOMMAND 256
42#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
43#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
44
45#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
46
47#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
48
49/*
50 * Module parameters
51 */
52static int visorchipset_major;
53static int visorchipset_visorbusregwait = 1; /* default is on */
54static int visorchipset_holdchipsetready;
55static unsigned long controlvm_payload_bytes_buffered;
56
57static int
58visorchipset_open(struct inode *inode, struct file *file)
59{
60 unsigned minor_number = iminor(inode);
61
62 if (minor_number)
63 return -ENODEV;
64 file->private_data = NULL;
65 return 0;
66}
67
68static int
69visorchipset_release(struct inode *inode, struct file *file)
70{
71 return 0;
72}
73
74/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
75 * we switch to slow polling mode. As soon as we get a controlvm
76 * message, we switch back to fast polling mode.
77 */
78#define MIN_IDLE_SECONDS 10
79static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
80static unsigned long most_recent_message_jiffies; /* when we got our last
81 * controlvm message */
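
/* A minimal sketch of the polling policy described above (illustrative
 * only; the actual switching is done by the periodic work function):
 *
 *	if (time_is_before_jiffies(most_recent_message_jiffies +
 *				   MIN_IDLE_SECONDS * HZ))
 *		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
 *	else
 *		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
 */
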
82static int visorbusregistered;
83
84#define MAX_CHIPSET_EVENTS 2
85static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
86
87struct parser_context {
88 unsigned long allocbytes;
89 unsigned long param_bytes;
90 u8 *curr;
91 unsigned long bytes_remaining;
92 bool byte_stream;
93 char data[0];
94};
95
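/* A hedged sketch of how a parser_context is carved out of a single
 * allocation (this is what parser_init_guts() below does): data[0] is a
 * flexible array member, so the header and the copied payload are
 * contiguous ("payload_src" is a hypothetical name):
 *
 *	int allocbytes = sizeof(struct parser_context) + bytes;
 *	struct parser_context *ctx;
 *
 *	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
 *	if (ctx)
 *		memcpy(ctx->data, payload_src, bytes);
 */
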
96static struct delayed_work periodic_controlvm_work;
97static struct workqueue_struct *periodic_controlvm_workqueue;
98static DEFINE_SEMAPHORE(notifier_lock);
99
100static struct cdev file_cdev;
101static struct visorchannel **file_controlvm_channel;
102static struct controlvm_message_header g_chipset_msg_hdr;
103static const uuid_le spar_diag_pool_channel_protocol_uuid =
104 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
105/* 0xffffff is an invalid Bus/Device number */
106static u32 g_diagpool_bus_no = 0xffffff;
107static u32 g_diagpool_dev_no = 0xffffff;
108static struct controlvm_message_packet g_devicechangestate_packet;
109
110#define is_diagpool_channel(channel_type_guid) \
111 (uuid_le_cmp(channel_type_guid,\
112 spar_diag_pool_channel_protocol_uuid) == 0)
113
114static LIST_HEAD(bus_info_list);
115static LIST_HEAD(dev_info_list);
116
117static struct visorchannel *controlvm_channel;
118
119/* Manages the request payload in the controlvm channel */
120struct visor_controlvm_payload_info {
121 u8 __iomem *ptr; /* pointer to base address of payload pool */
122 u64 offset; /* offset from beginning of controlvm
 123 * channel to beginning of payload pool */
124 u32 bytes; /* number of bytes in payload pool */
125};
126
127static struct visor_controlvm_payload_info controlvm_payload_info;
128
129/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
130 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
131 */
132struct visor_livedump_info {
133 struct controlvm_message_header dumpcapture_header;
134 struct controlvm_message_header gettextdump_header;
135 struct controlvm_message_header dumpcomplete_header;
136 bool gettextdump_outstanding;
137 u32 crc32;
138 unsigned long length;
139 atomic_t buffers_in_use;
140 unsigned long destination;
141};
142
143static struct visor_livedump_info livedump_info;
144
145/* The following globals are used to handle the scenario where we are unable to
146 * offload the payload from a controlvm message due to memory requirements. In
147 * this scenario, we simply stash the controlvm message, then attempt to
148 * process it again the next time controlvm_periodic_work() runs.
149 */
150static struct controlvm_message controlvm_pending_msg;
151static bool controlvm_pending_msg_valid;
152
 153/* This identifies a data buffer that has been received via a controlvm
 154 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
155 */
156struct putfile_buffer_entry {
157 struct list_head next; /* putfile_buffer_entry list */
158 struct parser_context *parser_ctx; /* points to input data buffer */
159};
160
161/* List of struct putfile_request *, via next_putfile_request member.
162 * Each entry in this list identifies an outstanding TRANSMIT_FILE
163 * conversation.
164 */
165static LIST_HEAD(putfile_request_list);
166
167/* This describes a buffer and its current state of transfer (e.g., how many
168 * bytes have already been supplied as putfile data, and how many bytes are
169 * remaining) for a putfile_request.
170 */
171struct putfile_active_buffer {
172 /* a payload from a controlvm message, containing a file data buffer */
173 struct parser_context *parser_ctx;
174 /* points within data area of parser_ctx to next byte of data */
175 u8 *pnext;
176 /* # bytes left from <pnext> to the end of this data buffer */
177 size_t bytes_remaining;
178};
179
180#define PUTFILE_REQUEST_SIG 0x0906101302281211
181/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
182 * conversation. Structs of this type are dynamically linked into
 183 * <putfile_request_list>.
184 */
185struct putfile_request {
186 u64 sig; /* PUTFILE_REQUEST_SIG */
187
188 /* header from original TransmitFile request */
189 struct controlvm_message_header controlvm_header;
190 u64 file_request_number; /* from original TransmitFile request */
191
192 /* link to next struct putfile_request */
193 struct list_head next_putfile_request;
194
195 /* most-recent sequence number supplied via a controlvm message */
196 u64 data_sequence_number;
197
198 /* head of putfile_buffer_entry list, which describes the data to be
199 * supplied as putfile data;
200 * - this list is added to when controlvm messages come in that supply
201 * file data
202 * - this list is removed from via the hotplug program that is actually
203 * consuming these buffers to write as file data */
204 struct list_head input_buffer_list;
205 spinlock_t req_list_lock; /* lock for input_buffer_list */
206
207 /* waiters for input_buffer_list to go non-empty */
208 wait_queue_head_t input_buffer_wq;
209
210 /* data not yet read within current putfile_buffer_entry */
211 struct putfile_active_buffer active_buf;
212
213 /* <0 = failed, 0 = in-progress, >0 = successful; */
214 /* note that this must be set with req_list_lock, and if you set <0, */
215 /* it is your responsibility to also free up all of the other objects */
216 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
217 /* before releasing the lock */
218 int completion_status;
219};
220
221struct parahotplug_request {
222 struct list_head list;
223 int id;
224 unsigned long expiration;
225 struct controlvm_message msg;
226};
227
228static LIST_HEAD(parahotplug_request_list);
229static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
230static void parahotplug_process_list(void);
231
 232/* The notifier functions registered by the visorbus driver via
 233 * visorchipset_register_busdev().
 234 */
235static struct visorchipset_busdev_notifiers busdev_notifiers;
236
237static void bus_create_response(u32 bus_no, int response);
238static void bus_destroy_response(u32 bus_no, int response);
239static void device_create_response(u32 bus_no, u32 dev_no, int response);
240static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
241static void device_resume_response(u32 bus_no, u32 dev_no, int response);
242
243static void visorchipset_device_pause_response(u32 bus_no, u32 dev_no,
244 int response);
245
246static struct visorchipset_busdev_responders busdev_responders = {
247 .bus_create = bus_create_response,
248 .bus_destroy = bus_destroy_response,
249 .device_create = device_create_response,
250 .device_destroy = device_destroy_response,
251 .device_pause = visorchipset_device_pause_response,
252 .device_resume = device_resume_response,
253};
254
255/* info for /dev/visorchipset */
256static dev_t major_dev = -1; /**< indicates major num for device */
257
258/* prototypes for attributes */
259static ssize_t toolaction_show(struct device *dev,
260 struct device_attribute *attr, char *buf);
261static ssize_t toolaction_store(struct device *dev,
262 struct device_attribute *attr,
263 const char *buf, size_t count);
264static DEVICE_ATTR_RW(toolaction);
265
266static ssize_t boottotool_show(struct device *dev,
267 struct device_attribute *attr, char *buf);
268static ssize_t boottotool_store(struct device *dev,
269 struct device_attribute *attr, const char *buf,
270 size_t count);
271static DEVICE_ATTR_RW(boottotool);
272
273static ssize_t error_show(struct device *dev, struct device_attribute *attr,
274 char *buf);
275static ssize_t error_store(struct device *dev, struct device_attribute *attr,
276 const char *buf, size_t count);
277static DEVICE_ATTR_RW(error);
278
279static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
280 char *buf);
281static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
282 const char *buf, size_t count);
283static DEVICE_ATTR_RW(textid);
284
285static ssize_t remaining_steps_show(struct device *dev,
286 struct device_attribute *attr, char *buf);
287static ssize_t remaining_steps_store(struct device *dev,
288 struct device_attribute *attr,
289 const char *buf, size_t count);
290static DEVICE_ATTR_RW(remaining_steps);
291
292static ssize_t chipsetready_store(struct device *dev,
293 struct device_attribute *attr,
294 const char *buf, size_t count);
295static DEVICE_ATTR_WO(chipsetready);
296
297static ssize_t devicedisabled_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count);
300static DEVICE_ATTR_WO(devicedisabled);
301
302static ssize_t deviceenabled_store(struct device *dev,
303 struct device_attribute *attr,
304 const char *buf, size_t count);
305static DEVICE_ATTR_WO(deviceenabled);
306
307static struct attribute *visorchipset_install_attrs[] = {
308 &dev_attr_toolaction.attr,
309 &dev_attr_boottotool.attr,
310 &dev_attr_error.attr,
311 &dev_attr_textid.attr,
312 &dev_attr_remaining_steps.attr,
313 NULL
314};
315
316static struct attribute_group visorchipset_install_group = {
317 .name = "install",
318 .attrs = visorchipset_install_attrs
319};
320
321static struct attribute *visorchipset_guest_attrs[] = {
322 &dev_attr_chipsetready.attr,
323 NULL
324};
325
326static struct attribute_group visorchipset_guest_group = {
327 .name = "guest",
328 .attrs = visorchipset_guest_attrs
329};
330
331static struct attribute *visorchipset_parahotplug_attrs[] = {
332 &dev_attr_devicedisabled.attr,
333 &dev_attr_deviceenabled.attr,
334 NULL
335};
336
337static struct attribute_group visorchipset_parahotplug_group = {
338 .name = "parahotplug",
339 .attrs = visorchipset_parahotplug_attrs
340};
341
342static const struct attribute_group *visorchipset_dev_groups[] = {
343 &visorchipset_install_group,
344 &visorchipset_guest_group,
345 &visorchipset_parahotplug_group,
346 NULL
347};
348
349/* /sys/devices/platform/visorchipset */
350static struct platform_device visorchipset_platform_device = {
351 .name = "visorchipset",
352 .id = -1,
353 .dev.groups = visorchipset_dev_groups,
354};
355
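/* The attribute groups above yield a sysfs layout like the following
 * once the platform device is registered (illustrative):
 *
 *	/sys/devices/platform/visorchipset/install/toolaction
 *	/sys/devices/platform/visorchipset/install/boottotool
 *	/sys/devices/platform/visorchipset/install/error
 *	/sys/devices/platform/visorchipset/install/textid
 *	/sys/devices/platform/visorchipset/install/remaining_steps
 *	/sys/devices/platform/visorchipset/guest/chipsetready
 *	/sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *	/sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */
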
356/* Function prototypes */
357static void controlvm_respond(struct controlvm_message_header *msg_hdr,
358 int response);
359static void controlvm_respond_chipset_init(
360 struct controlvm_message_header *msg_hdr, int response,
361 enum ultra_chipset_feature features);
362static void controlvm_respond_physdev_changestate(
363 struct controlvm_message_header *msg_hdr, int response,
364 struct spar_segment_state state);
365
366
367static void parser_done(struct parser_context *ctx);
368
369static struct parser_context *
370parser_init_guts(u64 addr, u32 bytes, bool local,
371 bool standard_payload_header, bool *retry)
372{
373 int allocbytes = sizeof(struct parser_context) + bytes;
374 struct parser_context *rc = NULL;
375 struct parser_context *ctx = NULL;
376 struct spar_controlvm_parameters_header *phdr = NULL;
377
378 if (retry)
379 *retry = false;
380 if (!standard_payload_header)
 381 /* allocate and zero an extra byte so the payload is
 382 * '\0'-terminated
 383 */
384 allocbytes++;
385 if ((controlvm_payload_bytes_buffered + bytes)
386 > MAX_CONTROLVM_PAYLOAD_BYTES) {
387 if (retry)
388 *retry = true;
389 rc = NULL;
390 goto cleanup;
391 }
392 ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
393 if (!ctx) {
394 if (retry)
395 *retry = true;
396 rc = NULL;
397 goto cleanup;
398 }
399
400 ctx->allocbytes = allocbytes;
401 ctx->param_bytes = bytes;
402 ctx->curr = NULL;
403 ctx->bytes_remaining = 0;
404 ctx->byte_stream = false;
405 if (local) {
406 void *p;
407
408 if (addr > virt_to_phys(high_memory - 1)) {
409 rc = NULL;
410 goto cleanup;
411 }
412 p = __va((unsigned long) (addr));
413 memcpy(ctx->data, p, bytes);
414 } else {
415 void __iomem *mapping;
416
417 if (!request_mem_region(addr, bytes, "visorchipset")) {
418 rc = NULL;
419 goto cleanup;
420 }
421
422 mapping = ioremap_cache(addr, bytes);
423 if (!mapping) {
424 release_mem_region(addr, bytes);
425 rc = NULL;
426 goto cleanup;
427 }
428 memcpy_fromio(ctx->data, mapping, bytes);
429 release_mem_region(addr, bytes);
430 }
431 if (!standard_payload_header) {
432 ctx->byte_stream = true;
433 rc = ctx;
434 goto cleanup;
435 }
436 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
437 if (phdr->total_length != bytes) {
438 rc = NULL;
439 goto cleanup;
440 }
441 if (phdr->total_length < phdr->header_length) {
442 rc = NULL;
443 goto cleanup;
444 }
445 if (phdr->header_length <
446 sizeof(struct spar_controlvm_parameters_header)) {
447 rc = NULL;
448 goto cleanup;
449 }
450
451 rc = ctx;
452cleanup:
453 if (rc) {
454 controlvm_payload_bytes_buffered += ctx->param_bytes;
455 } else {
456 if (ctx) {
457 parser_done(ctx);
458 ctx = NULL;
459 }
460 }
461 return rc;
462}
463
464struct parser_context *
465parser_init(u64 addr, u32 bytes, bool local, bool *retry)
466{
467 return parser_init_guts(addr, bytes, local, true, retry);
468}
469
 470/* Call this instead of parser_init() if the payload area consists of just
 471 * a sequence of bytes, rather than a struct
 472 * spar_controlvm_parameters_header. Afterwards, you can call
 473 * parser_simpleString_get() or parser_byte_stream_get() to obtain the data.
 474 */
475struct parser_context *
476parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
477{
478 return parser_init_guts(addr, bytes, local, false, retry);
479}
480
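/* A minimal usage sketch, assuming "paddr" and "nbytes" (hypothetical
 * names) describe the payload area named by a controlvm message:
 *
 *	bool retry = false;
 *	unsigned long len;
 *	void *data;
 *	struct parser_context *ctx;
 *
 *	ctx = parser_init_byte_stream(paddr, nbytes, false, &retry);
 *	if (!ctx) {
 *		if (retry)
 *			;	// stash the message and retry later
 *		return;
 *	}
 *	data = parser_byte_stream_get(ctx, &len);
 *	// ... consume data[0..len-1] ...
 *	parser_done(ctx);
 */
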
481/* Obtain '\0'-terminated copy of string in payload area.
482 */
483char *
484parser_simpleString_get(struct parser_context *ctx)
485{
486 if (!ctx->byte_stream)
487 return NULL;
 488 return ctx->data; /* note this IS '\0'-terminated, because of
 489 * the number of bytes we alloc+clear in
 490 * parser_init_byte_stream() */
491}
492
493/* Obtain a copy of the buffer in the payload area.
494 */
495void *parser_byte_stream_get(struct parser_context *ctx, unsigned long *nbytes)
496{
497 if (!ctx->byte_stream)
498 return NULL;
499 if (nbytes)
500 *nbytes = ctx->param_bytes;
501 return (void *)ctx->data;
502}
503
504uuid_le
505parser_id_get(struct parser_context *ctx)
506{
507 struct spar_controlvm_parameters_header *phdr = NULL;
508
509 if (ctx == NULL)
510 return NULL_UUID_LE;
511 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
512 return phdr->id;
513}
514
 515/** Identifies which of the strings within the parameter payload is to be
 516 * selected by parser_param_start().
 517 */
518
519enum PARSER_WHICH_STRING {
520 PARSERSTRING_INITIATOR,
521 PARSERSTRING_TARGET,
522 PARSERSTRING_CONNECTION,
523 PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
524};
525
526void
527parser_param_start(struct parser_context *ctx,
528 enum PARSER_WHICH_STRING which_string)
529{
530 struct spar_controlvm_parameters_header *phdr = NULL;
531
532 if (ctx == NULL)
533 goto Away;
534 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
535 switch (which_string) {
536 case PARSERSTRING_INITIATOR:
537 ctx->curr = ctx->data + phdr->initiator_offset;
538 ctx->bytes_remaining = phdr->initiator_length;
539 break;
540 case PARSERSTRING_TARGET:
541 ctx->curr = ctx->data + phdr->target_offset;
542 ctx->bytes_remaining = phdr->target_length;
543 break;
544 case PARSERSTRING_CONNECTION:
545 ctx->curr = ctx->data + phdr->connection_offset;
546 ctx->bytes_remaining = phdr->connection_length;
547 break;
548 case PARSERSTRING_NAME:
549 ctx->curr = ctx->data + phdr->name_offset;
550 ctx->bytes_remaining = phdr->name_length;
551 break;
552 default:
553 break;
554 }
555
556Away:
557 return;
558}
559
560void
561parser_done(struct parser_context *ctx)
562{
563 if (!ctx)
564 return;
565 controlvm_payload_bytes_buffered -= ctx->param_bytes;
566 kfree(ctx);
567}
568
569/** Return length of string not counting trailing spaces. */
570static int
571string_length_no_trail(char *s, int len)
572{
573 int i = len - 1;
574
575 while (i >= 0) {
576 if (!isspace(s[i]))
577 return i + 1;
578 i--;
579 }
580 return 0;
581}
582
 583/** Grab the next name and value out of the parameter buffer.
 584 * The entire parameter buffer is '\0'-terminated and looks like this
 585 * (pairs separated by ',' or ';'; values may be quoted with ' or "):
 586 * <name>:<value>,
 587 * <name>:<value>;
 588 * ...
 589 * If successful, the next <name> is returned within the supplied
 590 * <nam> buffer (the name is always upper-cased), and the corresponding
 591 * <value> is returned within a kmalloc()ed buffer, whose pointer is
 592 * provided as the return value of this function.
 593 * (The total number of bytes allocated is strlen(<value>)+1.)
 594 *
 595 * NULL is returned to indicate failure, which can occur for several reasons:
 596 * - all <name>:<value> pairs have already been processed
 597 * - bad parameter
 598 * - parameter buffer ends prematurely (couldn't find a ':' or '\0' within
 599 * the confines of the parameter buffer)
 600 * - the <nam> buffer is not large enough to hold the <name> of the next
 601 * parameter
 602 */
603void *
604parser_param_get(struct parser_context *ctx, char *nam, int namesize)
605{
606 u8 *pscan, *pnam = nam;
607 unsigned long nscan;
608 int value_length = -1, orig_value_length = -1;
609 void *value = NULL;
610 int i;
611 int closing_quote = 0;
612
613 if (!ctx)
614 return NULL;
615 pscan = ctx->curr;
616 nscan = ctx->bytes_remaining;
617 if (nscan == 0)
618 return NULL;
619 if (*pscan == '\0')
620 /* This is the normal return point after you have processed
621 * all of the <name>=<value> pairs in a syntactically-valid
622 * parameter buffer.
623 */
624 return NULL;
625
626 /* skip whitespace */
627 while (isspace(*pscan)) {
628 pscan++;
629 nscan--;
630 if (nscan == 0)
631 return NULL;
632 }
633
634 while (*pscan != ':') {
635 if (namesize <= 0)
636 return NULL;
637 *pnam = toupper(*pscan);
638 pnam++;
639 namesize--;
640 pscan++;
641 nscan--;
642 if (nscan == 0)
643 return NULL;
644 }
645 if (namesize <= 0)
646 return NULL;
647 *pnam = '\0';
648 nam[string_length_no_trail(nam, strlen(nam))] = '\0';
649
650 /* point to char immediately after ":" in "<name>:<value>" */
651 pscan++;
652 nscan--;
653 /* skip whitespace */
654 while (isspace(*pscan)) {
655 pscan++;
656 nscan--;
657 if (nscan == 0)
658 return NULL;
659 }
660 if (nscan == 0)
661 return NULL;
662 if (*pscan == '\'' || *pscan == '"') {
663 closing_quote = *pscan;
664 pscan++;
665 nscan--;
666 if (nscan == 0)
667 return NULL;
668 }
669
670 /* look for a separator character, terminator character, or
671 * end of data
672 */
673 for (i = 0, value_length = -1; i < nscan; i++) {
674 if (closing_quote) {
675 if (pscan[i] == '\0')
676 return NULL;
677 if (pscan[i] == closing_quote) {
678 value_length = i;
679 break;
680 }
 681 } else if (pscan[i] == ',' ||
 682 pscan[i] == ';' ||
 683 pscan[i] == '\0') {
684 value_length = i;
685 break;
686 }
687 }
688 if (value_length < 0) {
689 if (closing_quote)
690 return NULL;
691 value_length = nscan;
692 }
693 orig_value_length = value_length;
694 if (closing_quote == 0)
695 value_length = string_length_no_trail(pscan, orig_value_length);
696 value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
697 if (value == NULL)
698 return NULL;
699 memcpy(value, pscan, value_length);
700 ((u8 *) (value))[value_length] = '\0';
701
702 pscan += orig_value_length;
703 nscan -= orig_value_length;
704
705 /* skip past separator or closing quote */
706 if (nscan > 0) {
707 if (*pscan != '\0') {
708 pscan++;
709 nscan--;
710 }
711 }
712
713 if (closing_quote && (nscan > 0)) {
714 /* we still need to skip around the real separator if present */
715 /* first, skip whitespace */
716 while (isspace(*pscan)) {
717 pscan++;
718 nscan--;
719 if (nscan == 0)
720 break;
721 }
722 if (nscan > 0) {
723 if (*pscan == ',' || *pscan == ';') {
724 pscan++;
725 nscan--;
726 } else if (*pscan != '\0') {
727 kfree(value);
728 value = NULL;
729 return NULL;
730 }
731 }
732 }
733 ctx->curr = pscan;
734 ctx->bytes_remaining = nscan;
735 return value;
736}
737
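/* A hedged sketch of walking the <name>:<value> pairs of the NAME string
 * ("ctx" is assumed to come from a successful parser_init()):
 *
 *	char nam[MAX_NAME_SIZE];
 *	void *value;
 *
 *	parser_param_start(ctx, PARSERSTRING_NAME);
 *	while ((value = parser_param_get(ctx, nam, sizeof(nam)))) {
 *		// nam holds the upper-cased <name>; value the '\0'-terminated
 *		// <value>, which the caller must kfree()
 *		kfree(value);
 *	}
 */
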
738void *
739parser_string_get(struct parser_context *ctx)
740{
741 u8 *pscan;
742 unsigned long nscan;
743 int value_length = -1;
744 void *value = NULL;
745 int i;
746
747 if (!ctx)
748 return NULL;
749 pscan = ctx->curr;
750 nscan = ctx->bytes_remaining;
751 if (nscan == 0)
752 return NULL;
753 if (!pscan)
754 return NULL;
755 for (i = 0, value_length = -1; i < nscan; i++)
756 if (pscan[i] == '\0') {
757 value_length = i;
758 break;
759 }
760 if (value_length < 0) /* '\0' was not included in the length */
761 value_length = nscan;
762 value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
763 if (value == NULL)
764 return NULL;
765 if (value_length > 0)
766 memcpy(value, pscan, value_length);
767 ((u8 *) (value))[value_length] = '\0';
768 return value;
769}
770
771
772static ssize_t toolaction_show(struct device *dev,
773 struct device_attribute *attr,
774 char *buf)
775{
776 u8 tool_action;
777
778 visorchannel_read(controlvm_channel,
779 offsetof(struct spar_controlvm_channel_protocol,
780 tool_action), &tool_action, sizeof(u8));
781 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
782}
783
784static ssize_t toolaction_store(struct device *dev,
785 struct device_attribute *attr,
786 const char *buf, size_t count)
787{
788 u8 tool_action;
789 int ret;
790
791 if (kstrtou8(buf, 10, &tool_action))
792 return -EINVAL;
793
794 ret = visorchannel_write(controlvm_channel,
795 offsetof(struct spar_controlvm_channel_protocol,
796 tool_action),
797 &tool_action, sizeof(u8));
798
799 if (ret)
800 return ret;
801 return count;
802}
803
804static ssize_t boottotool_show(struct device *dev,
805 struct device_attribute *attr,
806 char *buf)
807{
808 struct efi_spar_indication efi_spar_indication;
809
810 visorchannel_read(controlvm_channel,
811 offsetof(struct spar_controlvm_channel_protocol,
812 efi_spar_ind), &efi_spar_indication,
813 sizeof(struct efi_spar_indication));
814 return scnprintf(buf, PAGE_SIZE, "%u\n",
815 efi_spar_indication.boot_to_tool);
816}
817
818static ssize_t boottotool_store(struct device *dev,
819 struct device_attribute *attr,
820 const char *buf, size_t count)
821{
822 int val, ret;
823 struct efi_spar_indication efi_spar_indication;
824
825 if (kstrtoint(buf, 10, &val))
826 return -EINVAL;
827
828 efi_spar_indication.boot_to_tool = val;
829 ret = visorchannel_write(controlvm_channel,
830 offsetof(struct spar_controlvm_channel_protocol,
831 efi_spar_ind), &(efi_spar_indication),
832 sizeof(struct efi_spar_indication));
833
834 if (ret)
835 return ret;
836 return count;
837}
838
839static ssize_t error_show(struct device *dev, struct device_attribute *attr,
840 char *buf)
841{
842 u32 error;
843
844 visorchannel_read(controlvm_channel,
845 offsetof(struct spar_controlvm_channel_protocol,
846 installation_error),
847 &error, sizeof(u32));
848 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
849}
850
851static ssize_t error_store(struct device *dev, struct device_attribute *attr,
852 const char *buf, size_t count)
853{
854 u32 error;
855 int ret;
856
857 if (kstrtou32(buf, 10, &error))
858 return -EINVAL;
859
860 ret = visorchannel_write(controlvm_channel,
861 offsetof(struct spar_controlvm_channel_protocol,
862 installation_error),
863 &error, sizeof(u32));
864 if (ret)
865 return ret;
866 return count;
867}
868
869static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
870 char *buf)
871{
872 u32 text_id;
873
874 visorchannel_read(controlvm_channel,
875 offsetof(struct spar_controlvm_channel_protocol,
876 installation_text_id),
877 &text_id, sizeof(u32));
878 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
879}
880
881static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
882 const char *buf, size_t count)
883{
884 u32 text_id;
885 int ret;
886
887 if (kstrtou32(buf, 10, &text_id))
888 return -EINVAL;
889
890 ret = visorchannel_write(controlvm_channel,
891 offsetof(struct spar_controlvm_channel_protocol,
892 installation_text_id),
893 &text_id, sizeof(u32));
894 if (ret)
895 return ret;
896 return count;
897}
898
899static ssize_t remaining_steps_show(struct device *dev,
900 struct device_attribute *attr, char *buf)
901{
902 u16 remaining_steps;
903
904 visorchannel_read(controlvm_channel,
905 offsetof(struct spar_controlvm_channel_protocol,
906 installation_remaining_steps),
907 &remaining_steps, sizeof(u16));
908 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
909}
910
911static ssize_t remaining_steps_store(struct device *dev,
912 struct device_attribute *attr,
913 const char *buf, size_t count)
914{
915 u16 remaining_steps;
916 int ret;
917
918 if (kstrtou16(buf, 10, &remaining_steps))
919 return -EINVAL;
920
921 ret = visorchannel_write(controlvm_channel,
922 offsetof(struct spar_controlvm_channel_protocol,
923 installation_remaining_steps),
924 &remaining_steps, sizeof(u16));
925 if (ret)
926 return ret;
927 return count;
928}
929
930static void
931bus_info_clear(void *v)
932{
933 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;
934
935 kfree(p->name);
936 kfree(p->description);
937 memset(p, 0, sizeof(struct visorchipset_bus_info));
938}
939
940static void
941dev_info_clear(void *v)
942{
943 struct visorchipset_device_info *p =
944 (struct visorchipset_device_info *) v;
945
946 memset(p, 0, sizeof(struct visorchipset_device_info));
947}
948
949static struct visorchipset_bus_info *
950bus_find(struct list_head *list, u32 bus_no)
951{
952 struct visorchipset_bus_info *p;
953
954 list_for_each_entry(p, list, entry) {
955 if (p->bus_no == bus_no)
956 return p;
957 }
958
959 return NULL;
960}
961
962static struct visorchipset_device_info *
963device_find(struct list_head *list, u32 bus_no, u32 dev_no)
964{
965 struct visorchipset_device_info *p;
966
967 list_for_each_entry(p, list, entry) {
968 if (p->bus_no == bus_no && p->dev_no == dev_no)
969 return p;
970 }
971
972 return NULL;
973}
974
975static void busdevices_del(struct list_head *list, u32 bus_no)
976{
977 struct visorchipset_device_info *p, *tmp;
978
979 list_for_each_entry_safe(p, tmp, list, entry) {
980 if (p->bus_no == bus_no) {
981 list_del(&p->entry);
982 kfree(p);
983 }
984 }
985}
986
987static u8
988check_chipset_events(void)
989{
990 int i;
991 u8 send_msg = 1;
992 /* Check events to determine if response should be sent */
993 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
994 send_msg &= chipset_events[i];
995 return send_msg;
996}
997
998static void
999clear_chipset_events(void)
1000{
1001 int i;
1002 /* Clear chipset_events */
1003 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
1004 chipset_events[i] = 0;
1005}
1006
1007void
1008visorchipset_register_busdev(
1009 struct visorchipset_busdev_notifiers *notifiers,
1010 struct visorchipset_busdev_responders *responders,
1011 struct ultra_vbus_deviceinfo *driver_info)
1012{
1013 down(&notifier_lock);
1014 if (!notifiers) {
1015 memset(&busdev_notifiers, 0,
1016 sizeof(busdev_notifiers));
1017 visorbusregistered = 0; /* clear flag */
1018 } else {
1019 busdev_notifiers = *notifiers;
1020 visorbusregistered = 1; /* set flag */
1021 }
1022 if (responders)
1023 *responders = busdev_responders;
1024 if (driver_info)
1025 bus_device_info_init(driver_info, "chipset", "visorchipset",
1026 VERSION, NULL);
1027
1028 up(&notifier_lock);
1029}
1030EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
1031
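/* A hedged sketch of how the visorbus driver is expected to register
 * (the callback names are hypothetical):
 *
 *	static struct visorchipset_busdev_notifiers notifiers = {
 *		.bus_create = my_bus_create,
 *		.bus_destroy = my_bus_destroy,
 *		// ... device_create, device_destroy, etc.
 *	};
 *	static struct visorchipset_busdev_responders responders;
 *	static struct ultra_vbus_deviceinfo driver_info;
 *
 *	visorchipset_register_busdev(&notifiers, &responders, &driver_info);
 *	// responders now holds the response callbacks to invoke when each
 *	// notified bus/device operation completes
 */
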
1032static void
1033cleanup_controlvm_structures(void)
1034{
1035 struct visorchipset_bus_info *bi, *tmp_bi;
1036 struct visorchipset_device_info *di, *tmp_di;
1037
1038 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
1039 bus_info_clear(bi);
1040 list_del(&bi->entry);
1041 kfree(bi);
1042 }
1043
1044 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
1045 dev_info_clear(di);
1046 list_del(&di->entry);
1047 kfree(di);
1048 }
1049}
1050
1051static void
1052chipset_init(struct controlvm_message *inmsg)
1053{
1054 static int chipset_inited;
1055 enum ultra_chipset_feature features = 0;
1056 int rc = CONTROLVM_RESP_SUCCESS;
1057
1058 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1059 if (chipset_inited) {
1060 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1061 goto cleanup;
1062 }
1063 chipset_inited = 1;
1064 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
1065
1066 /* Set features to indicate we support parahotplug (if Command
1067 * also supports it). */
1068 features =
1069 inmsg->cmd.init_chipset.
1070 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
1071
1072 /* Set the "reply" bit so Command knows this is a
1073 * features-aware driver. */
1074 features |= ULTRA_CHIPSET_FEATURE_REPLY;
1075
1076cleanup:
1077 if (rc < 0)
1078 cleanup_controlvm_structures();
1079 if (inmsg->hdr.flags.response_expected)
1080 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
1081}
1082
1083static void
1084controlvm_init_response(struct controlvm_message *msg,
1085 struct controlvm_message_header *msg_hdr, int response)
1086{
1087 memset(msg, 0, sizeof(struct controlvm_message));
1088 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
1089 msg->hdr.payload_bytes = 0;
1090 msg->hdr.payload_vm_offset = 0;
1091 msg->hdr.payload_max_bytes = 0;
1092 if (response < 0) {
1093 msg->hdr.flags.failed = 1;
1094 msg->hdr.completion_status = (u32) (-response);
1095 }
1096}
1097
1098static void
1099controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
1100{
1101 struct controlvm_message outmsg;
1102
1103 controlvm_init_response(&outmsg, msg_hdr, response);
1104 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
1105 * back the deviceChangeState structure in the packet. */
1106 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
1107 g_devicechangestate_packet.device_change_state.bus_no ==
1108 g_diagpool_bus_no &&
1109 g_devicechangestate_packet.device_change_state.dev_no ==
1110 g_diagpool_dev_no)
1111 outmsg.cmd = g_devicechangestate_packet;
1112 if (outmsg.hdr.flags.test_message == 1)
1113 return;
1114
1115 if (!visorchannel_signalinsert(controlvm_channel,
1116 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
1117 return;
1118 }
1119}
1120
1121static void
1122controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
1123 int response,
1124 enum ultra_chipset_feature features)
1125{
1126 struct controlvm_message outmsg;
1127
1128 controlvm_init_response(&outmsg, msg_hdr, response);
1129 outmsg.cmd.init_chipset.features = features;
1130 if (!visorchannel_signalinsert(controlvm_channel,
1131 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
1132 return;
1133 }
1134}
1135
1136static void controlvm_respond_physdev_changestate(
1137 struct controlvm_message_header *msg_hdr, int response,
1138 struct spar_segment_state state)
1139{
1140 struct controlvm_message outmsg;
1141
1142 controlvm_init_response(&outmsg, msg_hdr, response);
1143 outmsg.cmd.device_change_state.state = state;
1144 outmsg.cmd.device_change_state.flags.phys_device = 1;
1145 if (!visorchannel_signalinsert(controlvm_channel,
1146 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
1147 return;
1148 }
1149}
1150
1151enum crash_obj_type {
1152 CRASH_DEV,
1153 CRASH_BUS,
1154};
1155
1156void
1157visorchipset_save_message(struct controlvm_message *msg,
1158 enum crash_obj_type type)
1159{
1160 u32 crash_msg_offset;
1161 u16 crash_msg_count;
1162
1163 /* get saved message count */
1164 if (visorchannel_read(controlvm_channel,
1165 offsetof(struct spar_controlvm_channel_protocol,
1166 saved_crash_message_count),
1167 &crash_msg_count, sizeof(u16)) < 0) {
1168 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1169 POSTCODE_SEVERITY_ERR);
1170 return;
1171 }
1172
1173 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1174 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1175 crash_msg_count,
1176 POSTCODE_SEVERITY_ERR);
1177 return;
1178 }
1179
1180 /* get saved crash message offset */
1181 if (visorchannel_read(controlvm_channel,
1182 offsetof(struct spar_controlvm_channel_protocol,
1183 saved_crash_message_offset),
1184 &crash_msg_offset, sizeof(u32)) < 0) {
1185 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1186 POSTCODE_SEVERITY_ERR);
1187 return;
1188 }
1189
1190 if (type == CRASH_BUS) {
1191 if (visorchannel_write(controlvm_channel,
1192 crash_msg_offset,
1193 msg,
1194 sizeof(struct controlvm_message)) < 0) {
1195 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
1196 POSTCODE_SEVERITY_ERR);
1197 return;
1198 }
1199 } else { /* CRASH_DEV */
1200 if (visorchannel_write(controlvm_channel,
1201 crash_msg_offset +
1202 sizeof(struct controlvm_message), msg,
1203 sizeof(struct controlvm_message)) < 0) {
1204 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
1205 POSTCODE_SEVERITY_ERR);
1206 return;
1207 }
1208 }
1209}
1210EXPORT_SYMBOL_GPL(visorchipset_save_message);
1211
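/* Layout of the saved-crash-message area implied by the writes above
 * (CONTROLVM_CRASHMSG_MAX is expected to be 2):
 *
 *	crash_msg_offset                                    -> CRASH_BUS msg
 *	crash_msg_offset + sizeof(struct controlvm_message) -> CRASH_DEV msg
 */
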
1212static void
1213bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
1214{
1215 struct visorchipset_bus_info *p;
1216 bool need_clear = false;
1217
1218 p = bus_find(&bus_info_list, bus_no);
1219 if (!p)
1220 return;
1221
1222 if (response < 0) {
1223 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
1224 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
1225 /* undo the row we just created... */
1226 busdevices_del(&dev_info_list, bus_no);
1227 } else {
1228 if (cmd_id == CONTROLVM_BUS_CREATE)
1229 p->state.created = 1;
1230 if (cmd_id == CONTROLVM_BUS_DESTROY)
1231 need_clear = true;
1232 }
1233
1234 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
1235 return; /* no controlvm response needed */
1236 if (p->pending_msg_hdr.id != (u32)cmd_id)
1237 return;
1238 controlvm_respond(&p->pending_msg_hdr, response);
1239 p->pending_msg_hdr.id = CONTROLVM_INVALID;
1240 if (need_clear) {
1241 bus_info_clear(p);
1242 busdevices_del(&dev_info_list, bus_no);
1243 }
1244}
1245
1246static void
1247device_changestate_responder(enum controlvm_id cmd_id,
1248 u32 bus_no, u32 dev_no, int response,
1249 struct spar_segment_state response_state)
1250{
1251 struct visorchipset_device_info *p;
1252 struct controlvm_message outmsg;
1253
1254 p = device_find(&dev_info_list, bus_no, dev_no);
1255 if (!p)
1256 return;
1257 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
1258 return; /* no controlvm response needed */
1259 if (p->pending_msg_hdr.id != cmd_id)
1260 return;
1261
1262 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
1263
1264 outmsg.cmd.device_change_state.bus_no = bus_no;
1265 outmsg.cmd.device_change_state.dev_no = dev_no;
1266 outmsg.cmd.device_change_state.state = response_state;
1267
1268 if (!visorchannel_signalinsert(controlvm_channel,
1269 CONTROLVM_QUEUE_REQUEST, &outmsg))
1270 return;
1271
1272 p->pending_msg_hdr.id = CONTROLVM_INVALID;
1273}
1274
1275static void
1276device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
1277{
1278 struct visorchipset_device_info *p;
1279 bool need_clear = false;
1280
1281 p = device_find(&dev_info_list, bus_no, dev_no);
1282 if (!p)
1283 return;
1284 if (response >= 0) {
1285 if (cmd_id == CONTROLVM_DEVICE_CREATE)
1286 p->state.created = 1;
1287 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
1288 need_clear = true;
1289 }
1290
1291 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
1292 return; /* no controlvm response needed */
1293
1294 if (p->pending_msg_hdr.id != (u32)cmd_id)
1295 return;
1296
1297 controlvm_respond(&p->pending_msg_hdr, response);
1298 p->pending_msg_hdr.id = CONTROLVM_INVALID;
1299 if (need_clear)
1300 dev_info_clear(p);
1301}
1302
1303static void
1304bus_epilog(u32 bus_no,
1305 u32 cmd, struct controlvm_message_header *msg_hdr,
1306 int response, bool need_response)
1307{
1308 struct visorchipset_bus_info *bus_info;
1309 bool notified = false;
1310
1311 bus_info = bus_find(&bus_info_list, bus_no);
1312
1313 if (!bus_info)
1314 return;
1315
1316 if (need_response) {
1317 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
1318 sizeof(struct controlvm_message_header));
1319 } else {
1320 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
1321 }
1322
1323 down(&notifier_lock);
1324 if (response == CONTROLVM_RESP_SUCCESS) {
1325 switch (cmd) {
1326 case CONTROLVM_BUS_CREATE:
1327 if (busdev_notifiers.bus_create) {
1328 (*busdev_notifiers.bus_create) (bus_no);
1329 notified = true;
1330 }
1331 break;
1332 case CONTROLVM_BUS_DESTROY:
1333 if (busdev_notifiers.bus_destroy) {
1334 (*busdev_notifiers.bus_destroy) (bus_no);
1335 notified = true;
1336 }
1337 break;
1338 }
1339 }
1340 if (notified)
1341 /* The callback function just called above is responsible
1342 * for calling the appropriate visorchipset_busdev_responders
1343 * function, which will call bus_responder()
1344 */
1345 ;
1346 else
1347 bus_responder(cmd, bus_no, response);
1348 up(&notifier_lock);
1349}
1350
1351static void
1352device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
1353 struct controlvm_message_header *msg_hdr, int response,
1354 bool need_response, bool for_visorbus)
1355{
1356 struct visorchipset_busdev_notifiers *notifiers;
1357 bool notified = false;
1358
1359 struct visorchipset_device_info *dev_info =
1360 device_find(&dev_info_list, bus_no, dev_no);
1361 char *envp[] = {
1362 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
1363 NULL
1364 };
1365
1366 if (!dev_info)
1367 return;
1368
1369 notifiers = &busdev_notifiers;
1370
1371 if (need_response) {
1372 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
1373 sizeof(struct controlvm_message_header));
1374 } else {
1375 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
1376 }
1377
1378 down(&notifier_lock);
1379 if (response >= 0) {
1380 switch (cmd) {
1381 case CONTROLVM_DEVICE_CREATE:
1382 if (notifiers->device_create) {
1383 (*notifiers->device_create) (bus_no, dev_no);
1384 notified = true;
1385 }
1386 break;
1387 case CONTROLVM_DEVICE_CHANGESTATE:
1388 /* ServerReady / ServerRunning / SegmentStateRunning */
1389 if (state.alive == segment_state_running.alive &&
1390 state.operating ==
1391 segment_state_running.operating) {
1392 if (notifiers->device_resume) {
1393 (*notifiers->device_resume) (bus_no,
1394 dev_no);
1395 notified = true;
1396 }
1397 }
1398 /* ServerNotReady / ServerLost / SegmentStateStandby */
1399 else if (state.alive == segment_state_standby.alive &&
1400 state.operating ==
1401 segment_state_standby.operating) {
 1402 /* technically this is the standby
 1403 * case, where the server is lost
 1404 */
1405 if (notifiers->device_pause) {
1406 (*notifiers->device_pause) (bus_no,
1407 dev_no);
1408 notified = true;
1409 }
1410 } else if (state.alive == segment_state_paused.alive &&
1411 state.operating ==
1412 segment_state_paused.operating) {
 1413 /* this is a lite pause, where the channel
 1414 * remains valid; it is merely 'paused'
 1415 */
1416 if (bus_no == g_diagpool_bus_no &&
1417 dev_no == g_diagpool_dev_no) {
1418 /* this will trigger the
1419 * diag_shutdown.sh script in
1420 * the visorchipset hotplug */
1421 kobject_uevent_env
1422 (&visorchipset_platform_device.dev.
1423 kobj, KOBJ_ONLINE, envp);
1424 }
1425 }
1426 break;
1427 case CONTROLVM_DEVICE_DESTROY:
1428 if (notifiers->device_destroy) {
1429 (*notifiers->device_destroy) (bus_no, dev_no);
1430 notified = true;
1431 }
1432 break;
1433 }
1434 }
1435 if (notified)
1436 /* The callback function just called above is responsible
1437 * for calling the appropriate visorchipset_busdev_responders
1438 * function, which will call device_responder()
1439 */
1440 ;
1441 else
1442 device_responder(cmd, bus_no, dev_no, response);
1443 up(&notifier_lock);
1444}
1445
1446static void
1447bus_create(struct controlvm_message *inmsg)
1448{
1449 struct controlvm_message_packet *cmd = &inmsg->cmd;
1450 u32 bus_no = cmd->create_bus.bus_no;
1451 int rc = CONTROLVM_RESP_SUCCESS;
1452 struct visorchipset_bus_info *bus_info;
1453
1454 bus_info = bus_find(&bus_info_list, bus_no);
1455 if (bus_info && (bus_info->state.created == 1)) {
1456 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1457 POSTCODE_SEVERITY_ERR);
1458 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1459 goto cleanup;
1460 }
1461 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1462 if (!bus_info) {
1463 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1464 POSTCODE_SEVERITY_ERR);
1465 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1466 goto cleanup;
1467 }
1468
1469 INIT_LIST_HEAD(&bus_info->entry);
1470 bus_info->bus_no = bus_no;
1471
1472 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
1473
1474 if (inmsg->hdr.flags.test_message == 1)
1475 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1476 else
1477 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1478
1479 bus_info->flags.server = inmsg->hdr.flags.server;
1480 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1481 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1482 bus_info->chan_info.channel_type_uuid =
1483 cmd->create_bus.bus_data_type_uuid;
1484 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1485
1486 list_add(&bus_info->entry, &bus_info_list);
1487
1488 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
1489
1490cleanup:
1491 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1492 rc, inmsg->hdr.flags.response_expected == 1);
1493}
1494
1495static void
1496bus_destroy(struct controlvm_message *inmsg)
1497{
1498 struct controlvm_message_packet *cmd = &inmsg->cmd;
1499 u32 bus_no = cmd->destroy_bus.bus_no;
1500 struct visorchipset_bus_info *bus_info;
1501 int rc = CONTROLVM_RESP_SUCCESS;
1502
1503 bus_info = bus_find(&bus_info_list, bus_no);
1504 if (!bus_info)
1505 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1506 else if (bus_info->state.created == 0)
1507 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1508
1509 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1510 rc, inmsg->hdr.flags.response_expected == 1);
1511}
1512
1513static void
1514bus_configure(struct controlvm_message *inmsg,
1515 struct parser_context *parser_ctx)
1516{
1517 struct controlvm_message_packet *cmd = &inmsg->cmd;
1518 u32 bus_no;
1519 struct visorchipset_bus_info *bus_info;
1520 int rc = CONTROLVM_RESP_SUCCESS;
1521 char s[99];
1522
1523 bus_no = cmd->configure_bus.bus_no;
1524 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1525 POSTCODE_SEVERITY_INFO);
1526
1527 bus_info = bus_find(&bus_info_list, bus_no);
1528 if (!bus_info) {
1529 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1530 POSTCODE_SEVERITY_ERR);
1531 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1532 } else if (bus_info->state.created == 0) {
1533 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1534 POSTCODE_SEVERITY_ERR);
1535 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1536 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1537 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1538 POSTCODE_SEVERITY_ERR);
1539 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1540 } else {
1541 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1542 bus_info->partition_uuid = parser_id_get(parser_ctx);
1543 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1544 bus_info->name = parser_string_get(parser_ctx);
1545
1546 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1547 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1548 POSTCODE_SEVERITY_INFO);
1549 }
1550 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1551 rc, inmsg->hdr.flags.response_expected == 1);
1552}
1553
1554static void
1555my_device_create(struct controlvm_message *inmsg)
1556{
1557 struct controlvm_message_packet *cmd = &inmsg->cmd;
1558 u32 bus_no = cmd->create_device.bus_no;
1559 u32 dev_no = cmd->create_device.dev_no;
1560 struct visorchipset_device_info *dev_info;
1561 struct visorchipset_bus_info *bus_info;
1562 int rc = CONTROLVM_RESP_SUCCESS;
1563
1564 dev_info = device_find(&dev_info_list, bus_no, dev_no);
1565 if (dev_info && (dev_info->state.created == 1)) {
1566 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1567 POSTCODE_SEVERITY_ERR);
1568 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1569 goto cleanup;
1570 }
1571 bus_info = bus_find(&bus_info_list, bus_no);
1572 if (!bus_info) {
1573 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1574 POSTCODE_SEVERITY_ERR);
1575 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1576 goto cleanup;
1577 }
1578 if (bus_info->state.created == 0) {
1579 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1580 POSTCODE_SEVERITY_ERR);
1581 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1582 goto cleanup;
1583 }
1584 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1585 if (!dev_info) {
1586 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1587 POSTCODE_SEVERITY_ERR);
1588 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1589 goto cleanup;
1590 }
1591
1592 INIT_LIST_HEAD(&dev_info->entry);
1593 dev_info->bus_no = bus_no;
1594 dev_info->dev_no = dev_no;
1595 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1596 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
1597 POSTCODE_SEVERITY_INFO);
1598
1599 if (inmsg->hdr.flags.test_message == 1)
1600 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1601 else
1602 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1603 dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1604 dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1605 dev_info->chan_info.channel_type_uuid =
1606 cmd->create_device.data_type_uuid;
1607 dev_info->chan_info.intr = cmd->create_device.intr;
1608 list_add(&dev_info->entry, &dev_info_list);
1609 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
1610 POSTCODE_SEVERITY_INFO);
1611cleanup:
1612 /* get the bus and devNo for DiagPool channel */
1613 if (dev_info &&
1614 is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1615 g_diagpool_bus_no = bus_no;
1616 g_diagpool_dev_no = dev_no;
1617 }
1618 device_epilog(bus_no, dev_no, segment_state_running,
1619 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1620 inmsg->hdr.flags.response_expected == 1, 1);
1621}
1622
1623static void
1624my_device_changestate(struct controlvm_message *inmsg)
1625{
1626 struct controlvm_message_packet *cmd = &inmsg->cmd;
1627 u32 bus_no = cmd->device_change_state.bus_no;
1628 u32 dev_no = cmd->device_change_state.dev_no;
1629 struct spar_segment_state state = cmd->device_change_state.state;
1630 struct visorchipset_device_info *dev_info;
1631 int rc = CONTROLVM_RESP_SUCCESS;
1632
1633 dev_info = device_find(&dev_info_list, bus_no, dev_no);
1634 if (!dev_info) {
1635 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1636 POSTCODE_SEVERITY_ERR);
1637 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1638 } else if (dev_info->state.created == 0) {
1639 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1640 POSTCODE_SEVERITY_ERR);
1641 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1642 }
1643 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1644 device_epilog(bus_no, dev_no, state,
1645 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
1646 inmsg->hdr.flags.response_expected == 1, 1);
1647}
1648
1649static void
1650my_device_destroy(struct controlvm_message *inmsg)
1651{
1652 struct controlvm_message_packet *cmd = &inmsg->cmd;
1653 u32 bus_no = cmd->destroy_device.bus_no;
1654 u32 dev_no = cmd->destroy_device.dev_no;
1655 struct visorchipset_device_info *dev_info;
1656 int rc = CONTROLVM_RESP_SUCCESS;
1657
1658 dev_info = device_find(&dev_info_list, bus_no, dev_no);
1659 if (!dev_info)
1660 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1661 else if (dev_info->state.created == 0)
1662 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1663
1664 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1665 device_epilog(bus_no, dev_no, segment_state_running,
1666 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1667 inmsg->hdr.flags.response_expected == 1, 1);
1668}
1669
1670/* When provided with the physical address of the controlvm channel
1671 * (phys_addr), the offset to the payload area we need to manage
1672 * (offset), and the size of this payload area (bytes), fills in the
 1673 * controlvm_payload_info struct. Returns CONTROLVM_RESP_SUCCESS for
 1674 * success or a negative CONTROLVM_RESP_ERROR_* code for failure.
1675 */
1676static int
1677initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1678 struct visor_controlvm_payload_info *info)
1679{
1680 u8 __iomem *payload = NULL;
1681 int rc = CONTROLVM_RESP_SUCCESS;
1682
1683 if (!info) {
1684 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1685 goto cleanup;
1686 }
1687 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1688 if ((offset == 0) || (bytes == 0)) {
1689 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1690 goto cleanup;
1691 }
1692 payload = ioremap_cache(phys_addr + offset, bytes);
1693 if (!payload) {
1694 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1695 goto cleanup;
1696 }
1697
1698 info->offset = offset;
1699 info->bytes = bytes;
1700 info->ptr = payload;
1701
1702cleanup:
1703 if (rc < 0) {
1704 if (payload) {
1705 iounmap(payload);
1706 payload = NULL;
1707 }
1708 }
1709 return rc;
1710}
1711
1712static void
1713destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
1714{
1715 if (info->ptr) {
1716 iounmap(info->ptr);
1717 info->ptr = NULL;
1718 }
1719 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1720}
1721
1722static void
1723initialize_controlvm_payload(void)
1724{
1725 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1726 u64 payload_offset = 0;
1727 u32 payload_bytes = 0;
1728
1729 if (visorchannel_read(controlvm_channel,
1730 offsetof(struct spar_controlvm_channel_protocol,
1731 request_payload_offset),
1732 &payload_offset, sizeof(payload_offset)) < 0) {
1733 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1734 POSTCODE_SEVERITY_ERR);
1735 return;
1736 }
1737 if (visorchannel_read(controlvm_channel,
1738 offsetof(struct spar_controlvm_channel_protocol,
1739 request_payload_bytes),
1740 &payload_bytes, sizeof(payload_bytes)) < 0) {
1741 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1742 POSTCODE_SEVERITY_ERR);
1743 return;
1744 }
1745 initialize_controlvm_payload_info(phys_addr,
1746 payload_offset, payload_bytes,
1747 &controlvm_payload_info);
1748}
1749
1750/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1751 * Returns CONTROLVM_RESP_xxx code.
1752 */
1753int
1754visorchipset_chipset_ready(void)
1755{
1756 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1757 return CONTROLVM_RESP_SUCCESS;
1758}
1759EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1760
1761int
1762visorchipset_chipset_selftest(void)
1763{
1764 char env_selftest[20];
1765 char *envp[] = { env_selftest, NULL };
1766
1767 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1768 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1769 envp);
1770 return CONTROLVM_RESP_SUCCESS;
1771}
1772EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1773
1774/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1775 * Returns CONTROLVM_RESP_xxx code.
1776 */
1777int
1778visorchipset_chipset_notready(void)
1779{
1780 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1781 return CONTROLVM_RESP_SUCCESS;
1782}
1783EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1784
1785static void
1786chipset_ready(struct controlvm_message_header *msg_hdr)
1787{
1788 int rc = visorchipset_chipset_ready();
1789
1790 if (rc != CONTROLVM_RESP_SUCCESS)
1791 rc = -rc;
1792 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1793 controlvm_respond(msg_hdr, rc);
1794 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1795 /* Send CHIPSET_READY response when all modules have been loaded
1796 * and disks mounted for the partition
1797 */
1798 g_chipset_msg_hdr = *msg_hdr;
1799 }
1800}
1801
1802static void
1803chipset_selftest(struct controlvm_message_header *msg_hdr)
1804{
1805 int rc = visorchipset_chipset_selftest();
1806
1807 if (rc != CONTROLVM_RESP_SUCCESS)
1808 rc = -rc;
1809 if (msg_hdr->flags.response_expected)
1810 controlvm_respond(msg_hdr, rc);
1811}
1812
1813static void
1814chipset_notready(struct controlvm_message_header *msg_hdr)
1815{
1816 int rc = visorchipset_chipset_notready();
1817
1818 if (rc != CONTROLVM_RESP_SUCCESS)
1819 rc = -rc;
1820 if (msg_hdr->flags.response_expected)
1821 controlvm_respond(msg_hdr, rc);
1822}
1823
1824/* This is your "one-stop" shop for grabbing the next message from the
1825 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1826 */
1827static bool
1828read_controlvm_event(struct controlvm_message *msg)
1829{
1830 if (visorchannel_signalremove(controlvm_channel,
1831 CONTROLVM_QUEUE_EVENT, msg)) {
1832 /* got a message */
1833 if (msg->hdr.flags.test_message == 1)
1834 return false;
1835 return true;
1836 }
1837 return false;
1838}
1839
1840/*
1841 * The general parahotplug flow works as follows. The visorchipset
1842 * driver receives a DEVICE_CHANGESTATE message from Command
1843 * specifying a physical device to enable or disable. The CONTROLVM
1844 * message handler calls parahotplug_process_message, which then adds
1845 * the message to a global list and kicks off a udev event which
1846 * causes a user level script to enable or disable the specified
 1847 * device. The udev script then writes to the deviceenabled or
 1848 * devicedisabled sysfs attribute under parahotplug, which causes
 1849 * parahotplug_request_complete to get called, at which point the
 1850 * appropriate CONTROLVM message is retrieved from the list and responded to.
1851 */
1852
1853#define PARAHOTPLUG_TIMEOUT_MS 2000
1854
1855/*
1856 * Generate unique int to match an outstanding CONTROLVM message with a
1857 * udev script /proc response
1858 */
1859static int
1860parahotplug_next_id(void)
1861{
1862 static atomic_t id = ATOMIC_INIT(0);
1863
1864 return atomic_inc_return(&id);
1865}
1866
1867/*
1868 * Returns the time (in jiffies) when a CONTROLVM message on the list
1869 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1870 */
1871static unsigned long
1872parahotplug_next_expiration(void)
1873{
1874 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1875}
1876
1877/*
1878 * Create a parahotplug_request, which is basically a wrapper for a
1879 * CONTROLVM_MESSAGE that we can stick on a list
1880 */
1881static struct parahotplug_request *
1882parahotplug_request_create(struct controlvm_message *msg)
1883{
1884 struct parahotplug_request *req;
1885
1886 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1887 if (!req)
1888 return NULL;
1889
1890 req->id = parahotplug_next_id();
1891 req->expiration = parahotplug_next_expiration();
1892 req->msg = *msg;
1893
1894 return req;
1895}
1896
1897/*
1898 * Free a parahotplug_request.
1899 */
1900static void
1901parahotplug_request_destroy(struct parahotplug_request *req)
1902{
1903 kfree(req);
1904}
1905
1906/*
1907 * Cause uevent to run the user level script to do the disable/enable
1908 * specified in (the CONTROLVM message in) the specified
1909 * parahotplug_request
1910 */
1911static void
1912parahotplug_request_kickoff(struct parahotplug_request *req)
1913{
1914 struct controlvm_message_packet *cmd = &req->msg.cmd;
1915 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1916 env_func[40];
1917 char *envp[] = {
1918 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1919 };
1920
1921 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1922 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1923 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1924 cmd->device_change_state.state.active);
1925 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1926 cmd->device_change_state.bus_no);
1927 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1928 cmd->device_change_state.dev_no >> 3);
1929 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1930 cmd->device_change_state.dev_no & 0x7);
1931
1932 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1933 envp);
1934}
1935
1936/*
1937 * Remove any request from the list that's been on there too long and
1938 * respond with an error.
1939 */
1940static void
1941parahotplug_process_list(void)
1942{
1943 struct list_head *pos;
1944 struct list_head *tmp;
1945
1946 spin_lock(&parahotplug_request_list_lock);
1947
1948 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1949 struct parahotplug_request *req =
1950 list_entry(pos, struct parahotplug_request, list);
1951
1952 if (!time_after_eq(jiffies, req->expiration))
1953 continue;
1954
1955 list_del(pos);
1956 if (req->msg.hdr.flags.response_expected)
1957 controlvm_respond_physdev_changestate(
1958 &req->msg.hdr,
1959 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1960 req->msg.cmd.device_change_state.state);
1961 parahotplug_request_destroy(req);
1962 }
1963
1964 spin_unlock(&parahotplug_request_list_lock);
1965}
1966
1967/*
1968 * Called from the /proc handler, which means the user script has
1969 * finished the enable/disable. Find the matching identifier, and
1970 * respond to the CONTROLVM message with success.
1971 */
1972static int
1973parahotplug_request_complete(int id, u16 active)
1974{
1975 struct list_head *pos;
1976 struct list_head *tmp;
1977
1978 spin_lock(&parahotplug_request_list_lock);
1979
1980 /* Look for a request matching "id". */
1981 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1982 struct parahotplug_request *req =
1983 list_entry(pos, struct parahotplug_request, list);
1984 if (req->id == id) {
1985 /* Found a match. Remove it from the list and
1986 * respond.
1987 */
1988 list_del(pos);
1989 spin_unlock(&parahotplug_request_list_lock);
1990 req->msg.cmd.device_change_state.state.active = active;
1991 if (req->msg.hdr.flags.response_expected)
1992 controlvm_respond_physdev_changestate(
1993 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1994 req->msg.cmd.device_change_state.state);
1995 parahotplug_request_destroy(req);
1996 return 0;
1997 }
1998 }
1999
2000 spin_unlock(&parahotplug_request_list_lock);
2001 return -1;
2002}
2003
2004/*
2005 * Enables or disables a PCI device by kicking off a udev script
2006 */
2007static void
2008parahotplug_process_message(struct controlvm_message *inmsg)
2009{
2010 struct parahotplug_request *req;
2011
2012 req = parahotplug_request_create(inmsg);
2013
2014 if (!req)
2015 return;
2016
2017 if (inmsg->cmd.device_change_state.state.active) {
2018 /* For enable messages, just respond with success
2019 * right away. This is a bit of a hack, but there are
2020 * issues with the early enable messages we get (with
2021 * either the udev script not detecting that the device
2022 * is up, or not getting called at all). Fortunately
2023 * the messages that get lost don't matter anyway, as
2024 * devices are automatically enabled at
2025 * initialization.
2026 */
2027 parahotplug_request_kickoff(req);
2028 controlvm_respond_physdev_changestate(&inmsg->hdr,
2029 CONTROLVM_RESP_SUCCESS,
2030 inmsg->cmd.device_change_state.state);
2031 parahotplug_request_destroy(req);
2032 } else {
2033 /* For disable messages, add the request to the
2034 * request list before kicking off the udev script. It
2035 * won't get responded to until the script has
2036 * indicated it's done.
2037 */
2038 spin_lock(&parahotplug_request_list_lock);
2039 list_add_tail(&req->list, &parahotplug_request_list);
2040 spin_unlock(&parahotplug_request_list_lock);
2041
2042 parahotplug_request_kickoff(req);
2043 }
2044}
2045
2046/* Process a controlvm message.
2047 * Return result:
2048 * false - this function will return false only in the case where the
2049 * controlvm message was NOT processed, but processing must be
2050 * retried before reading the next controlvm message; a
2051 * scenario where this can occur is when we need to throttle
2052 * the allocation of memory in which to copy out controlvm
2053 * payload data
2054 * true - processing of the controlvm message completed,
2055 * either successfully or with an error.
2056 */
2057static bool
2058handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
2059{
2060 struct controlvm_message_packet *cmd = &inmsg.cmd;
2061 u64 parm_addr;
2062 u32 parm_bytes;
2063 struct parser_context *parser_ctx = NULL;
2064 bool local_addr;
2065 struct controlvm_message ackmsg;
2066
2067 /* create parsing context if necessary */
2068 local_addr = (inmsg.hdr.flags.test_message == 1);
2069 if (channel_addr == 0)
2070 return true;
2071 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
2072 parm_bytes = inmsg.hdr.payload_bytes;
2073
2074 /* Parameter and channel addresses within test messages actually lie
2075 * within our OS-controlled memory. We need to know that, because it
2076 * makes a difference in how we compute the virtual address.
2077 */
2078 if (parm_addr && parm_bytes) {
2079 bool retry = false;
2080
2081 parser_ctx =
2082 parser_init_byte_stream(parm_addr, parm_bytes,
2083 local_addr, &retry);
2084 if (!parser_ctx && retry)
2085 return false;
2086 }
2087
2088 if (!local_addr) {
2089 controlvm_init_response(&ackmsg, &inmsg.hdr,
2090 CONTROLVM_RESP_SUCCESS);
2091 if (controlvm_channel)
2092 visorchannel_signalinsert(controlvm_channel,
2093 CONTROLVM_QUEUE_ACK,
2094 &ackmsg);
2095 }
2096 switch (inmsg.hdr.id) {
2097 case CONTROLVM_CHIPSET_INIT:
2098 chipset_init(&inmsg);
2099 break;
2100 case CONTROLVM_BUS_CREATE:
2101 bus_create(&inmsg);
2102 break;
2103 case CONTROLVM_BUS_DESTROY:
2104 bus_destroy(&inmsg);
2105 break;
2106 case CONTROLVM_BUS_CONFIGURE:
2107 bus_configure(&inmsg, parser_ctx);
2108 break;
2109 case CONTROLVM_DEVICE_CREATE:
2110 my_device_create(&inmsg);
2111 break;
2112 case CONTROLVM_DEVICE_CHANGESTATE:
2113 if (cmd->device_change_state.flags.phys_device) {
2114 parahotplug_process_message(&inmsg);
2115 } else {
2116			/* save the hdr and cmd structures for later use when
2117			 * sending back the response to Command */
2118 my_device_changestate(&inmsg);
2119 g_devicechangestate_packet = inmsg.cmd;
2120 break;
2121 }
2122 break;
2123 case CONTROLVM_DEVICE_DESTROY:
2124 my_device_destroy(&inmsg);
2125 break;
2126 case CONTROLVM_DEVICE_CONFIGURE:
2127 /* no op for now, just send a respond that we passed */
2128 if (inmsg.hdr.flags.response_expected)
2129 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
2130 break;
2131 case CONTROLVM_CHIPSET_READY:
2132 chipset_ready(&inmsg.hdr);
2133 break;
2134 case CONTROLVM_CHIPSET_SELFTEST:
2135 chipset_selftest(&inmsg.hdr);
2136 break;
2137 case CONTROLVM_CHIPSET_STOP:
2138 chipset_notready(&inmsg.hdr);
2139 break;
2140 default:
2141 if (inmsg.hdr.flags.response_expected)
2142 controlvm_respond(&inmsg.hdr,
2143 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
2144 break;
2145 }
2146
2147 if (parser_ctx) {
2148 parser_done(parser_ctx);
2149 parser_ctx = NULL;
2150 }
2151 return true;
2152}
2153
2154static HOSTADDRESS controlvm_get_channel_address(void)
2155{
2156 u64 addr = 0;
2157 u32 size = 0;
2158
2159 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
2160 return 0;
2161
2162 return addr;
2163}
2164
2165static void
2166controlvm_periodic_work(struct work_struct *work)
2167{
2168 struct controlvm_message inmsg;
2169 bool got_command = false;
2170 bool handle_command_failed = false;
2171 static u64 poll_count;
2172
2173 /* make sure visorbus server is registered for controlvm callbacks */
2174 if (visorchipset_visorbusregwait && !visorbusregistered)
2175 goto cleanup;
2176
2177 poll_count++;
2178	if (poll_count < 250)
2179		goto cleanup;	/* not yet time to process commands */
2182
2183 /* Check events to determine if response to CHIPSET_READY
2184 * should be sent
2185 */
2186 if (visorchipset_holdchipsetready &&
2187 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
2188 if (check_chipset_events() == 1) {
2189 controlvm_respond(&g_chipset_msg_hdr, 0);
2190 clear_chipset_events();
2191 memset(&g_chipset_msg_hdr, 0,
2192 sizeof(struct controlvm_message_header));
2193 }
2194 }
2195
2196 while (visorchannel_signalremove(controlvm_channel,
2197 CONTROLVM_QUEUE_RESPONSE,
2198 &inmsg))
2199 ;
2200 if (!got_command) {
2201 if (controlvm_pending_msg_valid) {
2202 /* we throttled processing of a prior
2203 * msg, so try to process it again
2204 * rather than reading a new one
2205 */
2206 inmsg = controlvm_pending_msg;
2207 controlvm_pending_msg_valid = false;
2208 got_command = true;
2209 } else {
2210 got_command = read_controlvm_event(&inmsg);
2211 }
2212 }
2213
2214 handle_command_failed = false;
2215 while (got_command && (!handle_command_failed)) {
2216 most_recent_message_jiffies = jiffies;
2217 if (handle_command(inmsg,
2218 visorchannel_get_physaddr
2219 (controlvm_channel)))
2220 got_command = read_controlvm_event(&inmsg);
2221 else {
2222 /* this is a scenario where throttling
2223 * is required, but probably NOT an
2224 * error...; we stash the current
2225 * controlvm msg so we will attempt to
2226 * reprocess it on our next loop
2227 */
2228 handle_command_failed = true;
2229 controlvm_pending_msg = inmsg;
2230 controlvm_pending_msg_valid = true;
2231 }
2232 }
2233
2234	/* time out any parahotplug requests still waiting on udev */
2235 parahotplug_process_list();
2236
2237cleanup:
2238
2239 if (time_after(jiffies,
2240 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2241 /* it's been longer than MIN_IDLE_SECONDS since we
2242 * processed our last controlvm message; slow down the
2243 * polling
2244 */
2245 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2246 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2247 } else {
2248 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2249 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2250 }
2251
2252 queue_delayed_work(periodic_controlvm_workqueue,
2253 &periodic_controlvm_work, poll_jiffies);
2254}
2255
2256static void
2257setup_crash_devices_work_queue(struct work_struct *work)
2258{
2259 struct controlvm_message local_crash_bus_msg;
2260 struct controlvm_message local_crash_dev_msg;
2261 struct controlvm_message msg;
2262 u32 local_crash_msg_offset;
2263 u16 local_crash_msg_count;
2264
2265 /* make sure visorbus is registered for controlvm callbacks */
2266 if (visorchipset_visorbusregwait && !visorbusregistered)
2267 goto cleanup;
2268
2269 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2270
2271 /* send init chipset msg */
2272 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2273 msg.cmd.init_chipset.bus_count = 23;
2274 msg.cmd.init_chipset.switch_count = 0;
2275
2276 chipset_init(&msg);
2277
2278 /* get saved message count */
2279 if (visorchannel_read(controlvm_channel,
2280 offsetof(struct spar_controlvm_channel_protocol,
2281 saved_crash_message_count),
2282 &local_crash_msg_count, sizeof(u16)) < 0) {
2283 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2284 POSTCODE_SEVERITY_ERR);
2285 return;
2286 }
2287
2288 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
2289 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2290 local_crash_msg_count,
2291 POSTCODE_SEVERITY_ERR);
2292 return;
2293 }
2294
2295 /* get saved crash message offset */
2296 if (visorchannel_read(controlvm_channel,
2297 offsetof(struct spar_controlvm_channel_protocol,
2298 saved_crash_message_offset),
2299 &local_crash_msg_offset, sizeof(u32)) < 0) {
2300 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2301 POSTCODE_SEVERITY_ERR);
2302 return;
2303 }
2304
2305 /* read create device message for storage bus offset */
2306 if (visorchannel_read(controlvm_channel,
2307 local_crash_msg_offset,
2308 &local_crash_bus_msg,
2309 sizeof(struct controlvm_message)) < 0) {
2310 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2311 POSTCODE_SEVERITY_ERR);
2312 return;
2313 }
2314
2315 /* read create device message for storage device */
2316 if (visorchannel_read(controlvm_channel,
2317 local_crash_msg_offset +
2318 sizeof(struct controlvm_message),
2319 &local_crash_dev_msg,
2320 sizeof(struct controlvm_message)) < 0) {
2321 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2322 POSTCODE_SEVERITY_ERR);
2323 return;
2324 }
2325
2326 /* reuse IOVM create bus message */
2327 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
2328 bus_create(&local_crash_bus_msg);
2329 } else {
2330 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2331 POSTCODE_SEVERITY_ERR);
2332 return;
2333 }
2334
2335 /* reuse create device message for storage device */
2336 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2337 my_device_create(&local_crash_dev_msg);
2338 } else {
2339 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2340 POSTCODE_SEVERITY_ERR);
2341 return;
2342 }
2343 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2344 return;
2345
2346cleanup:
2347
2348 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2349
2350 queue_delayed_work(periodic_controlvm_workqueue,
2351 &periodic_controlvm_work, poll_jiffies);
2352}
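/*
 * Layout assumed by the function above for the saved crash messages in
 * the controlvm channel:
 *
 *	saved_crash_message_count	must equal CONTROLVM_CRASHMSG_MAX
 *	saved_crash_message_offset ->	[0] CONTROLVM_BUS_CREATE msg
 *					[1] CONTROLVM_DEVICE_CREATE msg
 *
 * i.e. two consecutive struct controlvm_message entries, storage bus
 * first, then storage device; each is replayed only when its
 * channel_addr is non-zero.
 */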
2353
2354static void
2355bus_create_response(u32 bus_no, int response)
2356{
2357 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
2358}
2359
2360static void
2361bus_destroy_response(u32 bus_no, int response)
2362{
2363 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
2364}
2365
2366static void
2367device_create_response(u32 bus_no, u32 dev_no, int response)
2368{
2369 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
2370}
2371
2372static void
2373device_destroy_response(u32 bus_no, u32 dev_no, int response)
2374{
2375 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
2376}
2377
2378void
2379visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
2380{
2381 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2382 bus_no, dev_no, response,
2383 segment_state_standby);
2384}
2385EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2386
2387static void
2388device_resume_response(u32 bus_no, u32 dev_no, int response)
2389{
2390 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2391 bus_no, dev_no, response,
2392 segment_state_running);
2393}
2394
2395bool
2396visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2397{
2398 void *p = bus_find(&bus_info_list, bus_no);
2399
2400 if (!p)
2401 return false;
2402 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2403 return true;
2404}
2405EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2406
2407bool
2408visorchipset_set_bus_context(u32 bus_no, void *context)
2409{
2410 struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
2411
2412 if (!p)
2413 return false;
2414 p->bus_driver_context = context;
2415 return true;
2416}
2417EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2418
2419bool
2420visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2421 struct visorchipset_device_info *dev_info)
2422{
2423 void *p = device_find(&dev_info_list, bus_no, dev_no);
2424
2425 if (!p)
2426 return false;
2427 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2428 return true;
2429}
2430EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2431
2432bool
2433visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
2434{
2435 struct visorchipset_device_info *p;
2436
2437 p = device_find(&dev_info_list, bus_no, dev_no);
2438
2439 if (!p)
2440 return false;
2441 p->bus_driver_context = context;
2442 return true;
2443}
2444EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
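/*
 * Hypothetical usage sketch (illustration only; compiled out, and the
 * name example_attach_context is an assumption, not part of this
 * driver): pairing the lookup and context setters exported above.
 */
#if 0
static bool example_attach_context(u32 bus_no, u32 dev_no, void *ctx)
{
	struct visorchipset_device_info dev_info;

	/* copy out the bookkeeping entry, if the device is tracked */
	if (!visorchipset_get_device_info(bus_no, dev_no, &dev_info))
		return false;

	/* stash the bus driver's private pointer in the tracked entry */
	return visorchipset_set_device_context(bus_no, dev_no, ctx);
}
#endif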
2445
2446/* Generic wrapper function for allocating memory from a kmem_cache pool.
2447 */
2448void *
2449visorchipset_cache_alloc(struct kmem_cache *pool, bool ok_to_block,
2450 char *fn, int ln)
2451{
2452 gfp_t gfp;
2453 void *p;
2454
2455 if (ok_to_block)
2456 gfp = GFP_KERNEL;
2457 else
2458 gfp = GFP_ATOMIC;
2459 /* __GFP_NORETRY means "ok to fail", meaning
2460 * kmem_cache_alloc() can return NULL, implying the caller CAN
2461 * cope with failure. If you do NOT specify __GFP_NORETRY,
2462 * Linux will go to extreme measures to get memory for you
2463 * (like, invoke oom killer), which will probably cripple the
2464 * system.
2465 */
2466 gfp |= __GFP_NORETRY;
2467 p = kmem_cache_alloc(pool, gfp);
2468 if (!p)
2469 return NULL;
2470
2471 return p;
2472}
2473
2474/* Generic wrapper function for freeing memory from a kmem_cache pool.
2475 */
2476void
2477visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2478{
2479 if (!p)
2480 return;
2481
2482 kmem_cache_free(pool, p);
2483}
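/*
 * Hypothetical caller sketch (illustration only; compiled out, and
 * "my_pool"/"struct my_entry" are assumptions): because
 * visorchipset_cache_alloc() always ORs in __GFP_NORETRY, a NULL
 * return is a normal outcome under memory pressure, not a bug.
 */
#if 0
static struct my_entry *example_alloc(struct kmem_cache *my_pool,
				      bool in_atomic_context)
{
	struct my_entry *e;

	e = visorchipset_cache_alloc(my_pool, !in_atomic_context,
				     (char *)__FILE__, __LINE__);
	if (!e)
		return NULL;	/* caller must cope with failure */
	memset(e, 0, sizeof(*e));	/* pool objects are not zeroed */
	return e;
}
#endif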
2484
2485static ssize_t chipsetready_store(struct device *dev,
2486 struct device_attribute *attr,
2487 const char *buf, size_t count)
2488{
2489 char msgtype[64];
2490
2491 if (sscanf(buf, "%63s", msgtype) != 1)
2492 return -EINVAL;
2493
2494 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2495 chipset_events[0] = 1;
2496 return count;
2497 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2498 chipset_events[1] = 1;
2499 return count;
2500 }
2501 return -EINVAL;
2502}
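/*
 * Example usage (the exact sysfs path is an assumption; the attribute
 * is registered elsewhere in this driver): guest boot scripts report
 * progress with plain writes, e.g.
 *
 *	echo MODULES_LOADED > .../visorchipset/guest/chipsetready
 *	echo CALLHOMEDISK_MOUNTED > .../visorchipset/guest/chipsetready
 *
 * Once both events have been recorded, check_chipset_events() allows
 * controlvm_periodic_work() to release the held CHIPSET_READY
 * response.
 */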
2503
2504/* The parahotplug/devicedisabled interface gets called by our support script
2505 * when an SR-IOV device has been shut down. The ID is passed to the script
2506 * and then passed back when the device has been removed.
2507 */
2508static ssize_t devicedisabled_store(struct device *dev,
2509 struct device_attribute *attr,
2510 const char *buf, size_t count)
2511{
2512 unsigned int id;
2513
2514 if (kstrtouint(buf, 10, &id))
2515 return -EINVAL;
2516
2517 parahotplug_request_complete(id, 0);
2518 return count;
2519}
2520
2521/* The parahotplug/deviceenabled interface gets called by our support script
2522 * when an SR-IOV device has been recovered. The ID is passed to the script
2523 * and then passed back when the device has been brought back up.
2524 */
2525static ssize_t deviceenabled_store(struct device *dev,
2526 struct device_attribute *attr,
2527 const char *buf, size_t count)
2528{
2529 unsigned int id;
2530
2531 if (kstrtouint(buf, 10, &id))
2532 return -EINVAL;
2533
2534 parahotplug_request_complete(id, 1);
2535 return count;
2536}
2537
2538static int
2539visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2540{
2541 unsigned long physaddr = 0;
2542 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2543 u64 addr = 0;
2544
2546 if (offset & (PAGE_SIZE - 1))
2547 return -ENXIO; /* need aligned offsets */
2548
2549 switch (offset) {
2550 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2551 vma->vm_flags |= VM_IO;
2552 if (!*file_controlvm_channel)
2553 return -ENXIO;
2554
2555 visorchannel_read(*file_controlvm_channel,
2556 offsetof(struct spar_controlvm_channel_protocol,
2557 gp_control_channel),
2558 &addr, sizeof(addr));
2559 if (!addr)
2560 return -ENXIO;
2561
2562 physaddr = (unsigned long)addr;
2563 if (remap_pfn_range(vma, vma->vm_start,
2564 physaddr >> PAGE_SHIFT,
2565 vma->vm_end - vma->vm_start,
2566 /*pgprot_noncached */
2567 (vma->vm_page_prot))) {
2568 return -EAGAIN;
2569 }
2570 break;
2571 default:
2572 return -ENXIO;
2573 }
2574 return 0;
2575}
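/*
 * Hypothetical userspace sketch (illustration only; the device node
 * name is an assumption): mapping the control channel at offset
 * VISORCHIPSET_MMAP_CONTROLCHANOFFSET (0).
 *
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	void *chan = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd,
 *			  VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 *
 * The offset must be page-aligned, and the call fails with -ENXIO
 * until gp_control_channel has been populated in the channel header.
 */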
2576
2577static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2578 unsigned long arg)
2579{
2580 s64 adjustment;
2581 s64 vrtc_offset;
2582
2583 switch (cmd) {
2584 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2585 /* get the physical rtc offset */
2586 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2587 if (copy_to_user((void __user *)arg, &vrtc_offset,
2588 sizeof(vrtc_offset))) {
2589 return -EFAULT;
2590 }
2591 return SUCCESS;
2592 case VMCALL_UPDATE_PHYSICAL_TIME:
2593 if (copy_from_user(&adjustment, (void __user *)arg,
2594 sizeof(adjustment))) {
2595 return -EFAULT;
2596 }
2597 return issue_vmcall_update_physical_time(adjustment);
2598 default:
2599		return -ENOTTY;	/* conventional errno for an unknown ioctl */
2600 }
2601}
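/*
 * Hypothetical userspace sketch (illustration only): both ioctls move
 * a single s64 through the arg pointer.
 *
 *	s64 off, adj;
 *	ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &off);
 *	adj = ...;	(the adjustment policy is up to the caller)
 *	ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adj);
 */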
2602
2603static const struct file_operations visorchipset_fops = {
2604 .owner = THIS_MODULE,
2605 .open = visorchipset_open,
2608 .unlocked_ioctl = visorchipset_ioctl,
2609 .release = visorchipset_release,
2610 .mmap = visorchipset_mmap,
2611};
2612
2613int
2614visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2615{
2616 int rc = 0;
2617
2618 file_controlvm_channel = controlvm_channel;
2619 cdev_init(&file_cdev, &visorchipset_fops);
2620 file_cdev.owner = THIS_MODULE;
2621 if (MAJOR(major_dev) == 0) {
2622 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2623 /* dynamic major device number registration required */
2624 if (rc < 0)
2625 return rc;
2626 } else {
2627 /* static major device number registration required */
2628 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2629 if (rc < 0)
2630 return rc;
2631 }
2632 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2633 if (rc < 0) {
2634 unregister_chrdev_region(major_dev, 1);
2635 return rc;
2636 }
2637 return 0;
2638}
2639
2640
2642static int __init
2643visorchipset_init(void)
2644{
2645 int rc = 0;
2646 HOSTADDRESS addr;
2647
2648 if (!unisys_spar_platform)
2649 return -ENODEV;
2650
2651 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2652 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2653 memset(&livedump_info, 0, sizeof(livedump_info));
2654 atomic_set(&livedump_info.buffers_in_use, 0);
2655
2656 addr = controlvm_get_channel_address();
2657 if (addr) {
2658 controlvm_channel =
2659 visorchannel_create_with_lock
2660 (addr,
2661 sizeof(struct spar_controlvm_channel_protocol),
2662 spar_controlvm_channel_protocol_uuid);
2663 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2664 visorchannel_get_header(controlvm_channel))) {
2665 initialize_controlvm_payload();
2666 } else {
2667 visorchannel_destroy(controlvm_channel);
2668 controlvm_channel = NULL;
2669 return -ENODEV;
2670 }
2671 } else {
2672 return -ENODEV;
2673 }
2674
2675 major_dev = MKDEV(visorchipset_major, 0);
2676 rc = visorchipset_file_init(major_dev, &controlvm_channel);
2677 if (rc < 0) {
2678 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2679 goto cleanup;
2680 }
2681
2682 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2683
2684 /* if booting in a crash kernel */
2685 if (is_kdump_kernel())
2686 INIT_DELAYED_WORK(&periodic_controlvm_work,
2687 setup_crash_devices_work_queue);
2688 else
2689 INIT_DELAYED_WORK(&periodic_controlvm_work,
2690 controlvm_periodic_work);
2691 periodic_controlvm_workqueue =
2692 create_singlethread_workqueue("visorchipset_controlvm");
2693
2694 if (!periodic_controlvm_workqueue) {
2695 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2696 DIAG_SEVERITY_ERR);
2697 rc = -ENOMEM;
2698 goto cleanup;
2699 }
2700 most_recent_message_jiffies = jiffies;
2701 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2702 rc = queue_delayed_work(periodic_controlvm_workqueue,
2703 &periodic_controlvm_work, poll_jiffies);
2704 if (rc < 0) {
2705 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2706 DIAG_SEVERITY_ERR);
2707 goto cleanup;
2708 }
2709
2710 visorchipset_platform_device.dev.devt = major_dev;
2711 if (platform_device_register(&visorchipset_platform_device) < 0) {
2712 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2713		rc = -ENODEV;
2714 goto cleanup;
2715 }
2716 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2717
2718 rc = visorbus_init();
2719cleanup:
2720 if (rc) {
2721 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2722 POSTCODE_SEVERITY_ERR);
2723 }
2724 return rc;
2725}
2726
2727void
2728visorchipset_file_cleanup(dev_t major_dev)
2729{
2730 if (file_cdev.ops)
2731 cdev_del(&file_cdev);
2732 file_cdev.ops = NULL;
2733 unregister_chrdev_region(major_dev, 1);
2734}
2735
2736static void
2737visorchipset_exit(void)
2738{
2739 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2740
2741 visorbus_exit();
2742
2743 cancel_delayed_work(&periodic_controlvm_work);
2744 flush_workqueue(periodic_controlvm_workqueue);
2745 destroy_workqueue(periodic_controlvm_workqueue);
2746 periodic_controlvm_workqueue = NULL;
2747 destroy_controlvm_payload_info(&controlvm_payload_info);
2748
2749 cleanup_controlvm_structures();
2750
2751 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2752
2753 visorchannel_destroy(controlvm_channel);
2754
2755 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2756 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2757}
2758
2759module_param_named(major, visorchipset_major, int, S_IRUGO);
2760MODULE_PARM_DESC(major,
2761		 "major device number to use for the device node");
2762module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2763MODULE_PARM_DESC(visorbusregwait,
2764		 "1 to have the module wait for the visor bus to register");
2765module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2766 int, S_IRUGO);
2767MODULE_PARM_DESC(holdchipsetready,
2768		 "1 to hold response to CHIPSET_READY");
2769
2770module_init(visorchipset_init);
2771module_exit(visorchipset_exit);
2772
2773MODULE_AUTHOR("Unisys");
2774MODULE_LICENSE("GPL");
2775MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2776 VERSION);
2777MODULE_VERSION(VERSION);