/*
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
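
/*
 * For reference: each ID word above is simply four ASCII bytes of the
 * string "UnisysSpar64" stored little-endian, e.g. 0x73696e55 reads back
 * as 'U','n','i','s' in memory order.
 */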
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	char *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};
static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);
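/*
 * notifier_lock is a binary semaphore (initial count 1) taken with down()
 * and released with up() around notifier registration and delivery below;
 * it is effectively used as a mutex.
 */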
static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;
/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements. In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;		/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};
#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation. Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;
static void bus_create_response(struct visor_device *p, int response);
static void bus_destroy_response(struct visor_device *p, int response);
static void device_create_response(struct visor_device *p, int response);
static void device_destroy_response(struct visor_device *p, int response);
static void device_resume_response(struct visor_device *p, int response);

static void visorchipset_device_pause_response(struct visor_device *p,
					       int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);
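
/*
 * Illustrative usage from userspace (a sketch, assuming the "install"
 * attribute group defined below): these attributes surface under the
 * visorchipset platform device, e.g.
 *
 *	cat /sys/devices/platform/visorchipset/install/toolaction
 *	echo 2 > /sys/devices/platform/visorchipset/install/toolaction
 */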
static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
	.dev.release = visorchipset_dev_release,
};
/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * alloc an extra byte to ensure the payload is
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long) (addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;

cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		kfree(ctx);
		ctx = NULL;
	}
	return rc;
}
static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}

/* Describes the state from the perspective of which controlvm messages have
 * been received for a bus or device.
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};
static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return;

	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}
}
static void parser_done(struct parser_context *ctx)
{
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
static char *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	char *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = (u8 *)ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *)(value))[value_length] = '\0';
	return value;
}
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}
static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &(efi_spar_indication),
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}
static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = (struct visor_busdev *)data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if ((vdev->chipset_bus_no == bus_no) &&
	    (vdev->chipset_dev_no == dev_no))
		return 1;

	return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
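
/*
 * Illustrative call (a sketch, not in the original file): passing NULL as
 * "from" starts the search at the beginning of the bus list, while passing
 * a previously returned device continues the search after it:
 *
 *	struct visor_device *vdev;
 *
 *	vdev = visorbus_get_device_by_id(bus_no, dev_no, NULL);
 */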
static int
check_chipset_events(void)
{
	int i;
	int send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}
void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features =
	    inmsg->cmd.init_chipset.
	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}
static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

static void
bus_responder(enum controlvm_id cmd_id,
	      struct controlvm_message_header *pending_msg_hdr,
	      int response)
{
	if (pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}
static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visor_device *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->chipset_bus_no;
	u32 dev_no = p->chipset_dev_no;

	if (p->pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr->id != cmd_id)
		return;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;
}

static void
device_responder(enum controlvm_id cmd_id,
		 struct controlvm_message_header *pending_msg_hdr,
		 int response)
{
	if (pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}
static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		bus_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
static void
device_epilog(struct visor_device *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	notifiers = &busdev_notifiers;

	if (!dev_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (dev_info);
					notified = true;
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (dev_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		device_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto cleanup;
	}
	bus_info->visorchannel = visorchannel;

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);

	/* bus_info is freed as part of the busdevice_release function */
}
static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr != NULL) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		visorchannel_set_clientpartition(bus_info->visorchannel,
				cmd->configure_bus.guest_handle);
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   cmd->create_device.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto cleanup;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success or a negative CONTROLVM_RESP_ERROR_* code for failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0 && payload) {
		iounmap(payload);
		payload = NULL;
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
/*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}

/*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}
/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}
/*
 * The general parahotplug flow works as follows. The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable. The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device. The udev script then writes the request id back through the
 * parahotplug/devicedisabled (or deviceenabled) sysfs attribute, which
 * causes parahotplug_request_complete to get called, at which point the
 * appropriate CONTROLVM message is retrieved from the list and
 * responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000
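
/*
 * Script-side sketch (assumed, not part of this file): the udev script
 * receives SPAR_PARAHOTPLUG_ID in its environment and, once the device is
 * actually offline, acknowledges through the matching sysfs attribute:
 *
 *	echo "$SPAR_PARAHOTPLUG_ID" > \
 *	    /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */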
/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script sysfs response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
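
/*
 * Note on the encoding above: dev_no packs a PCI-style devfn, so the low
 * 3 bits ("& 0x7") are the function number and the remaining upper bits
 * (">> 3") are the device number.
 */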
/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}
/*
 * Called from the sysfs store handlers, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match. Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}
/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
			CONTROLVM_RESP_SUCCESS,
			inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *	      controlvm message was NOT processed, but processing must be
 *	      retried before reading the next controlvm message; a
 *	      scenario where this can occur is when we need to throttle
 *	      the allocation of memory in which to copy out controlvm
 *	      payload data
 *    true  - processing of the controlvm message completed,
 *	      either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}

static u64 controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	poll_count++;
	if (poll_count >= 250)
		;	/* keep going */
	else
		goto cleanup;

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
static void
bus_create_response(struct visor_device *bus_info, int response)
{
	if (response >= 0)
		bus_info->state.created = 1;

	bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
bus_destroy_response(struct visor_device *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
device_create_response(struct visor_device *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
visorchipset_device_pause_response(struct visor_device *dev_info,
				   int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
		chipset_events[0] = 1;
		return count;
	} else if (!strcmp(msgtype, "MODULES_LOADED")) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}
/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}

/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read(*file_controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 gp_control_channel),
			&addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	s64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -EFAULT;
	}
}
= {
2222 .owner
= THIS_MODULE
,
2223 .open
= visorchipset_open
,
2226 .unlocked_ioctl
= visorchipset_ioctl
,
2227 .release
= visorchipset_release
,
2228 .mmap
= visorchipset_mmap
,
static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		/* dynamic major device number registration required */
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int rc = 0;
	u64 addr;
	int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		return -ENODEV;

	memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
							  GFP_KERNEL, uuid);
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
			visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);
	periodic_controlvm_workqueue =
	    create_singlethread_workqueue("visorchipset_controlvm");

	if (!periodic_controlvm_workqueue) {
		POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
				 DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	rc = queue_delayed_work(periodic_controlvm_workqueue,
				&periodic_controlvm_work, poll_jiffies);
	if (rc < 0) {
		POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
				 DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -ENODEV;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	rc = visorbus_init();
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}
static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}

static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work(&periodic_controlvm_work);
	flush_workqueue(periodic_controlvm_workqueue);
	destroy_workqueue(periodic_controlvm_workqueue);
	periodic_controlvm_workqueue = NULL;
	destroy_controlvm_payload_info(&controlvm_payload_info);

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
static __init uint32_t visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (cpu_has_hypervisor) {
		/* check the ID */
		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return  (ebx == UNISYS_SPAR_ID_EBX) &&
			(ecx == UNISYS_SPAR_ID_ECX) &&
			(edx == UNISYS_SPAR_ID_EDX);
	} else {
		return 0;
	}
}

static int init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_visorbusregwait,
		 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
		 "1 to hold response to CHIPSET_READY");

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);