3 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
17 #include <linux/acpi.h>
18 #include <linux/cdev.h>
19 #include <linux/ctype.h>
22 #include <linux/nls.h>
23 #include <linux/netdevice.h>
24 #include <linux/platform_device.h>
25 #include <linux/uuid.h>
26 #include <linux/crash_dump.h>
28 #include "channel_guid.h"
29 #include "controlvmchannel.h"
30 #include "controlvmcompletionstatus.h"
31 #include "guestlinuxdebug.h"
32 #include "periodic_work.h"
35 #include "visorbus_private.h"
36 #include "vmcallinterface.h"
38 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
40 #define MAX_NAME_SIZE 128
41 #define MAX_IP_SIZE 50
42 #define MAXOUTSTANDINGCHANNELCOMMAND 256
43 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
48 #define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
51 #define UNISYS_SPAR_LEAF_ID 0x40000000
53 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
54 #define UNISYS_SPAR_ID_EBX 0x73696e55
55 #define UNISYS_SPAR_ID_ECX 0x70537379
56 #define UNISYS_SPAR_ID_EDX 0x34367261
61 static int visorchipset_major
;
62 static int visorchipset_visorbusregwait
= 1; /* default is on */
63 static int visorchipset_holdchipsetready
;
64 static unsigned long controlvm_payload_bytes_buffered
;
67 visorchipset_open(struct inode
*inode
, struct file
*file
)
69 unsigned minor_number
= iminor(inode
);
73 file
->private_data
= NULL
;
78 visorchipset_release(struct inode
*inode
, struct file
*file
)
/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
87 #define MIN_IDLE_SECONDS 10
88 static unsigned long poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_FAST
;
89 static unsigned long most_recent_message_jiffies
; /* when we got our last
90 * controlvm message */
91 static int visorbusregistered
;
93 #define MAX_CHIPSET_EVENTS 2
94 static u8 chipset_events
[MAX_CHIPSET_EVENTS
] = { 0, 0 };
96 struct parser_context
{
97 unsigned long allocbytes
;
98 unsigned long param_bytes
;
100 unsigned long bytes_remaining
;
105 static struct delayed_work periodic_controlvm_work
;
106 static struct workqueue_struct
*periodic_controlvm_workqueue
;
107 static DEFINE_SEMAPHORE(notifier_lock
);
109 static struct cdev file_cdev
;
110 static struct visorchannel
**file_controlvm_channel
;
111 static struct controlvm_message_header g_chipset_msg_hdr
;
112 static struct controlvm_message_packet g_devicechangestate_packet
;
114 static LIST_HEAD(bus_info_list
);
115 static LIST_HEAD(dev_info_list
);
117 static struct visorchannel
*controlvm_channel
;
119 /* Manages the request payload in the controlvm channel */
120 struct visor_controlvm_payload_info
{
121 u8
*ptr
; /* pointer to base address of payload pool */
122 u64 offset
; /* offset from beginning of controlvm
123 * channel to beginning of payload * pool */
124 u32 bytes
; /* number of bytes in payload pool */
127 static struct visor_controlvm_payload_info controlvm_payload_info
;
129 /* The following globals are used to handle the scenario where we are unable to
130 * offload the payload from a controlvm message due to memory requirements. In
131 * this scenario, we simply stash the controlvm message, then attempt to
132 * process it again the next time controlvm_periodic_work() runs.
134 static struct controlvm_message controlvm_pending_msg
;
135 static bool controlvm_pending_msg_valid
;
137 /* This identifies a data buffer that has been received via a controlvm messages
138 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
140 struct putfile_buffer_entry
{
141 struct list_head next
; /* putfile_buffer_entry list */
142 struct parser_context
*parser_ctx
; /* points to input data buffer */
145 /* List of struct putfile_request *, via next_putfile_request member.
146 * Each entry in this list identifies an outstanding TRANSMIT_FILE
149 static LIST_HEAD(putfile_request_list
);
151 /* This describes a buffer and its current state of transfer (e.g., how many
152 * bytes have already been supplied as putfile data, and how many bytes are
153 * remaining) for a putfile_request.
155 struct putfile_active_buffer
{
156 /* a payload from a controlvm message, containing a file data buffer */
157 struct parser_context
*parser_ctx
;
158 /* points within data area of parser_ctx to next byte of data */
160 /* # bytes left from <pnext> to the end of this data buffer */
161 size_t bytes_remaining
;
164 #define PUTFILE_REQUEST_SIG 0x0906101302281211
165 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
166 * conversation. Structs of this type are dynamically linked into
167 * <Putfile_request_list>.
169 struct putfile_request
{
170 u64 sig
; /* PUTFILE_REQUEST_SIG */
172 /* header from original TransmitFile request */
173 struct controlvm_message_header controlvm_header
;
174 u64 file_request_number
; /* from original TransmitFile request */
176 /* link to next struct putfile_request */
177 struct list_head next_putfile_request
;
179 /* most-recent sequence number supplied via a controlvm message */
180 u64 data_sequence_number
;
182 /* head of putfile_buffer_entry list, which describes the data to be
183 * supplied as putfile data;
184 * - this list is added to when controlvm messages come in that supply
186 * - this list is removed from via the hotplug program that is actually
187 * consuming these buffers to write as file data */
188 struct list_head input_buffer_list
;
189 spinlock_t req_list_lock
; /* lock for input_buffer_list */
191 /* waiters for input_buffer_list to go non-empty */
192 wait_queue_head_t input_buffer_wq
;
194 /* data not yet read within current putfile_buffer_entry */
195 struct putfile_active_buffer active_buf
;
197 /* <0 = failed, 0 = in-progress, >0 = successful; */
198 /* note that this must be set with req_list_lock, and if you set <0, */
199 /* it is your responsibility to also free up all of the other objects */
200 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
201 /* before releasing the lock */
202 int completion_status
;
205 struct parahotplug_request
{
206 struct list_head list
;
208 unsigned long expiration
;
209 struct controlvm_message msg
;
212 static LIST_HEAD(parahotplug_request_list
);
213 static DEFINE_SPINLOCK(parahotplug_request_list_lock
); /* lock for above */
214 static void parahotplug_process_list(void);
216 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
217 * CONTROLVM_REPORTEVENT.
219 static struct visorchipset_busdev_notifiers busdev_notifiers
;
221 static void bus_create_response(struct visor_device
*p
, int response
);
222 static void bus_destroy_response(struct visor_device
*p
, int response
);
223 static void device_create_response(struct visor_device
*p
, int response
);
224 static void device_destroy_response(struct visor_device
*p
, int response
);
225 static void device_resume_response(struct visor_device
*p
, int response
);
227 static void visorchipset_device_pause_response(struct visor_device
*p
,
230 static struct visorchipset_busdev_responders busdev_responders
= {
231 .bus_create
= bus_create_response
,
232 .bus_destroy
= bus_destroy_response
,
233 .device_create
= device_create_response
,
234 .device_destroy
= device_destroy_response
,
235 .device_pause
= visorchipset_device_pause_response
,
236 .device_resume
= device_resume_response
,
239 /* info for /dev/visorchipset */
240 static dev_t major_dev
= -1; /**< indicates major num for device */
242 /* prototypes for attributes */
243 static ssize_t
toolaction_show(struct device
*dev
,
244 struct device_attribute
*attr
, char *buf
);
245 static ssize_t
toolaction_store(struct device
*dev
,
246 struct device_attribute
*attr
,
247 const char *buf
, size_t count
);
248 static DEVICE_ATTR_RW(toolaction
);
250 static ssize_t
boottotool_show(struct device
*dev
,
251 struct device_attribute
*attr
, char *buf
);
252 static ssize_t
boottotool_store(struct device
*dev
,
253 struct device_attribute
*attr
, const char *buf
,
255 static DEVICE_ATTR_RW(boottotool
);
257 static ssize_t
error_show(struct device
*dev
, struct device_attribute
*attr
,
259 static ssize_t
error_store(struct device
*dev
, struct device_attribute
*attr
,
260 const char *buf
, size_t count
);
261 static DEVICE_ATTR_RW(error
);
263 static ssize_t
textid_show(struct device
*dev
, struct device_attribute
*attr
,
265 static ssize_t
textid_store(struct device
*dev
, struct device_attribute
*attr
,
266 const char *buf
, size_t count
);
267 static DEVICE_ATTR_RW(textid
);
269 static ssize_t
remaining_steps_show(struct device
*dev
,
270 struct device_attribute
*attr
, char *buf
);
271 static ssize_t
remaining_steps_store(struct device
*dev
,
272 struct device_attribute
*attr
,
273 const char *buf
, size_t count
);
274 static DEVICE_ATTR_RW(remaining_steps
);
276 static ssize_t
chipsetready_store(struct device
*dev
,
277 struct device_attribute
*attr
,
278 const char *buf
, size_t count
);
279 static DEVICE_ATTR_WO(chipsetready
);
281 static ssize_t
devicedisabled_store(struct device
*dev
,
282 struct device_attribute
*attr
,
283 const char *buf
, size_t count
);
284 static DEVICE_ATTR_WO(devicedisabled
);
286 static ssize_t
deviceenabled_store(struct device
*dev
,
287 struct device_attribute
*attr
,
288 const char *buf
, size_t count
);
289 static DEVICE_ATTR_WO(deviceenabled
);
291 static struct attribute
*visorchipset_install_attrs
[] = {
292 &dev_attr_toolaction
.attr
,
293 &dev_attr_boottotool
.attr
,
294 &dev_attr_error
.attr
,
295 &dev_attr_textid
.attr
,
296 &dev_attr_remaining_steps
.attr
,
300 static struct attribute_group visorchipset_install_group
= {
302 .attrs
= visorchipset_install_attrs
305 static struct attribute
*visorchipset_guest_attrs
[] = {
306 &dev_attr_chipsetready
.attr
,
310 static struct attribute_group visorchipset_guest_group
= {
312 .attrs
= visorchipset_guest_attrs
315 static struct attribute
*visorchipset_parahotplug_attrs
[] = {
316 &dev_attr_devicedisabled
.attr
,
317 &dev_attr_deviceenabled
.attr
,
321 static struct attribute_group visorchipset_parahotplug_group
= {
322 .name
= "parahotplug",
323 .attrs
= visorchipset_parahotplug_attrs
326 static const struct attribute_group
*visorchipset_dev_groups
[] = {
327 &visorchipset_install_group
,
328 &visorchipset_guest_group
,
329 &visorchipset_parahotplug_group
,
333 static void visorchipset_dev_release(struct device
*dev
)
337 /* /sys/devices/platform/visorchipset */
338 static struct platform_device visorchipset_platform_device
= {
339 .name
= "visorchipset",
341 .dev
.groups
= visorchipset_dev_groups
,
342 .dev
.release
= visorchipset_dev_release
,
345 /* Function prototypes */
346 static void controlvm_respond(struct controlvm_message_header
*msg_hdr
,
348 static void controlvm_respond_chipset_init(
349 struct controlvm_message_header
*msg_hdr
, int response
,
350 enum ultra_chipset_feature features
);
351 static void controlvm_respond_physdev_changestate(
352 struct controlvm_message_header
*msg_hdr
, int response
,
353 struct spar_segment_state state
);
356 static void parser_done(struct parser_context
*ctx
);
358 static struct parser_context
*
359 parser_init_byte_stream(u64 addr
, u32 bytes
, bool local
, bool *retry
)
361 int allocbytes
= sizeof(struct parser_context
) + bytes
;
362 struct parser_context
*rc
= NULL
;
363 struct parser_context
*ctx
= NULL
;
369 * alloc an 0 extra byte to ensure payload is
373 if ((controlvm_payload_bytes_buffered
+ bytes
)
374 > MAX_CONTROLVM_PAYLOAD_BYTES
) {
380 ctx
= kzalloc(allocbytes
, GFP_KERNEL
|__GFP_NORETRY
);
388 ctx
->allocbytes
= allocbytes
;
389 ctx
->param_bytes
= bytes
;
391 ctx
->bytes_remaining
= 0;
392 ctx
->byte_stream
= false;
396 if (addr
> virt_to_phys(high_memory
- 1)) {
400 p
= __va((unsigned long) (addr
));
401 memcpy(ctx
->data
, p
, bytes
);
405 if (!request_mem_region(addr
, bytes
, "visorchipset")) {
410 mapping
= memremap(addr
, bytes
, MEMREMAP_WB
);
412 release_mem_region(addr
, bytes
);
416 memcpy(ctx
->data
, mapping
, bytes
);
417 release_mem_region(addr
, bytes
);
421 ctx
->byte_stream
= true;
425 controlvm_payload_bytes_buffered
+= ctx
->param_bytes
;
436 parser_id_get(struct parser_context
*ctx
)
438 struct spar_controlvm_parameters_header
*phdr
= NULL
;
442 phdr
= (struct spar_controlvm_parameters_header
*)(ctx
->data
);
446 /** Describes the state from the perspective of which controlvm messages have
447 * been received for a bus or device.
450 enum PARSER_WHICH_STRING
{
451 PARSERSTRING_INITIATOR
,
453 PARSERSTRING_CONNECTION
,
454 PARSERSTRING_NAME
, /* TODO: only PARSERSTRING_NAME is used ? */
458 parser_param_start(struct parser_context
*ctx
,
459 enum PARSER_WHICH_STRING which_string
)
461 struct spar_controlvm_parameters_header
*phdr
= NULL
;
465 phdr
= (struct spar_controlvm_parameters_header
*)(ctx
->data
);
466 switch (which_string
) {
467 case PARSERSTRING_INITIATOR
:
468 ctx
->curr
= ctx
->data
+ phdr
->initiator_offset
;
469 ctx
->bytes_remaining
= phdr
->initiator_length
;
471 case PARSERSTRING_TARGET
:
472 ctx
->curr
= ctx
->data
+ phdr
->target_offset
;
473 ctx
->bytes_remaining
= phdr
->target_length
;
475 case PARSERSTRING_CONNECTION
:
476 ctx
->curr
= ctx
->data
+ phdr
->connection_offset
;
477 ctx
->bytes_remaining
= phdr
->connection_length
;
479 case PARSERSTRING_NAME
:
480 ctx
->curr
= ctx
->data
+ phdr
->name_offset
;
481 ctx
->bytes_remaining
= phdr
->name_length
;
491 static void parser_done(struct parser_context
*ctx
)
495 controlvm_payload_bytes_buffered
-= ctx
->param_bytes
;
500 parser_string_get(struct parser_context
*ctx
)
504 int value_length
= -1;
511 nscan
= ctx
->bytes_remaining
;
516 for (i
= 0, value_length
= -1; i
< nscan
; i
++)
517 if (pscan
[i
] == '\0') {
521 if (value_length
< 0) /* '\0' was not included in the length */
522 value_length
= nscan
;
523 value
= kmalloc(value_length
+ 1, GFP_KERNEL
|__GFP_NORETRY
);
526 if (value_length
> 0)
527 memcpy(value
, pscan
, value_length
);
528 ((u8
*) (value
))[value_length
] = '\0';
533 static ssize_t
toolaction_show(struct device
*dev
,
534 struct device_attribute
*attr
,
539 visorchannel_read(controlvm_channel
,
540 offsetof(struct spar_controlvm_channel_protocol
,
541 tool_action
), &tool_action
, sizeof(u8
));
542 return scnprintf(buf
, PAGE_SIZE
, "%u\n", tool_action
);
545 static ssize_t
toolaction_store(struct device
*dev
,
546 struct device_attribute
*attr
,
547 const char *buf
, size_t count
)
552 if (kstrtou8(buf
, 10, &tool_action
))
555 ret
= visorchannel_write(controlvm_channel
,
556 offsetof(struct spar_controlvm_channel_protocol
,
558 &tool_action
, sizeof(u8
));
565 static ssize_t
boottotool_show(struct device
*dev
,
566 struct device_attribute
*attr
,
569 struct efi_spar_indication efi_spar_indication
;
571 visorchannel_read(controlvm_channel
,
572 offsetof(struct spar_controlvm_channel_protocol
,
573 efi_spar_ind
), &efi_spar_indication
,
574 sizeof(struct efi_spar_indication
));
575 return scnprintf(buf
, PAGE_SIZE
, "%u\n",
576 efi_spar_indication
.boot_to_tool
);
579 static ssize_t
boottotool_store(struct device
*dev
,
580 struct device_attribute
*attr
,
581 const char *buf
, size_t count
)
584 struct efi_spar_indication efi_spar_indication
;
586 if (kstrtoint(buf
, 10, &val
))
589 efi_spar_indication
.boot_to_tool
= val
;
590 ret
= visorchannel_write(controlvm_channel
,
591 offsetof(struct spar_controlvm_channel_protocol
,
592 efi_spar_ind
), &(efi_spar_indication
),
593 sizeof(struct efi_spar_indication
));
600 static ssize_t
error_show(struct device
*dev
, struct device_attribute
*attr
,
605 visorchannel_read(controlvm_channel
,
606 offsetof(struct spar_controlvm_channel_protocol
,
608 &error
, sizeof(u32
));
609 return scnprintf(buf
, PAGE_SIZE
, "%i\n", error
);
612 static ssize_t
error_store(struct device
*dev
, struct device_attribute
*attr
,
613 const char *buf
, size_t count
)
618 if (kstrtou32(buf
, 10, &error
))
621 ret
= visorchannel_write(controlvm_channel
,
622 offsetof(struct spar_controlvm_channel_protocol
,
624 &error
, sizeof(u32
));
630 static ssize_t
textid_show(struct device
*dev
, struct device_attribute
*attr
,
635 visorchannel_read(controlvm_channel
,
636 offsetof(struct spar_controlvm_channel_protocol
,
637 installation_text_id
),
638 &text_id
, sizeof(u32
));
639 return scnprintf(buf
, PAGE_SIZE
, "%i\n", text_id
);
642 static ssize_t
textid_store(struct device
*dev
, struct device_attribute
*attr
,
643 const char *buf
, size_t count
)
648 if (kstrtou32(buf
, 10, &text_id
))
651 ret
= visorchannel_write(controlvm_channel
,
652 offsetof(struct spar_controlvm_channel_protocol
,
653 installation_text_id
),
654 &text_id
, sizeof(u32
));
660 static ssize_t
remaining_steps_show(struct device
*dev
,
661 struct device_attribute
*attr
, char *buf
)
665 visorchannel_read(controlvm_channel
,
666 offsetof(struct spar_controlvm_channel_protocol
,
667 installation_remaining_steps
),
668 &remaining_steps
, sizeof(u16
));
669 return scnprintf(buf
, PAGE_SIZE
, "%hu\n", remaining_steps
);
672 static ssize_t
remaining_steps_store(struct device
*dev
,
673 struct device_attribute
*attr
,
674 const char *buf
, size_t count
)
679 if (kstrtou16(buf
, 10, &remaining_steps
))
682 ret
= visorchannel_write(controlvm_channel
,
683 offsetof(struct spar_controlvm_channel_protocol
,
684 installation_remaining_steps
),
685 &remaining_steps
, sizeof(u16
));
691 struct visor_busdev
{
696 static int match_visorbus_dev_by_id(struct device
*dev
, void *data
)
698 struct visor_device
*vdev
= to_visor_device(dev
);
699 struct visor_busdev
*id
= data
;
700 u32 bus_no
= id
->bus_no
;
701 u32 dev_no
= id
->dev_no
;
703 if ((vdev
->chipset_bus_no
== bus_no
) &&
704 (vdev
->chipset_dev_no
== dev_no
))
709 struct visor_device
*visorbus_get_device_by_id(u32 bus_no
, u32 dev_no
,
710 struct visor_device
*from
)
713 struct device
*dev_start
= NULL
;
714 struct visor_device
*vdev
= NULL
;
715 struct visor_busdev id
= {
721 dev_start
= &from
->device
;
722 dev
= bus_find_device(&visorbus_type
, dev_start
, (void *)&id
,
723 match_visorbus_dev_by_id
);
725 vdev
= to_visor_device(dev
);
728 EXPORT_SYMBOL(visorbus_get_device_by_id
);
731 check_chipset_events(void)
735 /* Check events to determine if response should be sent */
736 for (i
= 0; i
< MAX_CHIPSET_EVENTS
; i
++)
737 send_msg
&= chipset_events
[i
];
742 clear_chipset_events(void)
745 /* Clear chipset_events */
746 for (i
= 0; i
< MAX_CHIPSET_EVENTS
; i
++)
747 chipset_events
[i
] = 0;
751 visorchipset_register_busdev(
752 struct visorchipset_busdev_notifiers
*notifiers
,
753 struct visorchipset_busdev_responders
*responders
,
754 struct ultra_vbus_deviceinfo
*driver_info
)
756 down(¬ifier_lock
);
758 memset(&busdev_notifiers
, 0,
759 sizeof(busdev_notifiers
));
760 visorbusregistered
= 0; /* clear flag */
762 busdev_notifiers
= *notifiers
;
763 visorbusregistered
= 1; /* set flag */
766 *responders
= busdev_responders
;
768 bus_device_info_init(driver_info
, "chipset", "visorchipset",
773 EXPORT_SYMBOL_GPL(visorchipset_register_busdev
);
776 chipset_init(struct controlvm_message
*inmsg
)
778 static int chipset_inited
;
779 enum ultra_chipset_feature features
= 0;
780 int rc
= CONTROLVM_RESP_SUCCESS
;
782 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC
, POSTCODE_SEVERITY_INFO
);
783 if (chipset_inited
) {
784 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
788 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
790 /* Set features to indicate we support parahotplug (if Command
791 * also supports it). */
793 inmsg
->cmd
.init_chipset
.
794 features
& ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG
;
796 /* Set the "reply" bit so Command knows this is a
797 * features-aware driver. */
798 features
|= ULTRA_CHIPSET_FEATURE_REPLY
;
801 if (inmsg
->hdr
.flags
.response_expected
)
802 controlvm_respond_chipset_init(&inmsg
->hdr
, rc
, features
);
806 controlvm_init_response(struct controlvm_message
*msg
,
807 struct controlvm_message_header
*msg_hdr
, int response
)
809 memset(msg
, 0, sizeof(struct controlvm_message
));
810 memcpy(&msg
->hdr
, msg_hdr
, sizeof(struct controlvm_message_header
));
811 msg
->hdr
.payload_bytes
= 0;
812 msg
->hdr
.payload_vm_offset
= 0;
813 msg
->hdr
.payload_max_bytes
= 0;
815 msg
->hdr
.flags
.failed
= 1;
816 msg
->hdr
.completion_status
= (u32
) (-response
);
821 controlvm_respond(struct controlvm_message_header
*msg_hdr
, int response
)
823 struct controlvm_message outmsg
;
825 controlvm_init_response(&outmsg
, msg_hdr
, response
);
826 if (outmsg
.hdr
.flags
.test_message
== 1)
829 if (!visorchannel_signalinsert(controlvm_channel
,
830 CONTROLVM_QUEUE_REQUEST
, &outmsg
)) {
836 controlvm_respond_chipset_init(struct controlvm_message_header
*msg_hdr
,
838 enum ultra_chipset_feature features
)
840 struct controlvm_message outmsg
;
842 controlvm_init_response(&outmsg
, msg_hdr
, response
);
843 outmsg
.cmd
.init_chipset
.features
= features
;
844 if (!visorchannel_signalinsert(controlvm_channel
,
845 CONTROLVM_QUEUE_REQUEST
, &outmsg
)) {
850 static void controlvm_respond_physdev_changestate(
851 struct controlvm_message_header
*msg_hdr
, int response
,
852 struct spar_segment_state state
)
854 struct controlvm_message outmsg
;
856 controlvm_init_response(&outmsg
, msg_hdr
, response
);
857 outmsg
.cmd
.device_change_state
.state
= state
;
858 outmsg
.cmd
.device_change_state
.flags
.phys_device
= 1;
859 if (!visorchannel_signalinsert(controlvm_channel
,
860 CONTROLVM_QUEUE_REQUEST
, &outmsg
)) {
865 enum crash_obj_type
{
871 bus_responder(enum controlvm_id cmd_id
,
872 struct controlvm_message_header
*pending_msg_hdr
,
875 if (pending_msg_hdr
== NULL
)
876 return; /* no controlvm response needed */
878 if (pending_msg_hdr
->id
!= (u32
)cmd_id
)
881 controlvm_respond(pending_msg_hdr
, response
);
885 device_changestate_responder(enum controlvm_id cmd_id
,
886 struct visor_device
*p
, int response
,
887 struct spar_segment_state response_state
)
889 struct controlvm_message outmsg
;
890 u32 bus_no
= p
->chipset_bus_no
;
891 u32 dev_no
= p
->chipset_dev_no
;
893 if (p
->pending_msg_hdr
== NULL
)
894 return; /* no controlvm response needed */
895 if (p
->pending_msg_hdr
->id
!= cmd_id
)
898 controlvm_init_response(&outmsg
, p
->pending_msg_hdr
, response
);
900 outmsg
.cmd
.device_change_state
.bus_no
= bus_no
;
901 outmsg
.cmd
.device_change_state
.dev_no
= dev_no
;
902 outmsg
.cmd
.device_change_state
.state
= response_state
;
904 if (!visorchannel_signalinsert(controlvm_channel
,
905 CONTROLVM_QUEUE_REQUEST
, &outmsg
))
910 device_responder(enum controlvm_id cmd_id
,
911 struct controlvm_message_header
*pending_msg_hdr
,
914 if (pending_msg_hdr
== NULL
)
915 return; /* no controlvm response needed */
917 if (pending_msg_hdr
->id
!= (u32
)cmd_id
)
920 controlvm_respond(pending_msg_hdr
, response
);
924 bus_epilog(struct visor_device
*bus_info
,
925 u32 cmd
, struct controlvm_message_header
*msg_hdr
,
926 int response
, bool need_response
)
928 bool notified
= false;
929 struct controlvm_message_header
*pmsg_hdr
= NULL
;
932 /* relying on a valid passed in response code */
933 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
938 if (bus_info
->pending_msg_hdr
) {
939 /* only non-NULL if dev is still waiting on a response */
940 response
= -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT
;
941 pmsg_hdr
= bus_info
->pending_msg_hdr
;
946 pmsg_hdr
= kzalloc(sizeof(*pmsg_hdr
), GFP_KERNEL
);
948 response
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
952 memcpy(pmsg_hdr
, msg_hdr
,
953 sizeof(struct controlvm_message_header
));
954 bus_info
->pending_msg_hdr
= pmsg_hdr
;
957 down(¬ifier_lock
);
958 if (response
== CONTROLVM_RESP_SUCCESS
) {
960 case CONTROLVM_BUS_CREATE
:
961 if (busdev_notifiers
.bus_create
) {
962 (*busdev_notifiers
.bus_create
) (bus_info
);
966 case CONTROLVM_BUS_DESTROY
:
967 if (busdev_notifiers
.bus_destroy
) {
968 (*busdev_notifiers
.bus_destroy
) (bus_info
);
976 /* The callback function just called above is responsible
977 * for calling the appropriate visorchipset_busdev_responders
978 * function, which will call bus_responder()
983 * Do not kfree(pmsg_hdr) as this is the failure path.
984 * The success path ('notified') will call the responder
985 * directly and kfree() there.
987 bus_responder(cmd
, pmsg_hdr
, response
);
992 device_epilog(struct visor_device
*dev_info
,
993 struct spar_segment_state state
, u32 cmd
,
994 struct controlvm_message_header
*msg_hdr
, int response
,
995 bool need_response
, bool for_visorbus
)
997 struct visorchipset_busdev_notifiers
*notifiers
;
998 bool notified
= false;
999 struct controlvm_message_header
*pmsg_hdr
= NULL
;
1001 notifiers
= &busdev_notifiers
;
1004 /* relying on a valid passed in response code */
1005 /* be lazy and re-use msg_hdr for this failure, is this ok?? */
1010 if (dev_info
->pending_msg_hdr
) {
1011 /* only non-NULL if dev is still waiting on a response */
1012 response
= -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT
;
1013 pmsg_hdr
= dev_info
->pending_msg_hdr
;
1017 if (need_response
) {
1018 pmsg_hdr
= kzalloc(sizeof(*pmsg_hdr
), GFP_KERNEL
);
1020 response
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
1024 memcpy(pmsg_hdr
, msg_hdr
,
1025 sizeof(struct controlvm_message_header
));
1026 dev_info
->pending_msg_hdr
= pmsg_hdr
;
1029 down(¬ifier_lock
);
1030 if (response
>= 0) {
1032 case CONTROLVM_DEVICE_CREATE
:
1033 if (notifiers
->device_create
) {
1034 (*notifiers
->device_create
) (dev_info
);
1038 case CONTROLVM_DEVICE_CHANGESTATE
:
1039 /* ServerReady / ServerRunning / SegmentStateRunning */
1040 if (state
.alive
== segment_state_running
.alive
&&
1042 segment_state_running
.operating
) {
1043 if (notifiers
->device_resume
) {
1044 (*notifiers
->device_resume
) (dev_info
);
1048 /* ServerNotReady / ServerLost / SegmentStateStandby */
1049 else if (state
.alive
== segment_state_standby
.alive
&&
1051 segment_state_standby
.operating
) {
1052 /* technically this is standby case
1053 * where server is lost
1055 if (notifiers
->device_pause
) {
1056 (*notifiers
->device_pause
) (dev_info
);
1061 case CONTROLVM_DEVICE_DESTROY
:
1062 if (notifiers
->device_destroy
) {
1063 (*notifiers
->device_destroy
) (dev_info
);
1071 /* The callback function just called above is responsible
1072 * for calling the appropriate visorchipset_busdev_responders
1073 * function, which will call device_responder()
1078 * Do not kfree(pmsg_hdr) as this is the failure path.
1079 * The success path ('notified') will call the responder
1080 * directly and kfree() there.
1082 device_responder(cmd
, pmsg_hdr
, response
);
1087 bus_create(struct controlvm_message
*inmsg
)
1089 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1090 u32 bus_no
= cmd
->create_bus
.bus_no
;
1091 int rc
= CONTROLVM_RESP_SUCCESS
;
1092 struct visor_device
*bus_info
;
1093 struct visorchannel
*visorchannel
;
1095 bus_info
= visorbus_get_device_by_id(bus_no
, BUS_ROOT_DEVICE
, NULL
);
1096 if (bus_info
&& (bus_info
->state
.created
== 1)) {
1097 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC
, bus_no
,
1098 POSTCODE_SEVERITY_ERR
);
1099 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1102 bus_info
= kzalloc(sizeof(*bus_info
), GFP_KERNEL
);
1104 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC
, bus_no
,
1105 POSTCODE_SEVERITY_ERR
);
1106 rc
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
1110 INIT_LIST_HEAD(&bus_info
->list_all
);
1111 bus_info
->chipset_bus_no
= bus_no
;
1112 bus_info
->chipset_dev_no
= BUS_ROOT_DEVICE
;
1114 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC
, bus_no
, POSTCODE_SEVERITY_INFO
);
1116 visorchannel
= visorchannel_create(cmd
->create_bus
.channel_addr
,
1117 cmd
->create_bus
.channel_bytes
,
1119 cmd
->create_bus
.bus_data_type_uuid
);
1121 if (!visorchannel
) {
1122 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC
, bus_no
,
1123 POSTCODE_SEVERITY_ERR
);
1124 rc
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
1129 bus_info
->visorchannel
= visorchannel
;
1131 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC
, bus_no
, POSTCODE_SEVERITY_INFO
);
1134 bus_epilog(bus_info
, CONTROLVM_BUS_CREATE
, &inmsg
->hdr
,
1135 rc
, inmsg
->hdr
.flags
.response_expected
== 1);
1139 bus_destroy(struct controlvm_message
*inmsg
)
1141 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1142 u32 bus_no
= cmd
->destroy_bus
.bus_no
;
1143 struct visor_device
*bus_info
;
1144 int rc
= CONTROLVM_RESP_SUCCESS
;
1146 bus_info
= visorbus_get_device_by_id(bus_no
, BUS_ROOT_DEVICE
, NULL
);
1148 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1149 else if (bus_info
->state
.created
== 0)
1150 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1152 bus_epilog(bus_info
, CONTROLVM_BUS_DESTROY
, &inmsg
->hdr
,
1153 rc
, inmsg
->hdr
.flags
.response_expected
== 1);
1155 /* bus_info is freed as part of the busdevice_release function */
1159 bus_configure(struct controlvm_message
*inmsg
,
1160 struct parser_context
*parser_ctx
)
1162 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1164 struct visor_device
*bus_info
;
1165 int rc
= CONTROLVM_RESP_SUCCESS
;
1167 bus_no
= cmd
->configure_bus
.bus_no
;
1168 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC
, bus_no
,
1169 POSTCODE_SEVERITY_INFO
);
1171 bus_info
= visorbus_get_device_by_id(bus_no
, BUS_ROOT_DEVICE
, NULL
);
1173 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC
, bus_no
,
1174 POSTCODE_SEVERITY_ERR
);
1175 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1176 } else if (bus_info
->state
.created
== 0) {
1177 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC
, bus_no
,
1178 POSTCODE_SEVERITY_ERR
);
1179 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1180 } else if (bus_info
->pending_msg_hdr
!= NULL
) {
1181 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC
, bus_no
,
1182 POSTCODE_SEVERITY_ERR
);
1183 rc
= -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT
;
1185 visorchannel_set_clientpartition(bus_info
->visorchannel
,
1186 cmd
->configure_bus
.guest_handle
);
1187 bus_info
->partition_uuid
= parser_id_get(parser_ctx
);
1188 parser_param_start(parser_ctx
, PARSERSTRING_NAME
);
1189 bus_info
->name
= parser_string_get(parser_ctx
);
1191 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC
, bus_no
,
1192 POSTCODE_SEVERITY_INFO
);
1194 bus_epilog(bus_info
, CONTROLVM_BUS_CONFIGURE
, &inmsg
->hdr
,
1195 rc
, inmsg
->hdr
.flags
.response_expected
== 1);
1199 my_device_create(struct controlvm_message
*inmsg
)
1201 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1202 u32 bus_no
= cmd
->create_device
.bus_no
;
1203 u32 dev_no
= cmd
->create_device
.dev_no
;
1204 struct visor_device
*dev_info
= NULL
;
1205 struct visor_device
*bus_info
;
1206 struct visorchannel
*visorchannel
;
1207 int rc
= CONTROLVM_RESP_SUCCESS
;
1209 bus_info
= visorbus_get_device_by_id(bus_no
, BUS_ROOT_DEVICE
, NULL
);
1211 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1212 POSTCODE_SEVERITY_ERR
);
1213 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1217 if (bus_info
->state
.created
== 0) {
1218 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1219 POSTCODE_SEVERITY_ERR
);
1220 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1224 dev_info
= visorbus_get_device_by_id(bus_no
, dev_no
, NULL
);
1225 if (dev_info
&& (dev_info
->state
.created
== 1)) {
1226 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1227 POSTCODE_SEVERITY_ERR
);
1228 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1232 dev_info
= kzalloc(sizeof(*dev_info
), GFP_KERNEL
);
1234 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1235 POSTCODE_SEVERITY_ERR
);
1236 rc
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
1240 dev_info
->chipset_bus_no
= bus_no
;
1241 dev_info
->chipset_dev_no
= dev_no
;
1242 dev_info
->inst
= cmd
->create_device
.dev_inst_uuid
;
1244 /* not sure where the best place to set the 'parent' */
1245 dev_info
->device
.parent
= &bus_info
->device
;
1247 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC
, dev_no
, bus_no
,
1248 POSTCODE_SEVERITY_INFO
);
1251 visorchannel_create_with_lock(cmd
->create_device
.channel_addr
,
1252 cmd
->create_device
.channel_bytes
,
1254 cmd
->create_device
.data_type_uuid
);
1256 if (!visorchannel
) {
1257 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1258 POSTCODE_SEVERITY_ERR
);
1259 rc
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
1264 dev_info
->visorchannel
= visorchannel
;
1265 dev_info
->channel_type_guid
= cmd
->create_device
.data_type_uuid
;
1266 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC
, dev_no
, bus_no
,
1267 POSTCODE_SEVERITY_INFO
);
1269 device_epilog(dev_info
, segment_state_running
,
1270 CONTROLVM_DEVICE_CREATE
, &inmsg
->hdr
, rc
,
1271 inmsg
->hdr
.flags
.response_expected
== 1, 1);
1275 my_device_changestate(struct controlvm_message
*inmsg
)
1277 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1278 u32 bus_no
= cmd
->device_change_state
.bus_no
;
1279 u32 dev_no
= cmd
->device_change_state
.dev_no
;
1280 struct spar_segment_state state
= cmd
->device_change_state
.state
;
1281 struct visor_device
*dev_info
;
1282 int rc
= CONTROLVM_RESP_SUCCESS
;
1284 dev_info
= visorbus_get_device_by_id(bus_no
, dev_no
, NULL
);
1286 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC
, dev_no
, bus_no
,
1287 POSTCODE_SEVERITY_ERR
);
1288 rc
= -CONTROLVM_RESP_ERROR_DEVICE_INVALID
;
1289 } else if (dev_info
->state
.created
== 0) {
1290 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC
, dev_no
, bus_no
,
1291 POSTCODE_SEVERITY_ERR
);
1292 rc
= -CONTROLVM_RESP_ERROR_DEVICE_INVALID
;
1294 if ((rc
>= CONTROLVM_RESP_SUCCESS
) && dev_info
)
1295 device_epilog(dev_info
, state
,
1296 CONTROLVM_DEVICE_CHANGESTATE
, &inmsg
->hdr
, rc
,
1297 inmsg
->hdr
.flags
.response_expected
== 1, 1);
1301 my_device_destroy(struct controlvm_message
*inmsg
)
1303 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1304 u32 bus_no
= cmd
->destroy_device
.bus_no
;
1305 u32 dev_no
= cmd
->destroy_device
.dev_no
;
1306 struct visor_device
*dev_info
;
1307 int rc
= CONTROLVM_RESP_SUCCESS
;
1309 dev_info
= visorbus_get_device_by_id(bus_no
, dev_no
, NULL
);
1311 rc
= -CONTROLVM_RESP_ERROR_DEVICE_INVALID
;
1312 else if (dev_info
->state
.created
== 0)
1313 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1315 if ((rc
>= CONTROLVM_RESP_SUCCESS
) && dev_info
)
1316 device_epilog(dev_info
, segment_state_running
,
1317 CONTROLVM_DEVICE_DESTROY
, &inmsg
->hdr
, rc
,
1318 inmsg
->hdr
.flags
.response_expected
== 1, 1);
1321 /* When provided with the physical address of the controlvm channel
1322 * (phys_addr), the offset to the payload area we need to manage
1323 * (offset), and the size of this payload area (bytes), fills in the
1324 * controlvm_payload_info struct. Returns true for success or false
1328 initialize_controlvm_payload_info(u64 phys_addr
, u64 offset
, u32 bytes
,
1329 struct visor_controlvm_payload_info
*info
)
1332 int rc
= CONTROLVM_RESP_SUCCESS
;
1335 rc
= -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID
;
1338 memset(info
, 0, sizeof(struct visor_controlvm_payload_info
));
1339 if ((offset
== 0) || (bytes
== 0)) {
1340 rc
= -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID
;
1343 payload
= memremap(phys_addr
+ offset
, bytes
, MEMREMAP_WB
);
1345 rc
= -CONTROLVM_RESP_ERROR_IOREMAP_FAILED
;
1349 info
->offset
= offset
;
1350 info
->bytes
= bytes
;
1351 info
->ptr
= payload
;
1364 destroy_controlvm_payload_info(struct visor_controlvm_payload_info
*info
)
1367 memunmap(info
->ptr
);
1370 memset(info
, 0, sizeof(struct visor_controlvm_payload_info
));
1374 initialize_controlvm_payload(void)
1376 u64 phys_addr
= visorchannel_get_physaddr(controlvm_channel
);
1377 u64 payload_offset
= 0;
1378 u32 payload_bytes
= 0;
1380 if (visorchannel_read(controlvm_channel
,
1381 offsetof(struct spar_controlvm_channel_protocol
,
1382 request_payload_offset
),
1383 &payload_offset
, sizeof(payload_offset
)) < 0) {
1384 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC
,
1385 POSTCODE_SEVERITY_ERR
);
1388 if (visorchannel_read(controlvm_channel
,
1389 offsetof(struct spar_controlvm_channel_protocol
,
1390 request_payload_bytes
),
1391 &payload_bytes
, sizeof(payload_bytes
)) < 0) {
1392 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC
,
1393 POSTCODE_SEVERITY_ERR
);
1396 initialize_controlvm_payload_info(phys_addr
,
1397 payload_offset
, payload_bytes
,
1398 &controlvm_payload_info
);
1401 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1402 * Returns CONTROLVM_RESP_xxx code.
1405 visorchipset_chipset_ready(void)
1407 kobject_uevent(&visorchipset_platform_device
.dev
.kobj
, KOBJ_ONLINE
);
1408 return CONTROLVM_RESP_SUCCESS
;
1412 visorchipset_chipset_selftest(void)
1414 char env_selftest
[20];
1415 char *envp
[] = { env_selftest
, NULL
};
1417 sprintf(env_selftest
, "SPARSP_SELFTEST=%d", 1);
1418 kobject_uevent_env(&visorchipset_platform_device
.dev
.kobj
, KOBJ_CHANGE
,
1420 return CONTROLVM_RESP_SUCCESS
;
1423 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1424 * Returns CONTROLVM_RESP_xxx code.
1427 visorchipset_chipset_notready(void)
1429 kobject_uevent(&visorchipset_platform_device
.dev
.kobj
, KOBJ_OFFLINE
);
1430 return CONTROLVM_RESP_SUCCESS
;
1434 chipset_ready(struct controlvm_message_header
*msg_hdr
)
1436 int rc
= visorchipset_chipset_ready();
1438 if (rc
!= CONTROLVM_RESP_SUCCESS
)
1440 if (msg_hdr
->flags
.response_expected
&& !visorchipset_holdchipsetready
)
1441 controlvm_respond(msg_hdr
, rc
);
1442 if (msg_hdr
->flags
.response_expected
&& visorchipset_holdchipsetready
) {
1443 /* Send CHIPSET_READY response when all modules have been loaded
1444 * and disks mounted for the partition
1446 g_chipset_msg_hdr
= *msg_hdr
;
1451 chipset_selftest(struct controlvm_message_header
*msg_hdr
)
1453 int rc
= visorchipset_chipset_selftest();
1455 if (rc
!= CONTROLVM_RESP_SUCCESS
)
1457 if (msg_hdr
->flags
.response_expected
)
1458 controlvm_respond(msg_hdr
, rc
);
1462 chipset_notready(struct controlvm_message_header
*msg_hdr
)
1464 int rc
= visorchipset_chipset_notready();
1466 if (rc
!= CONTROLVM_RESP_SUCCESS
)
1468 if (msg_hdr
->flags
.response_expected
)
1469 controlvm_respond(msg_hdr
, rc
);
1472 /* This is your "one-stop" shop for grabbing the next message from the
1473 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1476 read_controlvm_event(struct controlvm_message
*msg
)
1478 if (visorchannel_signalremove(controlvm_channel
,
1479 CONTROLVM_QUEUE_EVENT
, msg
)) {
1481 if (msg
->hdr
.flags
.test_message
== 1)
/*
 * The general parahotplug flow works as follows. The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable. The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device. The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
 * to get called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */

/* how long a queued parahotplug request may wait for the udev script */
#define PARAHOTPLUG_TIMEOUT_MS 2000
1504 * Generate unique int to match an outstanding CONTROLVM message with a
1505 * udev script /proc response
1508 parahotplug_next_id(void)
1510 static atomic_t id
= ATOMIC_INIT(0);
1512 return atomic_inc_return(&id
);
1516 * Returns the time (in jiffies) when a CONTROLVM message on the list
1517 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1519 static unsigned long
1520 parahotplug_next_expiration(void)
1522 return jiffies
+ msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS
);
1526 * Create a parahotplug_request, which is basically a wrapper for a
1527 * CONTROLVM_MESSAGE that we can stick on a list
1529 static struct parahotplug_request
*
1530 parahotplug_request_create(struct controlvm_message
*msg
)
1532 struct parahotplug_request
*req
;
1534 req
= kmalloc(sizeof(*req
), GFP_KERNEL
| __GFP_NORETRY
);
1538 req
->id
= parahotplug_next_id();
1539 req
->expiration
= parahotplug_next_expiration();
/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
1555 * Cause uevent to run the user level script to do the disable/enable
1556 * specified in (the CONTROLVM message in) the specified
1557 * parahotplug_request
1560 parahotplug_request_kickoff(struct parahotplug_request
*req
)
1562 struct controlvm_message_packet
*cmd
= &req
->msg
.cmd
;
1563 char env_cmd
[40], env_id
[40], env_state
[40], env_bus
[40], env_dev
[40],
1566 env_cmd
, env_id
, env_state
, env_bus
, env_dev
, env_func
, NULL
1569 sprintf(env_cmd
, "SPAR_PARAHOTPLUG=1");
1570 sprintf(env_id
, "SPAR_PARAHOTPLUG_ID=%d", req
->id
);
1571 sprintf(env_state
, "SPAR_PARAHOTPLUG_STATE=%d",
1572 cmd
->device_change_state
.state
.active
);
1573 sprintf(env_bus
, "SPAR_PARAHOTPLUG_BUS=%d",
1574 cmd
->device_change_state
.bus_no
);
1575 sprintf(env_dev
, "SPAR_PARAHOTPLUG_DEVICE=%d",
1576 cmd
->device_change_state
.dev_no
>> 3);
1577 sprintf(env_func
, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1578 cmd
->device_change_state
.dev_no
& 0x7);
1580 kobject_uevent_env(&visorchipset_platform_device
.dev
.kobj
, KOBJ_CHANGE
,
1585 * Remove any request from the list that's been on there too long and
1586 * respond with an error.
1589 parahotplug_process_list(void)
1591 struct list_head
*pos
;
1592 struct list_head
*tmp
;
1594 spin_lock(¶hotplug_request_list_lock
);
1596 list_for_each_safe(pos
, tmp
, ¶hotplug_request_list
) {
1597 struct parahotplug_request
*req
=
1598 list_entry(pos
, struct parahotplug_request
, list
);
1600 if (!time_after_eq(jiffies
, req
->expiration
))
1604 if (req
->msg
.hdr
.flags
.response_expected
)
1605 controlvm_respond_physdev_changestate(
1607 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT
,
1608 req
->msg
.cmd
.device_change_state
.state
);
1609 parahotplug_request_destroy(req
);
1612 spin_unlock(¶hotplug_request_list_lock
);
1616 * Called from the /proc handler, which means the user script has
1617 * finished the enable/disable. Find the matching identifier, and
1618 * respond to the CONTROLVM message with success.
1621 parahotplug_request_complete(int id
, u16 active
)
1623 struct list_head
*pos
;
1624 struct list_head
*tmp
;
1626 spin_lock(¶hotplug_request_list_lock
);
1628 /* Look for a request matching "id". */
1629 list_for_each_safe(pos
, tmp
, ¶hotplug_request_list
) {
1630 struct parahotplug_request
*req
=
1631 list_entry(pos
, struct parahotplug_request
, list
);
1632 if (req
->id
== id
) {
1633 /* Found a match. Remove it from the list and
1637 spin_unlock(¶hotplug_request_list_lock
);
1638 req
->msg
.cmd
.device_change_state
.state
.active
= active
;
1639 if (req
->msg
.hdr
.flags
.response_expected
)
1640 controlvm_respond_physdev_changestate(
1641 &req
->msg
.hdr
, CONTROLVM_RESP_SUCCESS
,
1642 req
->msg
.cmd
.device_change_state
.state
);
1643 parahotplug_request_destroy(req
);
1648 spin_unlock(¶hotplug_request_list_lock
);
1653 * Enables or disables a PCI device by kicking off a udev script
1656 parahotplug_process_message(struct controlvm_message
*inmsg
)
1658 struct parahotplug_request
*req
;
1660 req
= parahotplug_request_create(inmsg
);
1665 if (inmsg
->cmd
.device_change_state
.state
.active
) {
1666 /* For enable messages, just respond with success
1667 * right away. This is a bit of a hack, but there are
1668 * issues with the early enable messages we get (with
1669 * either the udev script not detecting that the device
1670 * is up, or not getting called at all). Fortunately
1671 * the messages that get lost don't matter anyway, as
1672 * devices are automatically enabled at
1675 parahotplug_request_kickoff(req
);
1676 controlvm_respond_physdev_changestate(&inmsg
->hdr
,
1677 CONTROLVM_RESP_SUCCESS
,
1678 inmsg
->cmd
.device_change_state
.state
);
1679 parahotplug_request_destroy(req
);
1681 /* For disable messages, add the request to the
1682 * request list before kicking off the udev script. It
1683 * won't get responded to until the script has
1684 * indicated it's done.
1686 spin_lock(¶hotplug_request_list_lock
);
1687 list_add_tail(&req
->list
, ¶hotplug_request_list
);
1688 spin_unlock(¶hotplug_request_list_lock
);
1690 parahotplug_request_kickoff(req
);
1694 /* Process a controlvm message.
1696 * false - this function will return false only in the case where the
1697 * controlvm message was NOT processed, but processing must be
1698 * retried before reading the next controlvm message; a
1699 * scenario where this can occur is when we need to throttle
1700 * the allocation of memory in which to copy out controlvm
1702 * true - processing of the controlvm message completed,
1703 * either successfully or with an error.
1706 handle_command(struct controlvm_message inmsg
, u64 channel_addr
)
1708 struct controlvm_message_packet
*cmd
= &inmsg
.cmd
;
1711 struct parser_context
*parser_ctx
= NULL
;
1713 struct controlvm_message ackmsg
;
1715 /* create parsing context if necessary */
1716 local_addr
= (inmsg
.hdr
.flags
.test_message
== 1);
1717 if (channel_addr
== 0)
1719 parm_addr
= channel_addr
+ inmsg
.hdr
.payload_vm_offset
;
1720 parm_bytes
= inmsg
.hdr
.payload_bytes
;
1722 /* Parameter and channel addresses within test messages actually lie
1723 * within our OS-controlled memory. We need to know that, because it
1724 * makes a difference in how we compute the virtual address.
1726 if (parm_addr
&& parm_bytes
) {
1730 parser_init_byte_stream(parm_addr
, parm_bytes
,
1731 local_addr
, &retry
);
1732 if (!parser_ctx
&& retry
)
1737 controlvm_init_response(&ackmsg
, &inmsg
.hdr
,
1738 CONTROLVM_RESP_SUCCESS
);
1739 if (controlvm_channel
)
1740 visorchannel_signalinsert(controlvm_channel
,
1741 CONTROLVM_QUEUE_ACK
,
1744 switch (inmsg
.hdr
.id
) {
1745 case CONTROLVM_CHIPSET_INIT
:
1746 chipset_init(&inmsg
);
1748 case CONTROLVM_BUS_CREATE
:
1751 case CONTROLVM_BUS_DESTROY
:
1752 bus_destroy(&inmsg
);
1754 case CONTROLVM_BUS_CONFIGURE
:
1755 bus_configure(&inmsg
, parser_ctx
);
1757 case CONTROLVM_DEVICE_CREATE
:
1758 my_device_create(&inmsg
);
1760 case CONTROLVM_DEVICE_CHANGESTATE
:
1761 if (cmd
->device_change_state
.flags
.phys_device
) {
1762 parahotplug_process_message(&inmsg
);
1764 /* save the hdr and cmd structures for later use */
1765 /* when sending back the response to Command */
1766 my_device_changestate(&inmsg
);
1767 g_devicechangestate_packet
= inmsg
.cmd
;
1771 case CONTROLVM_DEVICE_DESTROY
:
1772 my_device_destroy(&inmsg
);
1774 case CONTROLVM_DEVICE_CONFIGURE
:
1775 /* no op for now, just send a respond that we passed */
1776 if (inmsg
.hdr
.flags
.response_expected
)
1777 controlvm_respond(&inmsg
.hdr
, CONTROLVM_RESP_SUCCESS
);
1779 case CONTROLVM_CHIPSET_READY
:
1780 chipset_ready(&inmsg
.hdr
);
1782 case CONTROLVM_CHIPSET_SELFTEST
:
1783 chipset_selftest(&inmsg
.hdr
);
1785 case CONTROLVM_CHIPSET_STOP
:
1786 chipset_notready(&inmsg
.hdr
);
1789 if (inmsg
.hdr
.flags
.response_expected
)
1790 controlvm_respond(&inmsg
.hdr
,
1791 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN
);
1796 parser_done(parser_ctx
);
1802 static inline unsigned int
1803 issue_vmcall_io_controlvm_addr(u64
*control_addr
, u32
*control_bytes
)
1805 struct vmcall_io_controlvm_addr_params params
;
1806 int result
= VMCALL_SUCCESS
;
1809 physaddr
= virt_to_phys(¶ms
);
1810 ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR
, physaddr
, result
);
1811 if (VMCALL_SUCCESSFUL(result
)) {
1812 *control_addr
= params
.address
;
1813 *control_bytes
= params
.channel_bytes
;
1818 static u64
controlvm_get_channel_address(void)
1823 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr
, &size
)))
1830 controlvm_periodic_work(struct work_struct
*work
)
1832 struct controlvm_message inmsg
;
1833 bool got_command
= false;
1834 bool handle_command_failed
= false;
1835 static u64 poll_count
;
1837 /* make sure visorbus server is registered for controlvm callbacks */
1838 if (visorchipset_visorbusregwait
&& !visorbusregistered
)
1842 if (poll_count
>= 250)
1847 /* Check events to determine if response to CHIPSET_READY
1850 if (visorchipset_holdchipsetready
&&
1851 (g_chipset_msg_hdr
.id
!= CONTROLVM_INVALID
)) {
1852 if (check_chipset_events() == 1) {
1853 controlvm_respond(&g_chipset_msg_hdr
, 0);
1854 clear_chipset_events();
1855 memset(&g_chipset_msg_hdr
, 0,
1856 sizeof(struct controlvm_message_header
));
1860 while (visorchannel_signalremove(controlvm_channel
,
1861 CONTROLVM_QUEUE_RESPONSE
,
1865 if (controlvm_pending_msg_valid
) {
1866 /* we throttled processing of a prior
1867 * msg, so try to process it again
1868 * rather than reading a new one
1870 inmsg
= controlvm_pending_msg
;
1871 controlvm_pending_msg_valid
= false;
1874 got_command
= read_controlvm_event(&inmsg
);
1878 handle_command_failed
= false;
1879 while (got_command
&& (!handle_command_failed
)) {
1880 most_recent_message_jiffies
= jiffies
;
1881 if (handle_command(inmsg
,
1882 visorchannel_get_physaddr
1883 (controlvm_channel
)))
1884 got_command
= read_controlvm_event(&inmsg
);
1886 /* this is a scenario where throttling
1887 * is required, but probably NOT an
1888 * error...; we stash the current
1889 * controlvm msg so we will attempt to
1890 * reprocess it on our next loop
1892 handle_command_failed
= true;
1893 controlvm_pending_msg
= inmsg
;
1894 controlvm_pending_msg_valid
= true;
1898 /* parahotplug_worker */
1899 parahotplug_process_list();
1903 if (time_after(jiffies
,
1904 most_recent_message_jiffies
+ (HZ
* MIN_IDLE_SECONDS
))) {
1905 /* it's been longer than MIN_IDLE_SECONDS since we
1906 * processed our last controlvm message; slow down the
1909 if (poll_jiffies
!= POLLJIFFIES_CONTROLVMCHANNEL_SLOW
)
1910 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_SLOW
;
1912 if (poll_jiffies
!= POLLJIFFIES_CONTROLVMCHANNEL_FAST
)
1913 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_FAST
;
1916 queue_delayed_work(periodic_controlvm_workqueue
,
1917 &periodic_controlvm_work
, poll_jiffies
);
1921 setup_crash_devices_work_queue(struct work_struct
*work
)
1923 struct controlvm_message local_crash_bus_msg
;
1924 struct controlvm_message local_crash_dev_msg
;
1925 struct controlvm_message msg
;
1926 u32 local_crash_msg_offset
;
1927 u16 local_crash_msg_count
;
1929 /* make sure visorbus is registered for controlvm callbacks */
1930 if (visorchipset_visorbusregwait
&& !visorbusregistered
)
1933 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC
, POSTCODE_SEVERITY_INFO
);
1935 /* send init chipset msg */
1936 msg
.hdr
.id
= CONTROLVM_CHIPSET_INIT
;
1937 msg
.cmd
.init_chipset
.bus_count
= 23;
1938 msg
.cmd
.init_chipset
.switch_count
= 0;
1942 /* get saved message count */
1943 if (visorchannel_read(controlvm_channel
,
1944 offsetof(struct spar_controlvm_channel_protocol
,
1945 saved_crash_message_count
),
1946 &local_crash_msg_count
, sizeof(u16
)) < 0) {
1947 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC
,
1948 POSTCODE_SEVERITY_ERR
);
1952 if (local_crash_msg_count
!= CONTROLVM_CRASHMSG_MAX
) {
1953 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC
,
1954 local_crash_msg_count
,
1955 POSTCODE_SEVERITY_ERR
);
1959 /* get saved crash message offset */
1960 if (visorchannel_read(controlvm_channel
,
1961 offsetof(struct spar_controlvm_channel_protocol
,
1962 saved_crash_message_offset
),
1963 &local_crash_msg_offset
, sizeof(u32
)) < 0) {
1964 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC
,
1965 POSTCODE_SEVERITY_ERR
);
1969 /* read create device message for storage bus offset */
1970 if (visorchannel_read(controlvm_channel
,
1971 local_crash_msg_offset
,
1972 &local_crash_bus_msg
,
1973 sizeof(struct controlvm_message
)) < 0) {
1974 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC
,
1975 POSTCODE_SEVERITY_ERR
);
1979 /* read create device message for storage device */
1980 if (visorchannel_read(controlvm_channel
,
1981 local_crash_msg_offset
+
1982 sizeof(struct controlvm_message
),
1983 &local_crash_dev_msg
,
1984 sizeof(struct controlvm_message
)) < 0) {
1985 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC
,
1986 POSTCODE_SEVERITY_ERR
);
1990 /* reuse IOVM create bus message */
1991 if (local_crash_bus_msg
.cmd
.create_bus
.channel_addr
) {
1992 bus_create(&local_crash_bus_msg
);
1994 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC
,
1995 POSTCODE_SEVERITY_ERR
);
1999 /* reuse create device message for storage device */
2000 if (local_crash_dev_msg
.cmd
.create_device
.channel_addr
) {
2001 my_device_create(&local_crash_dev_msg
);
2003 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC
,
2004 POSTCODE_SEVERITY_ERR
);
2007 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
2012 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_SLOW
;
2014 queue_delayed_work(periodic_controlvm_workqueue
,
2015 &periodic_controlvm_work
, poll_jiffies
);
2019 bus_create_response(struct visor_device
*bus_info
, int response
)
2022 bus_info
->state
.created
= 1;
2024 bus_responder(CONTROLVM_BUS_CREATE
, bus_info
->pending_msg_hdr
,
2027 kfree(bus_info
->pending_msg_hdr
);
2028 bus_info
->pending_msg_hdr
= NULL
;
2032 bus_destroy_response(struct visor_device
*bus_info
, int response
)
2034 bus_responder(CONTROLVM_BUS_DESTROY
, bus_info
->pending_msg_hdr
,
2037 kfree(bus_info
->pending_msg_hdr
);
2038 bus_info
->pending_msg_hdr
= NULL
;
2042 device_create_response(struct visor_device
*dev_info
, int response
)
2045 dev_info
->state
.created
= 1;
2047 device_responder(CONTROLVM_DEVICE_CREATE
, dev_info
->pending_msg_hdr
,
2050 kfree(dev_info
->pending_msg_hdr
);
2051 dev_info
->pending_msg_hdr
= NULL
;
2055 device_destroy_response(struct visor_device
*dev_info
, int response
)
2057 device_responder(CONTROLVM_DEVICE_DESTROY
, dev_info
->pending_msg_hdr
,
2060 kfree(dev_info
->pending_msg_hdr
);
2061 dev_info
->pending_msg_hdr
= NULL
;
2065 visorchipset_device_pause_response(struct visor_device
*dev_info
,
2068 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE
,
2070 segment_state_standby
);
2072 kfree(dev_info
->pending_msg_hdr
);
2073 dev_info
->pending_msg_hdr
= NULL
;
2077 device_resume_response(struct visor_device
*dev_info
, int response
)
2079 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE
,
2081 segment_state_running
);
2083 kfree(dev_info
->pending_msg_hdr
);
2084 dev_info
->pending_msg_hdr
= NULL
;
2087 static ssize_t
chipsetready_store(struct device
*dev
,
2088 struct device_attribute
*attr
,
2089 const char *buf
, size_t count
)
2093 if (sscanf(buf
, "%63s", msgtype
) != 1)
2096 if (!strcmp(msgtype
, "CALLHOMEDISK_MOUNTED")) {
2097 chipset_events
[0] = 1;
2099 } else if (!strcmp(msgtype
, "MODULES_LOADED")) {
2100 chipset_events
[1] = 1;
2106 /* The parahotplug/devicedisabled interface gets called by our support script
2107 * when an SR-IOV device has been shut down. The ID is passed to the script
2108 * and then passed back when the device has been removed.
2110 static ssize_t
devicedisabled_store(struct device
*dev
,
2111 struct device_attribute
*attr
,
2112 const char *buf
, size_t count
)
2116 if (kstrtouint(buf
, 10, &id
))
2119 parahotplug_request_complete(id
, 0);
2123 /* The parahotplug/deviceenabled interface gets called by our support script
2124 * when an SR-IOV device has been recovered. The ID is passed to the script
2125 * and then passed back when the device has been brought back up.
2127 static ssize_t
deviceenabled_store(struct device
*dev
,
2128 struct device_attribute
*attr
,
2129 const char *buf
, size_t count
)
2133 if (kstrtouint(buf
, 10, &id
))
2136 parahotplug_request_complete(id
, 1);
2141 visorchipset_mmap(struct file
*file
, struct vm_area_struct
*vma
)
2143 unsigned long physaddr
= 0;
2144 unsigned long offset
= vma
->vm_pgoff
<< PAGE_SHIFT
;
2147 /* sv_enable_dfp(); */
2148 if (offset
& (PAGE_SIZE
- 1))
2149 return -ENXIO
; /* need aligned offsets */
2152 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET
:
2153 vma
->vm_flags
|= VM_IO
;
2154 if (!*file_controlvm_channel
)
2157 visorchannel_read(*file_controlvm_channel
,
2158 offsetof(struct spar_controlvm_channel_protocol
,
2159 gp_control_channel
),
2160 &addr
, sizeof(addr
));
2164 physaddr
= (unsigned long)addr
;
2165 if (remap_pfn_range(vma
, vma
->vm_start
,
2166 physaddr
>> PAGE_SHIFT
,
2167 vma
->vm_end
- vma
->vm_start
,
2168 /*pgprot_noncached */
2169 (vma
->vm_page_prot
))) {
2179 static inline s64
issue_vmcall_query_guest_virtual_time_offset(void)
2181 u64 result
= VMCALL_SUCCESS
;
2184 ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET
, physaddr
,
2189 static inline int issue_vmcall_update_physical_time(u64 adjustment
)
2191 int result
= VMCALL_SUCCESS
;
2193 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME
, adjustment
, result
);
2197 static long visorchipset_ioctl(struct file
*file
, unsigned int cmd
,
2204 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET
:
2205 /* get the physical rtc offset */
2206 vrtc_offset
= issue_vmcall_query_guest_virtual_time_offset();
2207 if (copy_to_user((void __user
*)arg
, &vrtc_offset
,
2208 sizeof(vrtc_offset
))) {
2212 case VMCALL_UPDATE_PHYSICAL_TIME
:
2213 if (copy_from_user(&adjustment
, (void __user
*)arg
,
2214 sizeof(adjustment
))) {
2217 return issue_vmcall_update_physical_time(adjustment
);
2223 static const struct file_operations visorchipset_fops
= {
2224 .owner
= THIS_MODULE
,
2225 .open
= visorchipset_open
,
2228 .unlocked_ioctl
= visorchipset_ioctl
,
2229 .release
= visorchipset_release
,
2230 .mmap
= visorchipset_mmap
,
2234 visorchipset_file_init(dev_t major_dev
, struct visorchannel
**controlvm_channel
)
2238 file_controlvm_channel
= controlvm_channel
;
2239 cdev_init(&file_cdev
, &visorchipset_fops
);
2240 file_cdev
.owner
= THIS_MODULE
;
2241 if (MAJOR(major_dev
) == 0) {
2242 rc
= alloc_chrdev_region(&major_dev
, 0, 1, "visorchipset");
2243 /* dynamic major device number registration required */
2247 /* static major device number registration required */
2248 rc
= register_chrdev_region(major_dev
, 1, "visorchipset");
2252 rc
= cdev_add(&file_cdev
, MKDEV(MAJOR(major_dev
), 0), 1);
2254 unregister_chrdev_region(major_dev
, 1);
2261 visorchipset_init(struct acpi_device
*acpi_device
)
2265 int tmp_sz
= sizeof(struct spar_controlvm_channel_protocol
);
2266 uuid_le uuid
= SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID
;
2268 addr
= controlvm_get_channel_address();
2272 memset(&busdev_notifiers
, 0, sizeof(busdev_notifiers
));
2273 memset(&controlvm_payload_info
, 0, sizeof(controlvm_payload_info
));
2275 controlvm_channel
= visorchannel_create_with_lock(addr
, tmp_sz
,
2277 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2278 visorchannel_get_header(controlvm_channel
))) {
2279 initialize_controlvm_payload();
2281 visorchannel_destroy(controlvm_channel
);
2282 controlvm_channel
= NULL
;
2286 major_dev
= MKDEV(visorchipset_major
, 0);
2287 rc
= visorchipset_file_init(major_dev
, &controlvm_channel
);
2289 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC
, DIAG_SEVERITY_ERR
);
2293 memset(&g_chipset_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2295 /* if booting in a crash kernel */
2296 if (is_kdump_kernel())
2297 INIT_DELAYED_WORK(&periodic_controlvm_work
,
2298 setup_crash_devices_work_queue
);
2300 INIT_DELAYED_WORK(&periodic_controlvm_work
,
2301 controlvm_periodic_work
);
2302 periodic_controlvm_workqueue
=
2303 create_singlethread_workqueue("visorchipset_controlvm");
2305 if (!periodic_controlvm_workqueue
) {
2306 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC
,
2311 most_recent_message_jiffies
= jiffies
;
2312 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_FAST
;
2313 rc
= queue_delayed_work(periodic_controlvm_workqueue
,
2314 &periodic_controlvm_work
, poll_jiffies
);
2316 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC
,
2321 visorchipset_platform_device
.dev
.devt
= major_dev
;
2322 if (platform_device_register(&visorchipset_platform_device
) < 0) {
2323 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC
, DIAG_SEVERITY_ERR
);
2327 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC
, POSTCODE_SEVERITY_INFO
);
2329 rc
= visorbus_init();
2332 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC
, rc
,
2333 POSTCODE_SEVERITY_ERR
);
2339 visorchipset_file_cleanup(dev_t major_dev
)
2342 cdev_del(&file_cdev
);
2343 file_cdev
.ops
= NULL
;
2344 unregister_chrdev_region(major_dev
, 1);
2348 visorchipset_exit(struct acpi_device
*acpi_device
)
2350 POSTCODE_LINUX_2(DRIVER_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
2354 cancel_delayed_work(&periodic_controlvm_work
);
2355 flush_workqueue(periodic_controlvm_workqueue
);
2356 destroy_workqueue(periodic_controlvm_workqueue
);
2357 periodic_controlvm_workqueue
= NULL
;
2358 destroy_controlvm_payload_info(&controlvm_payload_info
);
2360 memset(&g_chipset_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2362 visorchannel_destroy(controlvm_channel
);
2364 visorchipset_file_cleanup(visorchipset_platform_device
.dev
.devt
);
2365 platform_device_unregister(&visorchipset_platform_device
);
2366 POSTCODE_LINUX_2(DRIVER_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
2371 static const struct acpi_device_id unisys_device_ids
[] = {
2376 static struct acpi_driver unisys_acpi_driver
= {
2377 .name
= "unisys_acpi",
2378 .class = "unisys_acpi_class",
2379 .owner
= THIS_MODULE
,
2380 .ids
= unisys_device_ids
,
2382 .add
= visorchipset_init
,
2383 .remove
= visorchipset_exit
,
/* expose the ACPI ID table for module autoloading */
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
2389 static __init
uint32_t visorutil_spar_detect(void)
2391 unsigned int eax
, ebx
, ecx
, edx
;
2393 if (cpu_has_hypervisor
) {
2395 cpuid(UNISYS_SPAR_LEAF_ID
, &eax
, &ebx
, &ecx
, &edx
);
2396 return (ebx
== UNISYS_SPAR_ID_EBX
) &&
2397 (ecx
== UNISYS_SPAR_ID_ECX
) &&
2398 (edx
== UNISYS_SPAR_ID_EDX
);
2404 static int init_unisys(void)
2408 if (!visorutil_spar_detect())
2411 result
= acpi_bus_register_driver(&unisys_acpi_driver
);
2415 pr_info("Unisys Visorchipset Driver Loaded.\n");
2419 static void exit_unisys(void)
2421 acpi_bus_unregister_driver(&unisys_acpi_driver
);
2424 module_param_named(major
, visorchipset_major
, int, S_IRUGO
);
2425 MODULE_PARM_DESC(visorchipset_major
,
2426 "major device number to use for the device node");
2427 module_param_named(visorbusregwait
, visorchipset_visorbusregwait
, int, S_IRUGO
);
2428 MODULE_PARM_DESC(visorchipset_visorbusreqwait
,
2429 "1 to have the module wait for the visor bus to register");
2430 module_param_named(holdchipsetready
, visorchipset_holdchipsetready
,
2432 MODULE_PARM_DESC(visorchipset_holdchipsetready
,
2433 "1 to hold response to CHIPSET_READY");
2435 module_init(init_unisys
);
2436 module_exit(exit_unisys
);
2438 MODULE_AUTHOR("Unisys");
2439 MODULE_LICENSE("GPL");
2440 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2442 MODULE_VERSION(VERSION
);