/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
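/*
 * Illustrative sketch (not part of this file): constants like these are
 * typically compared against the CPUID hypervisor leaf. Assuming the
 * kernel's cpuid_ebx()/cpuid_ecx()/cpuid_edx() helpers, a detection test
 * could look like:
 *
 *	if (cpuid_ebx(UNISYS_SPAR_LEAF_ID) == UNISYS_SPAR_ID_EBX &&
 *	    cpuid_ecx(UNISYS_SPAR_LEAF_ID) == UNISYS_SPAR_ID_ECX &&
 *	    cpuid_edx(UNISYS_SPAR_LEAF_ID) == UNISYS_SPAR_ID_EDX)
 *		... we are running as an s-Par guest ...
 */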
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;
static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}
static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
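/*
 * Note: chipsetready_store() below sets chipset_events[0] when userspace
 * reports CALLHOMEDISK_MOUNTED and chipset_events[1] for MODULES_LOADED;
 * check_chipset_events() ANDs the array together, so a held CHIPSET_READY
 * response is released only after both events have been seen.
 */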
struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};
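/*
 * The flexible data[] array holds the copied controlvm payload;
 * parser_param_start() points curr/bytes_remaining at one of the parameter
 * strings inside data[], and parser_string_get() then copies out a
 * '\0'-terminated string from that window.
 */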
static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;
#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;
/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements. In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;			/* putfile_buffer_entry list */
	struct parser_context *parser_ctx;	/* points to input data buffer */
};
/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);
/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};
#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation. Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;	/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};
struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);
/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;
static void bus_create_response(struct visorchipset_bus_info *p, int response);
static void bus_destroy_response(struct visorchipset_bus_info *p, int response);
static void device_create_response(struct visorchipset_device_info *p,
				   int response);
static void device_destroy_response(struct visorchipset_device_info *p,
				    int response);
static void device_resume_response(struct visorchipset_device_info *p,
				   int response);

static void
visorchipset_device_pause_response(struct visorchipset_device_info *p,
				   int response);
static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};
/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};
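/*
 * Resulting sysfs layout (assuming the group names above):
 *	/sys/devices/platform/visorchipset/install/{toolaction,boottotool,
 *		error,textid,remaining_steps}
 *	/sys/devices/platform/visorchipset/guest/chipsetready
 *	/sys/devices/platform/visorchipset/parahotplug/{devicedisabled,
 *		deviceenabled}
 */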
/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * allocate an extra byte to ensure the payload is
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long) (addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		if (ctx) {
			parser_done(ctx);
			ctx = NULL;
		}
	}
	return rc;
}
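/*
 * Note on throttling: parser_init_byte_stream() refuses to buffer more than
 * MAX_CONTROLVM_PAYLOAD_BYTES at once and reports *retry = true instead;
 * handle_command() then returns false, so the stashed message
 * (controlvm_pending_msg) is re-processed on a later
 * controlvm_periodic_work() pass.
 */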
static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}
/** Describes the state from the perspective of which controlvm messages have
 *  been received for a bus or device.
 */
enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};
static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}
}
static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
static char *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *) (value))[value_length] = '\0';
	return value;
}
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}
static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &(efi_spar_indication),
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}
static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}
static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;

	kfree(p->name);
	kfree(p->description);
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}
static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *) v;

	memset(p, 0, sizeof(struct visorchipset_device_info));
}
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = (struct visor_busdev *)data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if (((bus_no == -1) || (vdev->chipset_bus_no == bus_no)) &&
	    ((dev_no == -1) || (vdev->chipset_dev_no == dev_no)))
		return 1;

	return 0;
}
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
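/*
 * Illustrative use (a sketch, not from this file): passing -1 for bus_no or
 * dev_no acts as a wildcard in match_visorbus_dev_by_id(), so every device
 * on bus 0 could be walked with (do_something() is a placeholder):
 *
 *	struct visor_device *vdev = NULL;
 *
 *	while ((vdev = visorbus_get_device_by_id(0, -1, vdev)))
 *		do_something(vdev);
 */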
static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
	struct visorchipset_bus_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no)
			return p;
	}

	return NULL;
}
static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
	struct visorchipset_device_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no && p->dev_no == dev_no)
			return p;
	}

	return NULL;
}
static void busdevices_del(struct list_head *list, u32 bus_no)
{
	struct visorchipset_device_info *p, *tmp;

	list_for_each_entry_safe(p, tmp, list, entry) {
		if (p->bus_no == bus_no) {
			/* found a device in the specified bus, so remove it */
			list_del(&p->entry);
			kfree(p);
		}
	}
}
static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}
static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}
void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it). */
	features =
	    inmsg->cmd.init_chipset.
	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver. */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}
static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet. */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
static void
bus_responder(enum controlvm_id cmd_id, struct visorchipset_bus_info *p,
	      int response)
{
	bool need_clear = false;
	u32 bus_no = p->bus_no;

	if (!p)
		return;

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			busdevices_del(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		busdevices_del(&dev_info_list, bus_no);
	}
}
static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visorchipset_device_info *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->bus_no;
	u32 dev_no = p->dev_no;

	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}
static void
device_responder(enum controlvm_id cmd_id, struct visorchipset_device_info *p,
		 int response)
{
	bool need_clear = false;

	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}
static void
bus_epilog(struct visorchipset_bus_info *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_info, response);
	up(&notifier_lock);
}
static void
device_epilog(struct visorchipset_device_info *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	u32 bus_no = dev_info->bus_no;
	u32 dev_no = dev_info->dev_no;
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;

	notifiers = &busdev_notifiers;

	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (dev_info);
					notified = true;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (dev_info);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, dev_info, response);
	up(&notifier_lock);
}
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct. Returns true for success or false
 * for failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}
static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}
static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}
static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}
/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}
/*
 * The general parahotplug flow works as follows. The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable. The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device. The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
 * to get called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */
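/*
 * Illustrative flow (a sketch; the script itself is not part of this file,
 * and the sysfs path is inferred from the parahotplug attribute group
 * defined above): the udev helper that handles the SPAR_PARAHOTPLUG uevent
 * would acknowledge a completed disable of request id 42 with:
 *
 *	echo 42 > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */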
#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}
/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
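/*
 * Example environment the udev script would see for a disable of bus 1,
 * dev_no 26 (dev_no appears to encode PCI device<<3|function, hence the
 * >>3 and &0x7 above; the id value is whatever parahotplug_next_id()
 * returned):
 *
 *	SPAR_PARAHOTPLUG=1
 *	SPAR_PARAHOTPLUG_ID=7
 *	SPAR_PARAHOTPLUG_STATE=0
 *	SPAR_PARAHOTPLUG_BUS=1
 *	SPAR_PARAHOTPLUG_DEVICE=3
 *	SPAR_PARAHOTPLUG_FUNCTION=2
 */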
/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}
/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match. Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}
/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
						      CONTROLVM_RESP_SUCCESS,
						      inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *	      controlvm message was NOT processed, but processing must be
 *	      retried before reading the next controlvm message; a
 *	      scenario where this can occur is when we need to throttle
 *	      the allocation of memory in which to copy out controlvm
 *	      payload data
 *    true  - processing of the controlvm message completed,
 *	      either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
					  -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
static u64 controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	poll_count++;
	if (poll_count >= 250)
		;	/* keep going */
	else
		goto cleanup;

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
static void
bus_create_response(struct visorchipset_bus_info *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_CREATE, bus_info, response);
}

static void
bus_destroy_response(struct visorchipset_bus_info *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info, response);
}

static void
device_create_response(struct visorchipset_device_info *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_CREATE, dev_info, response);
}

static void
device_destroy_response(struct visorchipset_device_info *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info, response);
}

static void
visorchipset_device_pause_response(struct visorchipset_device_info *dev_info,
				   int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);
}

static void
device_resume_response(struct visorchipset_device_info *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);
}
bool
visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
{
	void *p = bus_find(&bus_info_list, bus_no);

	if (!p)
		return false;
	memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);

void
visorchipset_set_bus_context(struct visorchipset_bus_info *p, void *context)
{
	if (!p)
		return;
	p->bus_driver_context = context;
}
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);

bool
visorchipset_get_device_info(u32 bus_no, u32 dev_no,
			     struct visorchipset_device_info *dev_info)
{
	void *p = device_find(&dev_info_list, bus_no, dev_no);

	if (!p)
		return false;
	memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);

void
visorchipset_set_device_context(struct visorchipset_device_info *p,
				void *context)
{
	if (!p)
		return;
	p->bus_driver_context = context;
}
EXPORT_SYMBOL_GPL(visorchipset_set_device_context);

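/*
 * A minimal sketch of how a client of the accessors above might use them
 * (bus_no/dev_no are illustrative locals). Note the get_* routines
 * memcpy() a snapshot into the caller's buffer, so later changes to the
 * tracked entry are not reflected in the copy:
 *
 *	struct visorchipset_device_info dev_info;
 *
 *	if (!visorchipset_get_device_info(bus_no, dev_no, &dev_info))
 *		return;		// no such device is being tracked
 *	// dev_info now holds a copy, not the live list entry
 */
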
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
		chipset_events[0] = 1;
		return count;
	} else if (!strcmp(msgtype, "MODULES_LOADED")) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}

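/*
 * chipsetready is written by guest-side tooling to report boot progress,
 * e.g. (sysfs path is illustrative; the attribute hangs off the
 * visorchipset platform device):
 *
 *	echo MODULES_LOADED > \
 *		/sys/devices/platform/visorchipset/guest/chipsetready
 */
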
/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}

/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}

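/*
 * Round-trip sketch for the two parahotplug attributes above (paths are
 * illustrative): the driver hands an ID to the support script when it
 * wants an SR-IOV device taken down or recovered, and the script echoes
 * that ID back once the work is actually done:
 *
 *	echo $id > .../parahotplug/devicedisabled	# device is gone
 *	echo $id > .../parahotplug/deviceenabled	# device is back
 */
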
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read(*file_controlvm_channel,
				  offsetof(struct spar_controlvm_channel_protocol,
					   gp_control_channel),
				  &addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}

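/*
 * Userspace counterpart, as a sketch (device node name and mapping size
 * are illustrative): a page-aligned file offset of 0 selects
 * VISORCHIPSET_MMAP_CONTROLCHANOFFSET, so the GP control channel is
 * mapped like this:
 *
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	void *chan = mmap(NULL, chan_bytes, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */
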
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}

static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	s64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -EFAULT;
	}
}

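/*
 * Userspace sketch for the ioctl above (illustrative; note the command
 * numbers are the raw VMCALL ids, since the switch above dispatches on
 * them directly):
 *
 *	s64 vrtc_offset;
 *
 *	if (ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET,
 *		  &vrtc_offset) == 0)
 *		printf("vrtc offset: %lld\n", (long long)vrtc_offset);
 */
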
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.read = NULL,
	.write = NULL,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};

static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		/* dynamic major device number registration required */
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}

static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int rc = 0;
	u64 addr;
	int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		return -ENODEV;

	memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
							  GFP_KERNEL, uuid);
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
		    visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);
	periodic_controlvm_workqueue =
	    create_singlethread_workqueue("visorchipset_controlvm");

	if (!periodic_controlvm_workqueue) {
		POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
				 DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	rc = queue_delayed_work(periodic_controlvm_workqueue,
				&periodic_controlvm_work, poll_jiffies);
	if (rc < 0) {
		POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
				 DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -ENODEV;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	rc = visorbus_init();
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}

static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}

static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work(&periodic_controlvm_work);
	flush_workqueue(periodic_controlvm_workqueue);
	destroy_workqueue(periodic_controlvm_workqueue);
	periodic_controlvm_workqueue = NULL;
	destroy_controlvm_payload_info(&controlvm_payload_info);

	cleanup_controlvm_structures();

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}

static const struct acpi_device_id unisys_device_ids[] = {
	{"BQS0001", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

static __init uint32_t visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (cpu_has_hypervisor) {
		/* check the ID */
		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return (ebx == UNISYS_SPAR_ID_EBX) &&
		       (ecx == UNISYS_SPAR_ID_ECX) &&
		       (edx == UNISYS_SPAR_ID_EDX);
	} else {
		return 0;
	}
}

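/*
 * The three ID registers spell "UnisysSpar64" in little-endian ASCII,
 * which is what the constants checked above encode:
 *
 *	UNISYS_SPAR_ID_EBX	0x73696e55 -> "Unis"
 *	UNISYS_SPAR_ID_ECX	0x70537379 -> "ysSp"
 *	UNISYS_SPAR_ID_EDX	0x34367261 -> "ar64"
 */
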
static int init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}

module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int,
		   S_IRUGO);
MODULE_PARM_DESC(visorchipset_visorbusregwait,
		 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
		 "1 to hold response to CHIPSET_READY");

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);