/*
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

#define BUS_ROOT_DEVICE UINT_MAX
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;
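
/* File operations for the /dev/visorchipset character device. */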
static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};
static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;		/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock, and if you set <0,
	 * it is your responsibility to also free up all of the other objects
	 * in this struct (like input_buffer_list, active_buf.parser_ctx)
	 * before releasing the lock
	 */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);
/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visorchipset_bus_info *p, int response);
static void bus_destroy_response(struct visorchipset_bus_info *p, int response);
static void device_create_response(struct visorchipset_device_info *p,
				   int response);
static void device_destroy_response(struct visorchipset_device_info *p,
				    int response);
static void device_resume_response(struct visorchipset_device_info *p,
				   int response);
static void
visorchipset_device_pause_response(struct visorchipset_device_info *p,
				   int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.dev.groups = visorchipset_dev_groups,
};
/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);
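
/* Copy the payload of a controlvm message into a freshly allocated
 * parser_context.  "local" means the payload already lies in guest-OS
 * memory (test messages); otherwise the physical range is mapped and
 * copied with memcpy_fromio().  *retry is meant to tell the caller to try
 * again later when buffering the payload would exceed
 * MAX_CONTROLVM_PAYLOAD_BYTES.
 */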
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * alloc an extra '\0' byte to ensure the payload is
	 * NUL-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long)(addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}
		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		kfree(ctx);
		ctx = NULL;
	}
	return rc;
}
static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}

/** Describes the state from the perspective of which controlvm messages have
 *  been received for a bus or device.
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};
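
/* Point ctx->curr and ctx->bytes_remaining at the requested string
 * (initiator, target, connection or name) within the parameter header.
 */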
static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}
}
static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
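
/* Return a kmalloc'd, NUL-terminated copy of the string selected by
 * parser_param_start(), or NULL on failure.
 */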
static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0 || !pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *)(value))[value_length] = '\0';
	return value;
}
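
/* sysfs attribute accessors: each show/store pair below reads or writes a
 * single field of the controlvm channel (tool_action, efi_spar_ind,
 * installation_error, installation_text_id, installation_remaining_steps).
 */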
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &(efi_spar_indication),
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}
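
/* Reset the bookkeeping structs kept on bus_info_list / dev_info_list. */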
static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *)v;

	kfree(p->description);
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *)v;

	memset(p, 0, sizeof(struct visorchipset_device_info));
}
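
/* Look up a visor_device on visorbus_type by (bus_no, dev_no); "from"
 * lets the caller resume a previous search.
 */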
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = (struct visor_busdev *)data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if ((vdev->chipset_bus_no == bus_no) &&
	    (vdev->chipset_dev_no == dev_no))
		return 1;

	return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
	struct visorchipset_bus_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no)
			return p;
	}

	return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
	struct visorchipset_device_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no && p->dev_no == dev_no)
			return p;
	}

	return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
	struct visorchipset_device_info *p, *tmp;

	list_for_each_entry_safe(p, tmp, list, entry) {
		if (p->bus_no == bus_no) {
			list_del(&p->entry);
			kfree(p);
		}
	}
}
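
/* chipset_events[] tracks the conditions reported through the chipsetready
 * sysfs store ("CALLHOMEDISK_MOUNTED" and "MODULES_LOADED"); the deferred
 * CHIPSET_READY response is only sent once every event has been seen.
 */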
static int
check_chipset_events(void)
{
	int i;
	int send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];

	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}
void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features =
	    inmsg->cmd.init_chipset.
	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
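
/* Build a response message: copy the request header, zero the payload
 * fields, and mark the message failed when "response" is negative.
 */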
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet.
	 */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

static void
bus_responder(enum controlvm_id cmd_id,
	      struct controlvm_message_header *pending_msg_hdr,
	      int response)
{
	if (pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visorchipset_device_info *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->bus_no;
	u32 dev_no = p->dev_no;

	if (p->pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr->id != cmd_id)
		return;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;
}

static void
device_responder(enum controlvm_id cmd_id,
		 struct controlvm_message_header *pending_msg_hdr,
		 int response)
{
	if (pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}
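
/* Common completion path for bus messages: if the sender expects a
 * response, stash a copy of the request header on the bus_info, then call
 * the registered visorbus notifier for the command.  If no notifier ran,
 * respond directly via bus_responder().
 */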
static void
bus_epilog(struct visorchipset_bus_info *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

away:
	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		bus_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
static void
device_epilog(struct visorchipset_device_info *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	u32 bus_no = dev_info->bus_no;
	u32 dev_no = dev_info->dev_no;
	struct controlvm_message_header *pmsg_hdr = NULL;
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	notifiers = &busdev_notifiers;

	if (!dev_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

away:
	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
			    segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (dev_info);
					notified = true;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug
					 */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (dev_info);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		device_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}
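
/* Handlers for the individual CONTROLVM message types.  bus_create()
 * allocates a visorchipset_bus_info, creates the visorchannel described in
 * the message, and adds it to bus_info_list before running bus_epilog().
 */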
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info;
	struct visorchannel *visorchannel;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto cleanup;
	}
	bus_info->visorchannel = visorchannel;
	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr != NULL) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		visorchannel_set_clientpartition(bus_info->visorchannel,
						 cmd->configure_bus.guest_handle);
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info;
	struct visorchipset_bus_info *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	visorchannel =
	       visorchannel_create(cmd->create_device.channel_addr,
				   cmd->create_device.channel_bytes,
				   GFP_KERNEL,
				   cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto cleanup;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(cmd->create_device.data_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}

cleanup:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns true for success or false
 * for failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}
/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}
/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes the request id back to the
 * parahotplug/devicedisabled (or deviceenabled) sysfs attribute, which
 * causes parahotplug_request_complete() to be called, at which point
 * the appropriate CONTROLVM message is retrieved from the list and
 * responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /sys response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the /sys handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}
/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
						      CONTROLVM_RESP_SUCCESS,
						      inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	/* ack the received message by echoing it on the ACK queue */
	controlvm_init_response(&ackmsg, &inmsg.hdr,
				CONTROLVM_RESP_SUCCESS);
	if (controlvm_channel)
		visorchannel_signalinsert(controlvm_channel,
					  CONTROLVM_QUEUE_ACK,
					  &ackmsg);
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
					  -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
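
/* Ask the hypervisor, via VMCALL_IO_CONTROLVM_ADDR, for the physical
 * address and size of the controlvm channel.
 */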
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}

static u64 controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}
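
/* Main polling loop, run from periodic_controlvm_workqueue: drain the
 * controlvm response and event queues, dispatch each message through
 * handle_command(), and slow the polling rate after MIN_IDLE_SECONDS of
 * inactivity.
 */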
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	poll_count++;
	if (poll_count >= 250)
		;	/* keep going */
	else
		goto cleanup;

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (controlvm_pending_msg_valid) {
		/* we throttled processing of a prior
		 * msg, so try to process it again
		 * rather than reading a new one
		 */
		inmsg = controlvm_pending_msg;
		controlvm_pending_msg_valid = false;
		got_command = true;
	} else {
		got_command = read_controlvm_event(&inmsg);
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
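
/* Replay the saved crash bus/device CREATE messages stored in the
 * controlvm channel, so that the dump storage device can be brought back
 * up after a crash.
 */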
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}
2137 bus_create_response(struct visorchipset_bus_info
*bus_info
, int response
)
2139 if (response
>= 0) {
2140 bus_info
->state
.created
= 1;
2142 if (response
!= -CONTROLVM_RESP_ERROR_ALREADY_DONE
)
2143 /* undo the row we just created... */
2144 busdevices_del(&dev_info_list
, bus_info
->bus_no
);
2147 bus_responder(CONTROLVM_BUS_CREATE
, bus_info
->pending_msg_hdr
,
2150 kfree(bus_info
->pending_msg_hdr
);
2151 bus_info
->pending_msg_hdr
= NULL
;
2155 bus_destroy_response(struct visorchipset_bus_info
*bus_info
, int response
)
2157 bus_responder(CONTROLVM_BUS_DESTROY
, bus_info
->pending_msg_hdr
,
2160 kfree(bus_info
->pending_msg_hdr
);
2161 bus_info
->pending_msg_hdr
= NULL
;
2163 bus_info_clear(bus_info
);
2164 busdevices_del(&dev_info_list
, bus_info
->bus_no
);
static void
device_create_response(struct visorchipset_device_info *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
static void
device_destroy_response(struct visorchipset_device_info *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;

	dev_info_clear(dev_info);
}
static void
visorchipset_device_pause_response(struct visorchipset_device_info *dev_info,
				   int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
static void
device_resume_response(struct visorchipset_device_info *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
bool
visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
{
	void *p = bus_find(&bus_info_list, bus_no);

	if (!p)
		return false;
	memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
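
/* Caller-side sketch (illustrative only, not part of this file): a
 * visorbus-side user copies out a snapshot of the tracked bus before
 * using it, e.g.:
 *
 *	struct visorchipset_bus_info bi;
 *
 *	if (visorchipset_get_bus_info(bus_no, &bi))
 *		... work with the snapshot in bi ...
 */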
void
visorchipset_set_bus_context(struct visorchipset_bus_info *p, void *context)
{
	if (!p)
		return;
	p->bus_driver_context = context;
}
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
bool
visorchipset_get_device_info(u32 bus_no, u32 dev_no,
			     struct visorchipset_device_info *dev_info)
{
	void *p = device_find(&dev_info_list, bus_no, dev_no);

	if (!p)
		return false;
	memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
void
visorchipset_set_device_context(struct visorchipset_device_info *p,
				void *context)
{
	if (!p)
		return;
	p->bus_driver_context = context;
}
EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
		chipset_events[0] = 1;
		return count;
	} else if (!strcmp(msgtype, "MODULES_LOADED")) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}
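
/* Usage sketch (assumed sysfs path, for illustration only): the s-Par guest
 * tools report boot milestones by writing one of the two recognized strings
 * to this attribute, e.g.:
 *
 *	echo MODULES_LOADED > /sys/devices/platform/visorchipset/guest/chipsetready
 */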
/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}
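
/* Usage sketch (assumed sysfs path, for illustration only): the support
 * script acknowledges the shutdown by echoing the request ID back, e.g.:
 *
 *	echo $ID > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */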
/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}
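
/* Usage sketch (assumed sysfs path, for illustration only):
 *
 *	echo $ID > /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */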
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read(*file_controlvm_channel,
				  offsetof(struct spar_controlvm_channel_protocol,
					   gp_control_channel),
				  &addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
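
/* Userspace sketch (illustrative only; the device node name depends on how
 * the char device registered below is exposed): mapping at offset
 * VISORCHIPSET_MMAP_CONTROLCHANOFFSET remaps the GP control channel whose
 * physical address is published in the controlvm channel, e.g.:
 *
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	void *chan = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 */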
static inline s64
issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	s64 vrtc_offset;
	u64 adjustment;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -EFAULT;
	}
}
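
/* Userspace sketch (illustrative only): both commands pass a pointer to a
 * 64-bit value through 'arg', e.g.:
 *
 *	s64 offset;
 *	u64 adjustment;
 *
 *	ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &offset);
 *	ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adjustment);
 */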
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};
static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		/* dynamic major device number registration required */
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int rc = 0;
	u64 addr;
	int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		return -ENODEV;

	memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
							  GFP_KERNEL, uuid);
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
		    visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);
	periodic_controlvm_workqueue =
	    create_singlethread_workqueue("visorchipset_controlvm");

	if (!periodic_controlvm_workqueue) {
		POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
				 DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	rc = queue_delayed_work(periodic_controlvm_workqueue,
				&periodic_controlvm_work, poll_jiffies);
	if (rc < 0) {
		POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
				 DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -ENODEV;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	rc = visorbus_init();
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}
static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work(&periodic_controlvm_work);
	flush_workqueue(periodic_controlvm_workqueue);
	destroy_workqueue(periodic_controlvm_workqueue);
	periodic_controlvm_workqueue = NULL;
	destroy_controlvm_payload_info(&controlvm_payload_info);

	cleanup_controlvm_structures();

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
static __init uint32_t visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (cpu_has_hypervisor) {
		/* check the hypervisor ID returned by the s-Par leaf */
		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return  (ebx == UNISYS_SPAR_ID_EBX) &&
			(ecx == UNISYS_SPAR_ID_ECX) &&
			(edx == UNISYS_SPAR_ID_EDX);
	} else {
		return 0;
	}
}
static int init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_visorbusregwait,
		 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
		 "1 to hold response to CHIPSET_READY");

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);