3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
26 #include "controlvmcompletionstatus.h"
27 #include "guestlinuxdebug.h"
29 #include <linux/nls.h>
30 #include <linux/netdevice.h>
31 #include <linux/platform_device.h>
32 #include <linux/uuid.h>
33 #include <linux/crash_dump.h>
/*
 * Module-wide tunables, test/debug flags, and controlvm polling state.
 * NOTE(review): this region is an extraction-garbled copy (original file
 * line numbers are fused into the text and some lines are missing);
 * only comments are added here — code bytes are untouched.
 */
35 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
37 * vnic loopback test */
38 #define TEST_VNIC_SWITCHNO 1
39 #define TEST_VNIC_BUSNO 9
41 #define MAX_NAME_SIZE 128
42 #define MAX_IP_SIZE 50
43 #define MAXOUTSTANDINGCHANNELCOMMAND 256
44 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
/* Test/debug toggles; only visorchipset_clientregwait defaults to on. */
50 static int visorchipset_testvnic
;
51 static int visorchipset_testvnicclient
;
52 static int visorchipset_testmsg
;
53 static int visorchipset_major
;
54 static int visorchipset_serverregwait
;
55 static int visorchipset_clientregwait
= 1; /* default is on */
56 static int visorchipset_testteardown
;
57 static int visorchipset_disable_controlvm
;
58 static int visorchipset_holdchipsetready
;
60 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
61 * we switch to slow polling mode. As soon as we get a controlvm
62 * message, we switch back to fast polling mode.
64 #define MIN_IDLE_SECONDS 10
65 static unsigned long poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_FAST
;
66 static unsigned long most_recent_message_jiffies
; /* when we got our last
67 * controlvm message */
68 static int serverregistered
;
69 static int clientregistered
;
71 #define MAX_CHIPSET_EVENTS 2
72 static u8 chipset_events
[MAX_CHIPSET_EVENTS
] = { 0, 0 };
/* Deferred-work plumbing for the periodic controlvm channel poll. */
74 static struct delayed_work periodic_controlvm_work
;
75 static struct workqueue_struct
*periodic_controlvm_workqueue
;
76 static DEFINE_SEMAPHORE(notifier_lock
);
/* Stashed controlvm headers used for deferred diag/chipset/dump replies. */
78 static struct controlvm_message_header g_diag_msg_hdr
;
79 static struct controlvm_message_header g_chipset_msg_hdr
;
80 static struct controlvm_message_header g_del_dump_msg_hdr
;
81 static const uuid_le spar_diag_pool_channel_protocol_uuid
=
82 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID
;
83 /* 0xffffff is an invalid Bus/Device number */
84 static u32 g_diagpool_bus_no
= 0xffffff;
85 static u32 g_diagpool_dev_no
= 0xffffff;
86 static struct controlvm_message_packet g_devicechangestate_packet
;
88 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
91 #define FOR_VISORHACKBUS(channel_type_guid) \
92 (((uuid_le_cmp(channel_type_guid,\
93 spar_vnic_channel_protocol_uuid) == 0) ||\
94 (uuid_le_cmp(channel_type_guid,\
95 spar_vhba_channel_protocol_uuid) == 0)))
96 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
98 #define is_diagpool_channel(channel_type_guid) \
99 (uuid_le_cmp(channel_type_guid,\
100 spar_diag_pool_channel_protocol_uuid) == 0)
/*
 * Bus/device bookkeeping lists, controlvm payload bookkeeping, live-dump
 * state, TRANSMIT_FILE (putfile) tracking structures, and parahotplug
 * request state.  NOTE(review): extraction-garbled region (fused line
 * numbers, missing lines — several struct closers are not visible);
 * comments only are added.
 */
102 static LIST_HEAD(bus_info_list
);
103 static LIST_HEAD(dev_info_list
);
105 static struct visorchannel
*controlvm_channel
;
107 /* Manages the request payload in the controlvm channel */
108 struct visor_controlvm_payload_info
{
109 u8 __iomem
*ptr
; /* pointer to base address of payload pool */
110 u64 offset
; /* offset from beginning of controlvm
111 * channel to beginning of payload * pool */
112 u32 bytes
; /* number of bytes in payload pool */
115 static struct visor_controlvm_payload_info controlvm_payload_info
;
117 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
118 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
120 struct visor_livedump_info
{
121 struct controlvm_message_header dumpcapture_header
;
122 struct controlvm_message_header gettextdump_header
;
123 struct controlvm_message_header dumpcomplete_header
;
124 bool gettextdump_outstanding
;
126 unsigned long length
;
127 atomic_t buffers_in_use
;
128 unsigned long destination
;
131 static struct visor_livedump_info livedump_info
;
133 /* The following globals are used to handle the scenario where we are unable to
134 * offload the payload from a controlvm message due to memory requirements. In
135 * this scenario, we simply stash the controlvm message, then attempt to
136 * process it again the next time controlvm_periodic_work() runs.
138 static struct controlvm_message controlvm_pending_msg
;
139 static bool controlvm_pending_msg_valid
= false;
141 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
142 * TRANSMIT_FILE PutFile payloads.
144 static struct kmem_cache
*putfile_buffer_list_pool
;
145 static const char putfile_buffer_list_pool_name
[] =
146 "controlvm_putfile_buffer_list_pool";
148 /* This identifies a data buffer that has been received via a controlvm messages
149 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
151 struct putfile_buffer_entry
{
152 struct list_head next
; /* putfile_buffer_entry list */
153 struct parser_context
*parser_ctx
; /* points to input data buffer */
156 /* List of struct putfile_request *, via next_putfile_request member.
157 * Each entry in this list identifies an outstanding TRANSMIT_FILE
160 static LIST_HEAD(putfile_request_list
);
162 /* This describes a buffer and its current state of transfer (e.g., how many
163 * bytes have already been supplied as putfile data, and how many bytes are
164 * remaining) for a putfile_request.
166 struct putfile_active_buffer
{
167 /* a payload from a controlvm message, containing a file data buffer */
168 struct parser_context
*parser_ctx
;
169 /* points within data area of parser_ctx to next byte of data */
171 /* # bytes left from <pnext> to the end of this data buffer */
172 size_t bytes_remaining
;
175 #define PUTFILE_REQUEST_SIG 0x0906101302281211
176 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
177 * conversation. Structs of this type are dynamically linked into
178 * <Putfile_request_list>.
180 struct putfile_request
{
181 u64 sig
; /* PUTFILE_REQUEST_SIG */
183 /* header from original TransmitFile request */
184 struct controlvm_message_header controlvm_header
;
185 u64 file_request_number
; /* from original TransmitFile request */
187 /* link to next struct putfile_request */
188 struct list_head next_putfile_request
;
190 /* most-recent sequence number supplied via a controlvm message */
191 u64 data_sequence_number
;
193 /* head of putfile_buffer_entry list, which describes the data to be
194 * supplied as putfile data;
195 * - this list is added to when controlvm messages come in that supply
197 * - this list is removed from via the hotplug program that is actually
198 * consuming these buffers to write as file data */
199 struct list_head input_buffer_list
;
200 spinlock_t req_list_lock
; /* lock for input_buffer_list */
202 /* waiters for input_buffer_list to go non-empty */
203 wait_queue_head_t input_buffer_wq
;
205 /* data not yet read within current putfile_buffer_entry */
206 struct putfile_active_buffer active_buf
;
208 /* <0 = failed, 0 = in-progress, >0 = successful; */
209 /* note that this must be set with req_list_lock, and if you set <0, */
210 /* it is your responsibility to also free up all of the other objects */
211 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
212 /* before releasing the lock */
213 int completion_status
;
216 static atomic_t visorchipset_cache_buffers_in_use
= ATOMIC_INIT(0);
218 struct parahotplug_request
{
219 struct list_head list
;
221 unsigned long expiration
;
222 struct controlvm_message msg
;
225 static LIST_HEAD(parahotplug_request_list
);
226 static DEFINE_SPINLOCK(parahotplug_request_list_lock
); /* lock for above */
227 static void parahotplug_process_list(void);
229 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
230 * CONTROLVM_REPORTEVENT.
232 static struct visorchipset_busdev_notifiers busdev_server_notifiers
;
233 static struct visorchipset_busdev_notifiers busdev_client_notifiers
;
/*
 * Response callbacks handed to visorbus clients, /dev/visorchipset info,
 * sysfs attribute prototypes/groups, the platform device, and forward
 * declarations of the controlvm response helpers.  NOTE(review):
 * extraction-garbled region; comments only are added.
 */
235 static void bus_create_response(u32 bus_no
, int response
);
236 static void bus_destroy_response(u32 bus_no
, int response
);
237 static void device_create_response(u32 bus_no
, u32 dev_no
, int response
);
238 static void device_destroy_response(u32 bus_no
, u32 dev_no
, int response
);
239 static void device_resume_response(u32 bus_no
, u32 dev_no
, int response
);
/* Responder table shared with bus/device notifier clients. */
241 static struct visorchipset_busdev_responders busdev_responders
= {
242 .bus_create
= bus_create_response
,
243 .bus_destroy
= bus_destroy_response
,
244 .device_create
= device_create_response
,
245 .device_destroy
= device_destroy_response
,
246 .device_pause
= visorchipset_device_pause_response
,
247 .device_resume
= device_resume_response
,
250 /* info for /dev/visorchipset */
251 static dev_t major_dev
= -1; /**< indicates major num for device */
253 /* prototypes for attributes */
254 static ssize_t
toolaction_show(struct device
*dev
,
255 struct device_attribute
*attr
, char *buf
);
256 static ssize_t
toolaction_store(struct device
*dev
,
257 struct device_attribute
*attr
,
258 const char *buf
, size_t count
);
259 static DEVICE_ATTR_RW(toolaction
);
261 static ssize_t
boottotool_show(struct device
*dev
,
262 struct device_attribute
*attr
, char *buf
);
263 static ssize_t
boottotool_store(struct device
*dev
,
264 struct device_attribute
*attr
, const char *buf
,
266 static DEVICE_ATTR_RW(boottotool
);
268 static ssize_t
error_show(struct device
*dev
, struct device_attribute
*attr
,
270 static ssize_t
error_store(struct device
*dev
, struct device_attribute
*attr
,
271 const char *buf
, size_t count
);
272 static DEVICE_ATTR_RW(error
);
274 static ssize_t
textid_show(struct device
*dev
, struct device_attribute
*attr
,
276 static ssize_t
textid_store(struct device
*dev
, struct device_attribute
*attr
,
277 const char *buf
, size_t count
);
278 static DEVICE_ATTR_RW(textid
);
280 static ssize_t
remaining_steps_show(struct device
*dev
,
281 struct device_attribute
*attr
, char *buf
);
282 static ssize_t
remaining_steps_store(struct device
*dev
,
283 struct device_attribute
*attr
,
284 const char *buf
, size_t count
);
285 static DEVICE_ATTR_RW(remaining_steps
);
287 static ssize_t
chipsetready_store(struct device
*dev
,
288 struct device_attribute
*attr
,
289 const char *buf
, size_t count
);
290 static DEVICE_ATTR_WO(chipsetready
);
292 static ssize_t
devicedisabled_store(struct device
*dev
,
293 struct device_attribute
*attr
,
294 const char *buf
, size_t count
);
295 static DEVICE_ATTR_WO(devicedisabled
);
297 static ssize_t
deviceenabled_store(struct device
*dev
,
298 struct device_attribute
*attr
,
299 const char *buf
, size_t count
);
300 static DEVICE_ATTR_WO(deviceenabled
);
/* sysfs attribute groupings: install, guest, and parahotplug views. */
302 static struct attribute
*visorchipset_install_attrs
[] = {
303 &dev_attr_toolaction
.attr
,
304 &dev_attr_boottotool
.attr
,
305 &dev_attr_error
.attr
,
306 &dev_attr_textid
.attr
,
307 &dev_attr_remaining_steps
.attr
,
311 static struct attribute_group visorchipset_install_group
= {
313 .attrs
= visorchipset_install_attrs
316 static struct attribute
*visorchipset_guest_attrs
[] = {
317 &dev_attr_chipsetready
.attr
,
321 static struct attribute_group visorchipset_guest_group
= {
323 .attrs
= visorchipset_guest_attrs
326 static struct attribute
*visorchipset_parahotplug_attrs
[] = {
327 &dev_attr_devicedisabled
.attr
,
328 &dev_attr_deviceenabled
.attr
,
332 static struct attribute_group visorchipset_parahotplug_group
= {
333 .name
= "parahotplug",
334 .attrs
= visorchipset_parahotplug_attrs
337 static const struct attribute_group
*visorchipset_dev_groups
[] = {
338 &visorchipset_install_group
,
339 &visorchipset_guest_group
,
340 &visorchipset_parahotplug_group
,
344 /* /sys/devices/platform/visorchipset */
345 static struct platform_device visorchipset_platform_device
= {
346 .name
= "visorchipset",
348 .dev
.groups
= visorchipset_dev_groups
,
351 /* Function prototypes */
352 static void controlvm_respond(struct controlvm_message_header
*msg_hdr
,
354 static void controlvm_respond_chipset_init(
355 struct controlvm_message_header
*msg_hdr
, int response
,
356 enum ultra_chipset_feature features
);
357 static void controlvm_respond_physdev_changestate(
358 struct controlvm_message_header
*msg_hdr
, int response
,
359 struct spar_segment_state state
);
361 static ssize_t
toolaction_show(struct device
*dev
,
362 struct device_attribute
*attr
,
367 visorchannel_read(controlvm_channel
,
368 offsetof(struct spar_controlvm_channel_protocol
,
369 tool_action
), &tool_action
, sizeof(u8
));
370 return scnprintf(buf
, PAGE_SIZE
, "%u\n", tool_action
);
373 static ssize_t
toolaction_store(struct device
*dev
,
374 struct device_attribute
*attr
,
375 const char *buf
, size_t count
)
380 if (kstrtou8(buf
, 10, &tool_action
) != 0)
383 ret
= visorchannel_write(controlvm_channel
,
384 offsetof(struct spar_controlvm_channel_protocol
,
386 &tool_action
, sizeof(u8
));
393 static ssize_t
boottotool_show(struct device
*dev
,
394 struct device_attribute
*attr
,
397 struct efi_spar_indication efi_spar_indication
;
399 visorchannel_read(controlvm_channel
,
400 offsetof(struct spar_controlvm_channel_protocol
,
401 efi_spar_ind
), &efi_spar_indication
,
402 sizeof(struct efi_spar_indication
));
403 return scnprintf(buf
, PAGE_SIZE
, "%u\n",
404 efi_spar_indication
.boot_to_tool
);
407 static ssize_t
boottotool_store(struct device
*dev
,
408 struct device_attribute
*attr
,
409 const char *buf
, size_t count
)
412 struct efi_spar_indication efi_spar_indication
;
414 if (kstrtoint(buf
, 10, &val
) != 0)
417 efi_spar_indication
.boot_to_tool
= val
;
418 ret
= visorchannel_write(controlvm_channel
,
419 offsetof(struct spar_controlvm_channel_protocol
,
420 efi_spar_ind
), &(efi_spar_indication
),
421 sizeof(struct efi_spar_indication
));
428 static ssize_t
error_show(struct device
*dev
, struct device_attribute
*attr
,
433 visorchannel_read(controlvm_channel
,
434 offsetof(struct spar_controlvm_channel_protocol
,
436 &error
, sizeof(u32
));
437 return scnprintf(buf
, PAGE_SIZE
, "%i\n", error
);
440 static ssize_t
error_store(struct device
*dev
, struct device_attribute
*attr
,
441 const char *buf
, size_t count
)
446 if (kstrtou32(buf
, 10, &error
) != 0)
449 ret
= visorchannel_write(controlvm_channel
,
450 offsetof(struct spar_controlvm_channel_protocol
,
452 &error
, sizeof(u32
));
458 static ssize_t
textid_show(struct device
*dev
, struct device_attribute
*attr
,
463 visorchannel_read(controlvm_channel
,
464 offsetof(struct spar_controlvm_channel_protocol
,
465 installation_text_id
),
466 &text_id
, sizeof(u32
));
467 return scnprintf(buf
, PAGE_SIZE
, "%i\n", text_id
);
470 static ssize_t
textid_store(struct device
*dev
, struct device_attribute
*attr
,
471 const char *buf
, size_t count
)
476 if (kstrtou32(buf
, 10, &text_id
) != 0)
479 ret
= visorchannel_write(controlvm_channel
,
480 offsetof(struct spar_controlvm_channel_protocol
,
481 installation_text_id
),
482 &text_id
, sizeof(u32
));
488 static ssize_t
remaining_steps_show(struct device
*dev
,
489 struct device_attribute
*attr
, char *buf
)
493 visorchannel_read(controlvm_channel
,
494 offsetof(struct spar_controlvm_channel_protocol
,
495 installation_remaining_steps
),
496 &remaining_steps
, sizeof(u16
));
497 return scnprintf(buf
, PAGE_SIZE
, "%hu\n", remaining_steps
);
500 static ssize_t
remaining_steps_store(struct device
*dev
,
501 struct device_attribute
*attr
,
502 const char *buf
, size_t count
)
507 if (kstrtou16(buf
, 10, &remaining_steps
) != 0)
510 ret
= visorchannel_write(controlvm_channel
,
511 offsetof(struct spar_controlvm_channel_protocol
,
512 installation_remaining_steps
),
513 &remaining_steps
, sizeof(u16
));
/*
 * bus_info_clear() - reset a visorchipset_bus_info record: free its
 * owned strings, clear the created flag, then zero the whole struct.
 * NOTE(review): extraction gap at original lines 523-526 — the visible
 * text frees only p->description; the gap likely freed p->name as well.
 * Confirm against upstream before relying on this copy.
 */
520 bus_info_clear(void *v
)
522 struct visorchipset_bus_info
*p
= (struct visorchipset_bus_info
*) (v
);
527 kfree(p
->description
);
528 p
->description
= NULL
;
530 p
->state
.created
= 0;
531 memset(p
, 0, sizeof(struct visorchipset_bus_info
));
535 dev_info_clear(void *v
)
537 struct visorchipset_device_info
*p
=
538 (struct visorchipset_device_info
*)(v
);
540 p
->state
.created
= 0;
541 memset(p
, 0, sizeof(struct visorchipset_device_info
));
/*
 * check_chipset_events() - AND together all entries of chipset_events[]
 * to decide whether every expected event has arrived (i.e. whether a
 * response should be sent).  clear_chipset_events() resets the array.
 * NOTE(review): locals ('i', 'send_msg') and return statements were lost
 * in extraction; comments only are added here.
 */
545 check_chipset_events(void)
549 /* Check events to determine if response should be sent */
550 for (i
= 0; i
< MAX_CHIPSET_EVENTS
; i
++)
551 send_msg
&= chipset_events
[i
];
556 clear_chipset_events(void)
559 /* Clear chipset_events */
560 for (i
= 0; i
< MAX_CHIPSET_EVENTS
; i
++)
561 chipset_events
[i
] = 0;
/*
 * visorchipset_register_busdev_server()/..._client() - (un)register a
 * visorbus server/client: under notifier_lock, either clear or copy the
 * caller's notifier table, flip the registered flag, hand back the
 * shared busdev_responders table, and describe this driver in
 * @driver_info.  NOTE(review): error/NULL-notifier branches and the
 * closing up() were lost in extraction; '&not' has been mojibake'd to
 * the not-sign in 'down(&notifier_lock)' — bytes preserved as-is.
 */
565 visorchipset_register_busdev_server(
566 struct visorchipset_busdev_notifiers
*notifiers
,
567 struct visorchipset_busdev_responders
*responders
,
568 struct ultra_vbus_deviceinfo
*driver_info
)
570 down(¬ifier_lock
);
572 memset(&busdev_server_notifiers
, 0,
573 sizeof(busdev_server_notifiers
));
574 serverregistered
= 0; /* clear flag */
576 busdev_server_notifiers
= *notifiers
;
577 serverregistered
= 1; /* set flag */
580 *responders
= busdev_responders
;
582 bus_device_info_init(driver_info
, "chipset", "visorchipset",
587 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server
);
590 visorchipset_register_busdev_client(
591 struct visorchipset_busdev_notifiers
*notifiers
,
592 struct visorchipset_busdev_responders
*responders
,
593 struct ultra_vbus_deviceinfo
*driver_info
)
595 down(¬ifier_lock
);
597 memset(&busdev_client_notifiers
, 0,
598 sizeof(busdev_client_notifiers
));
599 clientregistered
= 0; /* clear flag */
601 busdev_client_notifiers
= *notifiers
;
602 clientregistered
= 1; /* set flag */
605 *responders
= busdev_responders
;
607 bus_device_info_init(driver_info
, "chipset(bolts)",
608 "visorchipset", VERSION
, NULL
);
611 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client
);
/*
 * cleanup_controlvm_structures() - unlink (and, in the lost lines,
 * presumably free) every tracked bus and device record.
 * chipset_init() - handle CONTROLVM_CHIPSET_INIT: guard against double
 * init, negotiate parahotplug/reply feature bits with Command, and send
 * the init response if one was requested.
 * NOTE(review): extraction lost several lines (free calls, the 'goto
 * cleanup' path, and part of the features expression); comments only.
 */
614 cleanup_controlvm_structures(void)
616 struct visorchipset_bus_info
*bi
, *tmp_bi
;
617 struct visorchipset_device_info
*di
, *tmp_di
;
619 list_for_each_entry_safe(bi
, tmp_bi
, &bus_info_list
, entry
) {
621 list_del(&bi
->entry
);
625 list_for_each_entry_safe(di
, tmp_di
, &dev_info_list
, entry
) {
627 list_del(&di
->entry
);
633 chipset_init(struct controlvm_message
*inmsg
)
635 static int chipset_inited
;
636 enum ultra_chipset_feature features
= 0;
637 int rc
= CONTROLVM_RESP_SUCCESS
;
639 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC
, POSTCODE_SEVERITY_INFO
);
640 if (chipset_inited
) {
641 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
645 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
647 /* Set features to indicate we support parahotplug (if Command
648 * also supports it). */
650 inmsg
->cmd
.init_chipset
.
651 features
& ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG
;
653 /* Set the "reply" bit so Command knows this is a
654 * features-aware driver. */
655 features
|= ULTRA_CHIPSET_FEATURE_REPLY
;
659 cleanup_controlvm_structures();
660 if (inmsg
->hdr
.flags
.response_expected
)
661 controlvm_respond_chipset_init(&inmsg
->hdr
, rc
, features
);
/*
 * controlvm response helpers:
 *  - controlvm_init_response(): build an outgoing message that echoes
 *    the request header, zeroes the payload fields, and (in a branch
 *    partly lost to extraction) marks failure with the negated response
 *    code in completion_status.
 *  - controlvm_respond(): generic responder; special-cases DiagPool
 *    DEVICE_CHANGESTATE by echoing the saved device_change_state packet,
 *    then inserts the message into CONTROLVM_QUEUE_REQUEST.
 *  - controlvm_respond_chipset_init(): responder carrying the negotiated
 *    feature bits.
 *  - controlvm_respond_physdev_changestate(): responder carrying a
 *    segment state with the phys_device flag set.
 * NOTE(review): several condition/error-path lines are missing from this
 * extraction; comments only are added.
 */
665 controlvm_init_response(struct controlvm_message
*msg
,
666 struct controlvm_message_header
*msg_hdr
, int response
)
668 memset(msg
, 0, sizeof(struct controlvm_message
));
669 memcpy(&msg
->hdr
, msg_hdr
, sizeof(struct controlvm_message_header
));
670 msg
->hdr
.payload_bytes
= 0;
671 msg
->hdr
.payload_vm_offset
= 0;
672 msg
->hdr
.payload_max_bytes
= 0;
674 msg
->hdr
.flags
.failed
= 1;
675 msg
->hdr
.completion_status
= (u32
) (-response
);
680 controlvm_respond(struct controlvm_message_header
*msg_hdr
, int response
)
682 struct controlvm_message outmsg
;
684 controlvm_init_response(&outmsg
, msg_hdr
, response
);
685 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
686 * back the deviceChangeState structure in the packet. */
687 if (msg_hdr
->id
== CONTROLVM_DEVICE_CHANGESTATE
&&
688 g_devicechangestate_packet
.device_change_state
.bus_no
==
690 g_devicechangestate_packet
.device_change_state
.dev_no
==
692 outmsg
.cmd
= g_devicechangestate_packet
;
693 if (outmsg
.hdr
.flags
.test_message
== 1)
696 if (!visorchannel_signalinsert(controlvm_channel
,
697 CONTROLVM_QUEUE_REQUEST
, &outmsg
)) {
703 controlvm_respond_chipset_init(struct controlvm_message_header
*msg_hdr
,
705 enum ultra_chipset_feature features
)
707 struct controlvm_message outmsg
;
709 controlvm_init_response(&outmsg
, msg_hdr
, response
);
710 outmsg
.cmd
.init_chipset
.features
= features
;
711 if (!visorchannel_signalinsert(controlvm_channel
,
712 CONTROLVM_QUEUE_REQUEST
, &outmsg
)) {
717 static void controlvm_respond_physdev_changestate(
718 struct controlvm_message_header
*msg_hdr
, int response
,
719 struct spar_segment_state state
)
721 struct controlvm_message outmsg
;
723 controlvm_init_response(&outmsg
, msg_hdr
, response
);
724 outmsg
.cmd
.device_change_state
.state
= state
;
725 outmsg
.cmd
.device_change_state
.flags
.phys_device
= 1;
726 if (!visorchannel_signalinsert(controlvm_channel
,
727 CONTROLVM_QUEUE_REQUEST
, &outmsg
)) {
/*
 * visorchipset_save_message() - persist a crash-time controlvm message
 * (bus or device flavor, selected by @type) into the channel's saved
 * crash-message area so it can be replayed after a kdump boot.
 * Flow: read saved_crash_message_count, sanity-check it against
 * CONTROLVM_CRASHMSG_MAX, read saved_crash_message_offset, then write
 * @msg at the bus slot (CRASH_BUS) or the device slot (offset + one
 * message).  Each step posts an error POSTCODE on failure.
 * NOTE(review): declarations (e.g. crash_msg_count), return statements,
 * and the write-offset expressions were lost in extraction.
 */
733 visorchipset_save_message(struct controlvm_message
*msg
,
734 enum crash_obj_type type
)
736 u32 crash_msg_offset
;
739 /* get saved message count */
740 if (visorchannel_read(controlvm_channel
,
741 offsetof(struct spar_controlvm_channel_protocol
,
742 saved_crash_message_count
),
743 &crash_msg_count
, sizeof(u16
)) < 0) {
744 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC
,
745 POSTCODE_SEVERITY_ERR
);
749 if (crash_msg_count
!= CONTROLVM_CRASHMSG_MAX
) {
750 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC
,
752 POSTCODE_SEVERITY_ERR
);
756 /* get saved crash message offset */
757 if (visorchannel_read(controlvm_channel
,
758 offsetof(struct spar_controlvm_channel_protocol
,
759 saved_crash_message_offset
),
760 &crash_msg_offset
, sizeof(u32
)) < 0) {
761 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC
,
762 POSTCODE_SEVERITY_ERR
);
766 if (type
== CRASH_BUS
) {
767 if (visorchannel_write(controlvm_channel
,
770 sizeof(struct controlvm_message
)) < 0) {
771 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC
,
772 POSTCODE_SEVERITY_ERR
);
776 if (visorchannel_write(controlvm_channel
,
778 sizeof(struct controlvm_message
), msg
,
779 sizeof(struct controlvm_message
)) < 0) {
780 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC
,
781 POSTCODE_SEVERITY_ERR
);
786 EXPORT_SYMBOL_GPL(visorchipset_save_message
);
/*
 * bus_responder() - complete a pending bus CREATE/DESTROY command:
 * look up the bus record, undo a failed create (delbusdevices), update
 * the created state, send the controlvm response if one is pending and
 * matches @cmd_id, then clear the pending header (and, for destroy,
 * tear down the bus's devices).
 * device_changestate_responder() - like controlvm_respond() but fills
 * in bus/dev/state in the outgoing device_change_state packet itself.
 * device_responder() - device-level analogue of bus_responder().
 * NOTE(review): NULL-checks, early returns, and the 'need_clear'
 * handling were partially lost in extraction; comments only are added.
 */
789 bus_responder(enum controlvm_id cmd_id
, u32 bus_no
, int response
)
791 struct visorchipset_bus_info
*p
= NULL
;
792 bool need_clear
= false;
794 p
= findbus(&bus_info_list
, bus_no
);
799 if ((cmd_id
== CONTROLVM_BUS_CREATE
) &&
800 (response
!= (-CONTROLVM_RESP_ERROR_ALREADY_DONE
)))
801 /* undo the row we just created... */
802 delbusdevices(&dev_info_list
, bus_no
);
804 if (cmd_id
== CONTROLVM_BUS_CREATE
)
805 p
->state
.created
= 1;
806 if (cmd_id
== CONTROLVM_BUS_DESTROY
)
810 if (p
->pending_msg_hdr
.id
== CONTROLVM_INVALID
)
811 return; /* no controlvm response needed */
812 if (p
->pending_msg_hdr
.id
!= (u32
)cmd_id
)
814 controlvm_respond(&p
->pending_msg_hdr
, response
);
815 p
->pending_msg_hdr
.id
= CONTROLVM_INVALID
;
818 delbusdevices(&dev_info_list
, bus_no
);
823 device_changestate_responder(enum controlvm_id cmd_id
,
824 u32 bus_no
, u32 dev_no
, int response
,
825 struct spar_segment_state response_state
)
827 struct visorchipset_device_info
*p
= NULL
;
828 struct controlvm_message outmsg
;
830 p
= finddevice(&dev_info_list
, bus_no
, dev_no
);
833 if (p
->pending_msg_hdr
.id
== CONTROLVM_INVALID
)
834 return; /* no controlvm response needed */
835 if (p
->pending_msg_hdr
.id
!= cmd_id
)
838 controlvm_init_response(&outmsg
, &p
->pending_msg_hdr
, response
);
840 outmsg
.cmd
.device_change_state
.bus_no
= bus_no
;
841 outmsg
.cmd
.device_change_state
.dev_no
= dev_no
;
842 outmsg
.cmd
.device_change_state
.state
= response_state
;
844 if (!visorchannel_signalinsert(controlvm_channel
,
845 CONTROLVM_QUEUE_REQUEST
, &outmsg
))
848 p
->pending_msg_hdr
.id
= CONTROLVM_INVALID
;
852 device_responder(enum controlvm_id cmd_id
, u32 bus_no
, u32 dev_no
, int response
)
854 struct visorchipset_device_info
*p
= NULL
;
855 bool need_clear
= false;
857 p
= finddevice(&dev_info_list
, bus_no
, dev_no
);
861 if (cmd_id
== CONTROLVM_DEVICE_CREATE
)
862 p
->state
.created
= 1;
863 if (cmd_id
== CONTROLVM_DEVICE_DESTROY
)
867 if (p
->pending_msg_hdr
.id
== CONTROLVM_INVALID
)
868 return; /* no controlvm response needed */
870 if (p
->pending_msg_hdr
.id
!= (u32
)cmd_id
)
873 controlvm_respond(&p
->pending_msg_hdr
, response
);
874 p
->pending_msg_hdr
.id
= CONTROLVM_INVALID
;
/*
 * bus_epilog() - finish handling a bus CREATE/DESTROY controlvm command:
 * stash the request header if a response is expected (else mark it
 * invalid), then under notifier_lock dispatch the event to the
 * registered server and (for client-flavored buses) client notifier
 * callbacks; if nobody was notified, respond directly via
 * bus_responder().  NOTE(review): switch scaffolding, 'notified'
 * updates, and the unlock path were lost in extraction; the mojibake
 * 'down(&notifier_lock)' byte sequence is preserved as-is.
 */
880 bus_epilog(u32 bus_no
,
881 u32 cmd
, struct controlvm_message_header
*msg_hdr
,
882 int response
, bool need_response
)
884 bool notified
= false;
886 struct visorchipset_bus_info
*bus_info
= findbus(&bus_info_list
,
893 memcpy(&bus_info
->pending_msg_hdr
, msg_hdr
,
894 sizeof(struct controlvm_message_header
));
896 bus_info
->pending_msg_hdr
.id
= CONTROLVM_INVALID
;
899 down(¬ifier_lock
);
900 if (response
== CONTROLVM_RESP_SUCCESS
) {
902 case CONTROLVM_BUS_CREATE
:
903 /* We can't tell from the bus_create
904 * information which of our 2 bus flavors the
905 * devices on this bus will ultimately end up.
906 * FORTUNATELY, it turns out it is harmless to
907 * send the bus_create to both of them. We can
908 * narrow things down a little bit, though,
909 * because we know: - BusDev_Server can handle
910 * either server or client devices
911 * - BusDev_Client can handle ONLY client
913 if (busdev_server_notifiers
.bus_create
) {
914 (*busdev_server_notifiers
.bus_create
) (bus_no
);
917 if ((!bus_info
->flags
.server
) /*client */ &&
918 busdev_client_notifiers
.bus_create
) {
919 (*busdev_client_notifiers
.bus_create
) (bus_no
);
923 case CONTROLVM_BUS_DESTROY
:
924 if (busdev_server_notifiers
.bus_destroy
) {
925 (*busdev_server_notifiers
.bus_destroy
) (bus_no
);
928 if ((!bus_info
->flags
.server
) /*client */ &&
929 busdev_client_notifiers
.bus_destroy
) {
930 (*busdev_client_notifiers
.bus_destroy
) (bus_no
);
937 /* The callback function just called above is responsible
938 * for calling the appropriate visorchipset_busdev_responders
939 * function, which will call bus_responder()
943 bus_responder(cmd
, bus_no
, response
);
/*
 * device_epilog() - finish handling a device-level controlvm command
 * (CREATE / CHANGESTATE / DESTROY): pick the server or client notifier
 * table per @for_visorbus, stash the request header if a response is
 * expected, then under notifier_lock dispatch to the matching notifier.
 * CHANGESTATE maps segment states to resume (running), pause (standby),
 * or — for the diagpool device entering the 'paused' lite-pause state —
 * a KOBJ_ONLINE uevent that triggers the diag_shutdown.sh hotplug
 * script.  Falls back to device_responder() when nothing was notified.
 * NOTE(review): braces, 'notified' updates, the kobject_uevent_env call
 * head, and the unlock path were lost in extraction; comments only.
 */
948 device_epilog(u32 bus_no
, u32 dev_no
, struct spar_segment_state state
, u32 cmd
,
949 struct controlvm_message_header
*msg_hdr
, int response
,
950 bool need_response
, bool for_visorbus
)
952 struct visorchipset_busdev_notifiers
*notifiers
= NULL
;
953 bool notified
= false;
955 struct visorchipset_device_info
*dev_info
=
956 finddevice(&dev_info_list
, bus_no
, dev_no
);
958 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
966 notifiers
= &busdev_server_notifiers
;
968 notifiers
= &busdev_client_notifiers
;
970 memcpy(&dev_info
->pending_msg_hdr
, msg_hdr
,
971 sizeof(struct controlvm_message_header
));
973 dev_info
->pending_msg_hdr
.id
= CONTROLVM_INVALID
;
976 down(¬ifier_lock
);
979 case CONTROLVM_DEVICE_CREATE
:
980 if (notifiers
->device_create
) {
981 (*notifiers
->device_create
) (bus_no
, dev_no
);
985 case CONTROLVM_DEVICE_CHANGESTATE
:
986 /* ServerReady / ServerRunning / SegmentStateRunning */
987 if (state
.alive
== segment_state_running
.alive
&&
989 segment_state_running
.operating
) {
990 if (notifiers
->device_resume
) {
991 (*notifiers
->device_resume
) (bus_no
,
996 /* ServerNotReady / ServerLost / SegmentStateStandby */
997 else if (state
.alive
== segment_state_standby
.alive
&&
999 segment_state_standby
.operating
) {
1000 /* technically this is standby case
1001 * where server is lost
1003 if (notifiers
->device_pause
) {
1004 (*notifiers
->device_pause
) (bus_no
,
1008 } else if (state
.alive
== segment_state_paused
.alive
&&
1010 segment_state_paused
.operating
) {
1011 /* this is lite pause where channel is
1012 * still valid just 'pause' of it
1014 if (bus_no
== g_diagpool_bus_no
&&
1015 dev_no
== g_diagpool_dev_no
) {
1016 /* this will trigger the
1017 * diag_shutdown.sh script in
1018 * the visorchipset hotplug */
1020 (&visorchipset_platform_device
.dev
.
1021 kobj
, KOBJ_ONLINE
, envp
);
1025 case CONTROLVM_DEVICE_DESTROY
:
1026 if (notifiers
->device_destroy
) {
1027 (*notifiers
->device_destroy
) (bus_no
, dev_no
);
1034 /* The callback function just called above is responsible
1035 * for calling the appropriate visorchipset_busdev_responders
1036 * function, which will call device_responder()
1040 device_responder(cmd
, bus_no
, dev_no
, response
);
/*
 * bus_create() - handle CONTROLVM_BUS_CREATE: reject a duplicate create,
 * allocate and populate a visorchipset_bus_info (bus number, device
 * count, channel address/size/type/instance, server flag, test-vs-
 * physical address type), link it onto bus_info_list, and finish via
 * bus_epilog().  POSTCODEs trace entry/exit/failure.
 * NOTE(review): allocation-failure branch heads, 'goto cleanup' lines,
 * and closing braces were lost in extraction; comments only are added.
 */
1045 bus_create(struct controlvm_message
*inmsg
)
1047 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1048 u32 bus_no
= cmd
->create_bus
.bus_no
;
1049 int rc
= CONTROLVM_RESP_SUCCESS
;
1050 struct visorchipset_bus_info
*bus_info
= NULL
;
1052 bus_info
= findbus(&bus_info_list
, bus_no
);
1053 if (bus_info
&& (bus_info
->state
.created
== 1)) {
1054 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC
, bus_no
,
1055 POSTCODE_SEVERITY_ERR
);
1056 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1059 bus_info
= kzalloc(sizeof(*bus_info
), GFP_KERNEL
);
1061 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC
, bus_no
,
1062 POSTCODE_SEVERITY_ERR
);
1063 rc
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
1067 INIT_LIST_HEAD(&bus_info
->entry
);
1068 bus_info
->bus_no
= bus_no
;
1069 bus_info
->dev_no
= cmd
->create_bus
.dev_count
;
1071 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC
, bus_no
, POSTCODE_SEVERITY_INFO
);
1073 if (inmsg
->hdr
.flags
.test_message
== 1)
1074 bus_info
->chan_info
.addr_type
= ADDRTYPE_LOCALTEST
;
1076 bus_info
->chan_info
.addr_type
= ADDRTYPE_LOCALPHYSICAL
;
1078 bus_info
->flags
.server
= inmsg
->hdr
.flags
.server
;
1079 bus_info
->chan_info
.channel_addr
= cmd
->create_bus
.channel_addr
;
1080 bus_info
->chan_info
.n_channel_bytes
= cmd
->create_bus
.channel_bytes
;
1081 bus_info
->chan_info
.channel_type_uuid
=
1082 cmd
->create_bus
.bus_data_type_uuid
;
1083 bus_info
->chan_info
.channel_inst_uuid
= cmd
->create_bus
.bus_inst_uuid
;
1085 list_add(&bus_info
->entry
, &bus_info_list
);
1087 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC
, bus_no
, POSTCODE_SEVERITY_INFO
);
1090 bus_epilog(bus_no
, CONTROLVM_BUS_CREATE
, &inmsg
->hdr
,
1091 rc
, inmsg
->hdr
.flags
.response_expected
== 1);
/*
 * bus_destroy() - handle CONTROLVM_BUS_DESTROY: map a missing bus to
 * BUS_INVALID and a never-created bus to ALREADY_DONE, then finish via
 * bus_epilog() (which performs notification and the actual teardown).
 * NOTE(review): the NULL-check line for bus_info was lost in
 * extraction; comments only are added.
 */
1095 bus_destroy(struct controlvm_message
*inmsg
)
1097 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1098 u32 bus_no
= cmd
->destroy_bus
.bus_no
;
1099 struct visorchipset_bus_info
*bus_info
;
1100 int rc
= CONTROLVM_RESP_SUCCESS
;
1102 bus_info
= findbus(&bus_info_list
, bus_no
);
1104 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1105 else if (bus_info
->state
.created
== 0)
1106 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1108 bus_epilog(bus_no
, CONTROLVM_BUS_DESTROY
, &inmsg
->hdr
,
1109 rc
, inmsg
->hdr
.flags
.response_expected
== 1);
/*
 * bus_configure() - handle CONTROLVM_BUS_CONFIGURE: validate the target
 * bus (exists, created, no response still pending), then record the
 * guest partition handle, partition uuid (from the parser context), and
 * parsed bus name, before finishing via bus_epilog().  POSTCODEs trace
 * entry/exit/failures.
 * NOTE(review): the 'char s[...]' buffer feeding visorchannel_uuid_id()
 * and some brace lines were lost in extraction; comments only are added.
 */
1113 bus_configure(struct controlvm_message
*inmsg
,
1114 struct parser_context
*parser_ctx
)
1116 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1117 u32 bus_no
= cmd
->configure_bus
.bus_no
;
1118 struct visorchipset_bus_info
*bus_info
= NULL
;
1119 int rc
= CONTROLVM_RESP_SUCCESS
;
1122 bus_no
= cmd
->configure_bus
.bus_no
;
1123 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC
, bus_no
,
1124 POSTCODE_SEVERITY_INFO
);
1126 bus_info
= findbus(&bus_info_list
, bus_no
);
1128 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC
, bus_no
,
1129 POSTCODE_SEVERITY_ERR
);
1130 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1131 } else if (bus_info
->state
.created
== 0) {
1132 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC
, bus_no
,
1133 POSTCODE_SEVERITY_ERR
);
1134 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1135 } else if (bus_info
->pending_msg_hdr
.id
!= CONTROLVM_INVALID
) {
1136 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC
, bus_no
,
1137 POSTCODE_SEVERITY_ERR
);
1138 rc
= -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT
;
1140 bus_info
->partition_handle
= cmd
->configure_bus
.guest_handle
;
1141 bus_info
->partition_uuid
= parser_id_get(parser_ctx
);
1142 parser_param_start(parser_ctx
, PARSERSTRING_NAME
);
1143 bus_info
->name
= parser_string_get(parser_ctx
);
1145 visorchannel_uuid_id(&bus_info
->partition_uuid
, s
);
1146 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC
, bus_no
,
1147 POSTCODE_SEVERITY_INFO
);
1149 bus_epilog(bus_no
, CONTROLVM_BUS_CONFIGURE
, &inmsg
->hdr
,
1150 rc
, inmsg
->hdr
.flags
.response_expected
== 1);
1154 my_device_create(struct controlvm_message
*inmsg
)
1156 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1157 u32 bus_no
= cmd
->create_device
.bus_no
;
1158 u32 dev_no
= cmd
->create_device
.dev_no
;
1159 struct visorchipset_device_info
*dev_info
= NULL
;
1160 struct visorchipset_bus_info
*bus_info
= NULL
;
1161 int rc
= CONTROLVM_RESP_SUCCESS
;
1163 dev_info
= finddevice(&dev_info_list
, bus_no
, dev_no
);
1164 if (dev_info
&& (dev_info
->state
.created
== 1)) {
1165 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1166 POSTCODE_SEVERITY_ERR
);
1167 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1170 bus_info
= findbus(&bus_info_list
, bus_no
);
1172 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1173 POSTCODE_SEVERITY_ERR
);
1174 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1177 if (bus_info
->state
.created
== 0) {
1178 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1179 POSTCODE_SEVERITY_ERR
);
1180 rc
= -CONTROLVM_RESP_ERROR_BUS_INVALID
;
1183 dev_info
= kzalloc(sizeof(*dev_info
), GFP_KERNEL
);
1185 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC
, dev_no
, bus_no
,
1186 POSTCODE_SEVERITY_ERR
);
1187 rc
= -CONTROLVM_RESP_ERROR_KMALLOC_FAILED
;
1191 INIT_LIST_HEAD(&dev_info
->entry
);
1192 dev_info
->bus_no
= bus_no
;
1193 dev_info
->dev_no
= dev_no
;
1194 dev_info
->dev_inst_uuid
= cmd
->create_device
.dev_inst_uuid
;
1195 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC
, dev_no
, bus_no
,
1196 POSTCODE_SEVERITY_INFO
);
1198 if (inmsg
->hdr
.flags
.test_message
== 1)
1199 dev_info
->chan_info
.addr_type
= ADDRTYPE_LOCALTEST
;
1201 dev_info
->chan_info
.addr_type
= ADDRTYPE_LOCALPHYSICAL
;
1202 dev_info
->chan_info
.channel_addr
= cmd
->create_device
.channel_addr
;
1203 dev_info
->chan_info
.n_channel_bytes
= cmd
->create_device
.channel_bytes
;
1204 dev_info
->chan_info
.channel_type_uuid
=
1205 cmd
->create_device
.data_type_uuid
;
1206 dev_info
->chan_info
.intr
= cmd
->create_device
.intr
;
1207 list_add(&dev_info
->entry
, &dev_info_list
);
1208 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC
, dev_no
, bus_no
,
1209 POSTCODE_SEVERITY_INFO
);
1211 /* get the bus and devNo for DiagPool channel */
1213 is_diagpool_channel(dev_info
->chan_info
.channel_type_uuid
)) {
1214 g_diagpool_bus_no
= bus_no
;
1215 g_diagpool_dev_no
= dev_no
;
1217 device_epilog(bus_no
, dev_no
, segment_state_running
,
1218 CONTROLVM_DEVICE_CREATE
, &inmsg
->hdr
, rc
,
1219 inmsg
->hdr
.flags
.response_expected
== 1,
1220 FOR_VISORBUS(dev_info
->chan_info
.channel_type_uuid
));
1224 my_device_changestate(struct controlvm_message
*inmsg
)
1226 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1227 u32 bus_no
= cmd
->device_change_state
.bus_no
;
1228 u32 dev_no
= cmd
->device_change_state
.dev_no
;
1229 struct spar_segment_state state
= cmd
->device_change_state
.state
;
1230 struct visorchipset_device_info
*dev_info
= NULL
;
1231 int rc
= CONTROLVM_RESP_SUCCESS
;
1233 dev_info
= finddevice(&dev_info_list
, bus_no
, dev_no
);
1235 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC
, dev_no
, bus_no
,
1236 POSTCODE_SEVERITY_ERR
);
1237 rc
= -CONTROLVM_RESP_ERROR_DEVICE_INVALID
;
1238 } else if (dev_info
->state
.created
== 0) {
1239 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC
, dev_no
, bus_no
,
1240 POSTCODE_SEVERITY_ERR
);
1241 rc
= -CONTROLVM_RESP_ERROR_DEVICE_INVALID
;
1243 if ((rc
>= CONTROLVM_RESP_SUCCESS
) && dev_info
)
1244 device_epilog(bus_no
, dev_no
, state
,
1245 CONTROLVM_DEVICE_CHANGESTATE
, &inmsg
->hdr
, rc
,
1246 inmsg
->hdr
.flags
.response_expected
== 1,
1248 dev_info
->chan_info
.channel_type_uuid
));
1252 my_device_destroy(struct controlvm_message
*inmsg
)
1254 struct controlvm_message_packet
*cmd
= &inmsg
->cmd
;
1255 u32 bus_no
= cmd
->destroy_device
.bus_no
;
1256 u32 dev_no
= cmd
->destroy_device
.dev_no
;
1257 struct visorchipset_device_info
*dev_info
= NULL
;
1258 int rc
= CONTROLVM_RESP_SUCCESS
;
1260 dev_info
= finddevice(&dev_info_list
, bus_no
, dev_no
);
1262 rc
= -CONTROLVM_RESP_ERROR_DEVICE_INVALID
;
1263 else if (dev_info
->state
.created
== 0)
1264 rc
= -CONTROLVM_RESP_ERROR_ALREADY_DONE
;
1266 if ((rc
>= CONTROLVM_RESP_SUCCESS
) && dev_info
)
1267 device_epilog(bus_no
, dev_no
, segment_state_running
,
1268 CONTROLVM_DEVICE_DESTROY
, &inmsg
->hdr
, rc
,
1269 inmsg
->hdr
.flags
.response_expected
== 1,
1271 dev_info
->chan_info
.channel_type_uuid
));
1274 /* When provided with the physical address of the controlvm channel
1275 * (phys_addr), the offset to the payload area we need to manage
1276 * (offset), and the size of this payload area (bytes), fills in the
1277 * controlvm_payload_info struct. Returns true for success or false
1281 initialize_controlvm_payload_info(HOSTADDRESS phys_addr
, u64 offset
, u32 bytes
,
1282 struct visor_controlvm_payload_info
*info
)
1284 u8 __iomem
*payload
= NULL
;
1285 int rc
= CONTROLVM_RESP_SUCCESS
;
1288 rc
= -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID
;
1291 memset(info
, 0, sizeof(struct visor_controlvm_payload_info
));
1292 if ((offset
== 0) || (bytes
== 0)) {
1293 rc
= -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID
;
1296 payload
= ioremap_cache(phys_addr
+ offset
, bytes
);
1298 rc
= -CONTROLVM_RESP_ERROR_IOREMAP_FAILED
;
1302 info
->offset
= offset
;
1303 info
->bytes
= bytes
;
1304 info
->ptr
= payload
;
1317 destroy_controlvm_payload_info(struct visor_controlvm_payload_info
*info
)
1323 memset(info
, 0, sizeof(struct visor_controlvm_payload_info
));
1327 initialize_controlvm_payload(void)
1329 HOSTADDRESS phys_addr
= visorchannel_get_physaddr(controlvm_channel
);
1330 u64 payload_offset
= 0;
1331 u32 payload_bytes
= 0;
1333 if (visorchannel_read(controlvm_channel
,
1334 offsetof(struct spar_controlvm_channel_protocol
,
1335 request_payload_offset
),
1336 &payload_offset
, sizeof(payload_offset
)) < 0) {
1337 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC
,
1338 POSTCODE_SEVERITY_ERR
);
1341 if (visorchannel_read(controlvm_channel
,
1342 offsetof(struct spar_controlvm_channel_protocol
,
1343 request_payload_bytes
),
1344 &payload_bytes
, sizeof(payload_bytes
)) < 0) {
1345 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC
,
1346 POSTCODE_SEVERITY_ERR
);
1349 initialize_controlvm_payload_info(phys_addr
,
1350 payload_offset
, payload_bytes
,
1351 &controlvm_payload_info
);
1354 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1355 * Returns CONTROLVM_RESP_xxx code.
1358 visorchipset_chipset_ready(void)
1360 kobject_uevent(&visorchipset_platform_device
.dev
.kobj
, KOBJ_ONLINE
);
1361 return CONTROLVM_RESP_SUCCESS
;
1363 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready
);
1366 visorchipset_chipset_selftest(void)
1368 char env_selftest
[20];
1369 char *envp
[] = { env_selftest
, NULL
};
1371 sprintf(env_selftest
, "SPARSP_SELFTEST=%d", 1);
1372 kobject_uevent_env(&visorchipset_platform_device
.dev
.kobj
, KOBJ_CHANGE
,
1374 return CONTROLVM_RESP_SUCCESS
;
1376 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest
);
1378 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1379 * Returns CONTROLVM_RESP_xxx code.
1382 visorchipset_chipset_notready(void)
1384 kobject_uevent(&visorchipset_platform_device
.dev
.kobj
, KOBJ_OFFLINE
);
1385 return CONTROLVM_RESP_SUCCESS
;
1387 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready
);
1390 chipset_ready(struct controlvm_message_header
*msg_hdr
)
1392 int rc
= visorchipset_chipset_ready();
1394 if (rc
!= CONTROLVM_RESP_SUCCESS
)
1396 if (msg_hdr
->flags
.response_expected
&& !visorchipset_holdchipsetready
)
1397 controlvm_respond(msg_hdr
, rc
);
1398 if (msg_hdr
->flags
.response_expected
&& visorchipset_holdchipsetready
) {
1399 /* Send CHIPSET_READY response when all modules have been loaded
1400 * and disks mounted for the partition
1402 g_chipset_msg_hdr
= *msg_hdr
;
1407 chipset_selftest(struct controlvm_message_header
*msg_hdr
)
1409 int rc
= visorchipset_chipset_selftest();
1411 if (rc
!= CONTROLVM_RESP_SUCCESS
)
1413 if (msg_hdr
->flags
.response_expected
)
1414 controlvm_respond(msg_hdr
, rc
);
1418 chipset_notready(struct controlvm_message_header
*msg_hdr
)
1420 int rc
= visorchipset_chipset_notready();
1422 if (rc
!= CONTROLVM_RESP_SUCCESS
)
1424 if (msg_hdr
->flags
.response_expected
)
1425 controlvm_respond(msg_hdr
, rc
);
1428 /* This is your "one-stop" shop for grabbing the next message from the
1429 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1432 read_controlvm_event(struct controlvm_message
*msg
)
1434 if (visorchannel_signalremove(controlvm_channel
,
1435 CONTROLVM_QUEUE_EVENT
, msg
)) {
1437 if (msg
->hdr
.flags
.test_message
== 1)
1445 * The general parahotplug flow works as follows. The visorchipset
1446 * driver receives a DEVICE_CHANGESTATE message from Command
1447 * specifying a physical device to enable or disable. The CONTROLVM
1448 * message handler calls parahotplug_process_message, which then adds
1449 * the message to a global list and kicks off a udev event which
1450 * causes a user level script to enable or disable the specified
1451 * device. The udev script then writes to
1452 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1453 * to get called, at which point the appropriate CONTROLVM message is
1454 * retrieved from the list and responded to.
1457 #define PARAHOTPLUG_TIMEOUT_MS 2000
1460 * Generate unique int to match an outstanding CONTROLVM message with a
1461 * udev script /proc response
1464 parahotplug_next_id(void)
1466 static atomic_t id
= ATOMIC_INIT(0);
1468 return atomic_inc_return(&id
);
1472 * Returns the time (in jiffies) when a CONTROLVM message on the list
1473 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1475 static unsigned long
1476 parahotplug_next_expiration(void)
1478 return jiffies
+ msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS
);
1482 * Create a parahotplug_request, which is basically a wrapper for a
1483 * CONTROLVM_MESSAGE that we can stick on a list
1485 static struct parahotplug_request
*
1486 parahotplug_request_create(struct controlvm_message
*msg
)
1488 struct parahotplug_request
*req
;
1490 req
= kmalloc(sizeof(*req
), GFP_KERNEL
| __GFP_NORETRY
);
1494 req
->id
= parahotplug_next_id();
1495 req
->expiration
= parahotplug_next_expiration();
1502 * Free a parahotplug_request.
1505 parahotplug_request_destroy(struct parahotplug_request
*req
)
1511 * Cause uevent to run the user level script to do the disable/enable
1512 * specified in (the CONTROLVM message in) the specified
1513 * parahotplug_request
1516 parahotplug_request_kickoff(struct parahotplug_request
*req
)
1518 struct controlvm_message_packet
*cmd
= &req
->msg
.cmd
;
1519 char env_cmd
[40], env_id
[40], env_state
[40], env_bus
[40], env_dev
[40],
1522 env_cmd
, env_id
, env_state
, env_bus
, env_dev
, env_func
, NULL
1525 sprintf(env_cmd
, "SPAR_PARAHOTPLUG=1");
1526 sprintf(env_id
, "SPAR_PARAHOTPLUG_ID=%d", req
->id
);
1527 sprintf(env_state
, "SPAR_PARAHOTPLUG_STATE=%d",
1528 cmd
->device_change_state
.state
.active
);
1529 sprintf(env_bus
, "SPAR_PARAHOTPLUG_BUS=%d",
1530 cmd
->device_change_state
.bus_no
);
1531 sprintf(env_dev
, "SPAR_PARAHOTPLUG_DEVICE=%d",
1532 cmd
->device_change_state
.dev_no
>> 3);
1533 sprintf(env_func
, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1534 cmd
->device_change_state
.dev_no
& 0x7);
1536 kobject_uevent_env(&visorchipset_platform_device
.dev
.kobj
, KOBJ_CHANGE
,
1541 * Remove any request from the list that's been on there too long and
1542 * respond with an error.
1545 parahotplug_process_list(void)
1547 struct list_head
*pos
= NULL
;
1548 struct list_head
*tmp
= NULL
;
1550 spin_lock(¶hotplug_request_list_lock
);
1552 list_for_each_safe(pos
, tmp
, ¶hotplug_request_list
) {
1553 struct parahotplug_request
*req
=
1554 list_entry(pos
, struct parahotplug_request
, list
);
1556 if (!time_after_eq(jiffies
, req
->expiration
))
1560 if (req
->msg
.hdr
.flags
.response_expected
)
1561 controlvm_respond_physdev_changestate(
1563 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT
,
1564 req
->msg
.cmd
.device_change_state
.state
);
1565 parahotplug_request_destroy(req
);
1568 spin_unlock(¶hotplug_request_list_lock
);
1572 * Called from the /proc handler, which means the user script has
1573 * finished the enable/disable. Find the matching identifier, and
1574 * respond to the CONTROLVM message with success.
1577 parahotplug_request_complete(int id
, u16 active
)
1579 struct list_head
*pos
= NULL
;
1580 struct list_head
*tmp
= NULL
;
1582 spin_lock(¶hotplug_request_list_lock
);
1584 /* Look for a request matching "id". */
1585 list_for_each_safe(pos
, tmp
, ¶hotplug_request_list
) {
1586 struct parahotplug_request
*req
=
1587 list_entry(pos
, struct parahotplug_request
, list
);
1588 if (req
->id
== id
) {
1589 /* Found a match. Remove it from the list and
1593 spin_unlock(¶hotplug_request_list_lock
);
1594 req
->msg
.cmd
.device_change_state
.state
.active
= active
;
1595 if (req
->msg
.hdr
.flags
.response_expected
)
1596 controlvm_respond_physdev_changestate(
1597 &req
->msg
.hdr
, CONTROLVM_RESP_SUCCESS
,
1598 req
->msg
.cmd
.device_change_state
.state
);
1599 parahotplug_request_destroy(req
);
1604 spin_unlock(¶hotplug_request_list_lock
);
1609 * Enables or disables a PCI device by kicking off a udev script
1612 parahotplug_process_message(struct controlvm_message
*inmsg
)
1614 struct parahotplug_request
*req
;
1616 req
= parahotplug_request_create(inmsg
);
1621 if (inmsg
->cmd
.device_change_state
.state
.active
) {
1622 /* For enable messages, just respond with success
1623 * right away. This is a bit of a hack, but there are
1624 * issues with the early enable messages we get (with
1625 * either the udev script not detecting that the device
1626 * is up, or not getting called at all). Fortunately
1627 * the messages that get lost don't matter anyway, as
1628 * devices are automatically enabled at
1631 parahotplug_request_kickoff(req
);
1632 controlvm_respond_physdev_changestate(&inmsg
->hdr
,
1633 CONTROLVM_RESP_SUCCESS
,
1634 inmsg
->cmd
.device_change_state
.state
);
1635 parahotplug_request_destroy(req
);
1637 /* For disable messages, add the request to the
1638 * request list before kicking off the udev script. It
1639 * won't get responded to until the script has
1640 * indicated it's done.
1642 spin_lock(¶hotplug_request_list_lock
);
1643 list_add_tail(&req
->list
, ¶hotplug_request_list
);
1644 spin_unlock(¶hotplug_request_list_lock
);
1646 parahotplug_request_kickoff(req
);
1650 /* Process a controlvm message.
1652 * false - this function will return FALSE only in the case where the
1653 * controlvm message was NOT processed, but processing must be
1654 * retried before reading the next controlvm message; a
1655 * scenario where this can occur is when we need to throttle
1656 * the allocation of memory in which to copy out controlvm
1658 * true - processing of the controlvm message completed,
1659 * either successfully or with an error.
1662 handle_command(struct controlvm_message inmsg
, HOSTADDRESS channel_addr
)
1664 struct controlvm_message_packet
*cmd
= &inmsg
.cmd
;
1667 struct parser_context
*parser_ctx
= NULL
;
1668 bool local_addr
= false;
1669 struct controlvm_message ackmsg
;
1671 /* create parsing context if necessary */
1672 local_addr
= (inmsg
.hdr
.flags
.test_message
== 1);
1673 if (channel_addr
== 0)
1675 parm_addr
= channel_addr
+ inmsg
.hdr
.payload_vm_offset
;
1676 parm_bytes
= inmsg
.hdr
.payload_bytes
;
1678 /* Parameter and channel addresses within test messages actually lie
1679 * within our OS-controlled memory. We need to know that, because it
1680 * makes a difference in how we compute the virtual address.
1682 if (parm_addr
!= 0 && parm_bytes
!= 0) {
1686 parser_init_byte_stream(parm_addr
, parm_bytes
,
1687 local_addr
, &retry
);
1688 if (!parser_ctx
&& retry
)
1693 controlvm_init_response(&ackmsg
, &inmsg
.hdr
,
1694 CONTROLVM_RESP_SUCCESS
);
1695 if (controlvm_channel
)
1696 visorchannel_signalinsert(controlvm_channel
,
1697 CONTROLVM_QUEUE_ACK
,
1700 switch (inmsg
.hdr
.id
) {
1701 case CONTROLVM_CHIPSET_INIT
:
1702 chipset_init(&inmsg
);
1704 case CONTROLVM_BUS_CREATE
:
1707 case CONTROLVM_BUS_DESTROY
:
1708 bus_destroy(&inmsg
);
1710 case CONTROLVM_BUS_CONFIGURE
:
1711 bus_configure(&inmsg
, parser_ctx
);
1713 case CONTROLVM_DEVICE_CREATE
:
1714 my_device_create(&inmsg
);
1716 case CONTROLVM_DEVICE_CHANGESTATE
:
1717 if (cmd
->device_change_state
.flags
.phys_device
) {
1718 parahotplug_process_message(&inmsg
);
1720 /* save the hdr and cmd structures for later use */
1721 /* when sending back the response to Command */
1722 my_device_changestate(&inmsg
);
1723 g_diag_msg_hdr
= inmsg
.hdr
;
1724 g_devicechangestate_packet
= inmsg
.cmd
;
1728 case CONTROLVM_DEVICE_DESTROY
:
1729 my_device_destroy(&inmsg
);
1731 case CONTROLVM_DEVICE_CONFIGURE
:
1732 /* no op for now, just send a respond that we passed */
1733 if (inmsg
.hdr
.flags
.response_expected
)
1734 controlvm_respond(&inmsg
.hdr
, CONTROLVM_RESP_SUCCESS
);
1736 case CONTROLVM_CHIPSET_READY
:
1737 chipset_ready(&inmsg
.hdr
);
1739 case CONTROLVM_CHIPSET_SELFTEST
:
1740 chipset_selftest(&inmsg
.hdr
);
1742 case CONTROLVM_CHIPSET_STOP
:
1743 chipset_notready(&inmsg
.hdr
);
1746 if (inmsg
.hdr
.flags
.response_expected
)
1747 controlvm_respond(&inmsg
.hdr
,
1748 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN
);
1753 parser_done(parser_ctx
);
1759 static HOSTADDRESS
controlvm_get_channel_address(void)
1764 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr
, &size
)))
1771 controlvm_periodic_work(struct work_struct
*work
)
1773 struct controlvm_message inmsg
;
1774 bool got_command
= false;
1775 bool handle_command_failed
= false;
1776 static u64 poll_count
;
1778 /* make sure visorbus server is registered for controlvm callbacks */
1779 if (visorchipset_serverregwait
&& !serverregistered
)
1781 /* make sure visorclientbus server is regsitered for controlvm
1784 if (visorchipset_clientregwait
&& !clientregistered
)
1788 if (poll_count
>= 250)
1793 /* Check events to determine if response to CHIPSET_READY
1796 if (visorchipset_holdchipsetready
&&
1797 (g_chipset_msg_hdr
.id
!= CONTROLVM_INVALID
)) {
1798 if (check_chipset_events() == 1) {
1799 controlvm_respond(&g_chipset_msg_hdr
, 0);
1800 clear_chipset_events();
1801 memset(&g_chipset_msg_hdr
, 0,
1802 sizeof(struct controlvm_message_header
));
1806 while (visorchannel_signalremove(controlvm_channel
,
1807 CONTROLVM_QUEUE_RESPONSE
,
1811 if (controlvm_pending_msg_valid
) {
1812 /* we throttled processing of a prior
1813 * msg, so try to process it again
1814 * rather than reading a new one
1816 inmsg
= controlvm_pending_msg
;
1817 controlvm_pending_msg_valid
= false;
1820 got_command
= read_controlvm_event(&inmsg
);
1824 handle_command_failed
= false;
1825 while (got_command
&& (!handle_command_failed
)) {
1826 most_recent_message_jiffies
= jiffies
;
1827 if (handle_command(inmsg
,
1828 visorchannel_get_physaddr
1829 (controlvm_channel
)))
1830 got_command
= read_controlvm_event(&inmsg
);
1832 /* this is a scenario where throttling
1833 * is required, but probably NOT an
1834 * error...; we stash the current
1835 * controlvm msg so we will attempt to
1836 * reprocess it on our next loop
1838 handle_command_failed
= true;
1839 controlvm_pending_msg
= inmsg
;
1840 controlvm_pending_msg_valid
= true;
1844 /* parahotplug_worker */
1845 parahotplug_process_list();
1849 if (time_after(jiffies
,
1850 most_recent_message_jiffies
+ (HZ
* MIN_IDLE_SECONDS
))) {
1851 /* it's been longer than MIN_IDLE_SECONDS since we
1852 * processed our last controlvm message; slow down the
1855 if (poll_jiffies
!= POLLJIFFIES_CONTROLVMCHANNEL_SLOW
)
1856 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_SLOW
;
1858 if (poll_jiffies
!= POLLJIFFIES_CONTROLVMCHANNEL_FAST
)
1859 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_FAST
;
1862 queue_delayed_work(periodic_controlvm_workqueue
,
1863 &periodic_controlvm_work
, poll_jiffies
);
1867 setup_crash_devices_work_queue(struct work_struct
*work
)
1869 struct controlvm_message local_crash_bus_msg
;
1870 struct controlvm_message local_crash_dev_msg
;
1871 struct controlvm_message msg
;
1872 u32 local_crash_msg_offset
;
1873 u16 local_crash_msg_count
;
1875 /* make sure visorbus server is registered for controlvm callbacks */
1876 if (visorchipset_serverregwait
&& !serverregistered
)
1879 /* make sure visorclientbus server is regsitered for controlvm
1882 if (visorchipset_clientregwait
&& !clientregistered
)
1885 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC
, POSTCODE_SEVERITY_INFO
);
1887 /* send init chipset msg */
1888 msg
.hdr
.id
= CONTROLVM_CHIPSET_INIT
;
1889 msg
.cmd
.init_chipset
.bus_count
= 23;
1890 msg
.cmd
.init_chipset
.switch_count
= 0;
1894 /* get saved message count */
1895 if (visorchannel_read(controlvm_channel
,
1896 offsetof(struct spar_controlvm_channel_protocol
,
1897 saved_crash_message_count
),
1898 &local_crash_msg_count
, sizeof(u16
)) < 0) {
1899 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC
,
1900 POSTCODE_SEVERITY_ERR
);
1904 if (local_crash_msg_count
!= CONTROLVM_CRASHMSG_MAX
) {
1905 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC
,
1906 local_crash_msg_count
,
1907 POSTCODE_SEVERITY_ERR
);
1911 /* get saved crash message offset */
1912 if (visorchannel_read(controlvm_channel
,
1913 offsetof(struct spar_controlvm_channel_protocol
,
1914 saved_crash_message_offset
),
1915 &local_crash_msg_offset
, sizeof(u32
)) < 0) {
1916 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC
,
1917 POSTCODE_SEVERITY_ERR
);
1921 /* read create device message for storage bus offset */
1922 if (visorchannel_read(controlvm_channel
,
1923 local_crash_msg_offset
,
1924 &local_crash_bus_msg
,
1925 sizeof(struct controlvm_message
)) < 0) {
1926 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC
,
1927 POSTCODE_SEVERITY_ERR
);
1931 /* read create device message for storage device */
1932 if (visorchannel_read(controlvm_channel
,
1933 local_crash_msg_offset
+
1934 sizeof(struct controlvm_message
),
1935 &local_crash_dev_msg
,
1936 sizeof(struct controlvm_message
)) < 0) {
1937 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC
,
1938 POSTCODE_SEVERITY_ERR
);
1942 /* reuse IOVM create bus message */
1943 if (local_crash_bus_msg
.cmd
.create_bus
.channel_addr
!= 0) {
1944 bus_create(&local_crash_bus_msg
);
1946 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC
,
1947 POSTCODE_SEVERITY_ERR
);
1951 /* reuse create device message for storage device */
1952 if (local_crash_dev_msg
.cmd
.create_device
.channel_addr
!= 0) {
1953 my_device_create(&local_crash_dev_msg
);
1955 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC
,
1956 POSTCODE_SEVERITY_ERR
);
1959 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
1964 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_SLOW
;
1966 queue_delayed_work(periodic_controlvm_workqueue
,
1967 &periodic_controlvm_work
, poll_jiffies
);
1971 bus_create_response(u32 bus_no
, int response
)
1973 bus_responder(CONTROLVM_BUS_CREATE
, bus_no
, response
);
1977 bus_destroy_response(u32 bus_no
, int response
)
1979 bus_responder(CONTROLVM_BUS_DESTROY
, bus_no
, response
);
1983 device_create_response(u32 bus_no
, u32 dev_no
, int response
)
1985 device_responder(CONTROLVM_DEVICE_CREATE
, bus_no
, dev_no
, response
);
1989 device_destroy_response(u32 bus_no
, u32 dev_no
, int response
)
1991 device_responder(CONTROLVM_DEVICE_DESTROY
, bus_no
, dev_no
, response
);
1995 visorchipset_device_pause_response(u32 bus_no
, u32 dev_no
, int response
)
1997 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE
,
1998 bus_no
, dev_no
, response
,
1999 segment_state_standby
);
2001 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response
);
2004 device_resume_response(u32 bus_no
, u32 dev_no
, int response
)
2006 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE
,
2007 bus_no
, dev_no
, response
,
2008 segment_state_running
);
2012 visorchipset_get_bus_info(u32 bus_no
, struct visorchipset_bus_info
*bus_info
)
2014 void *p
= findbus(&bus_info_list
, bus_no
);
2018 memcpy(bus_info
, p
, sizeof(struct visorchipset_bus_info
));
2021 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info
);
2024 visorchipset_set_bus_context(u32 bus_no
, void *context
)
2026 struct visorchipset_bus_info
*p
= findbus(&bus_info_list
, bus_no
);
2030 p
->bus_driver_context
= context
;
2033 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context
);
2036 visorchipset_get_device_info(u32 bus_no
, u32 dev_no
,
2037 struct visorchipset_device_info
*dev_info
)
2039 void *p
= finddevice(&dev_info_list
, bus_no
, dev_no
);
2043 memcpy(dev_info
, p
, sizeof(struct visorchipset_device_info
));
2046 EXPORT_SYMBOL_GPL(visorchipset_get_device_info
);
2049 visorchipset_set_device_context(u32 bus_no
, u32 dev_no
, void *context
)
2051 struct visorchipset_device_info
*p
=
2052 finddevice(&dev_info_list
, bus_no
, dev_no
);
2056 p
->bus_driver_context
= context
;
2059 EXPORT_SYMBOL_GPL(visorchipset_set_device_context
);
2061 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2064 visorchipset_cache_alloc(struct kmem_cache
*pool
, bool ok_to_block
,
2074 /* __GFP_NORETRY means "ok to fail", meaning
2075 * kmem_cache_alloc() can return NULL, implying the caller CAN
2076 * cope with failure. If you do NOT specify __GFP_NORETRY,
2077 * Linux will go to extreme measures to get memory for you
2078 * (like, invoke oom killer), which will probably cripple the
2081 gfp
|= __GFP_NORETRY
;
2082 p
= kmem_cache_alloc(pool
, gfp
);
2086 atomic_inc(&visorchipset_cache_buffers_in_use
);
2090 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2093 visorchipset_cache_free(struct kmem_cache
*pool
, void *p
, char *fn
, int ln
)
2098 atomic_dec(&visorchipset_cache_buffers_in_use
);
2099 kmem_cache_free(pool
, p
);
2102 static ssize_t
chipsetready_store(struct device
*dev
,
2103 struct device_attribute
*attr
,
2104 const char *buf
, size_t count
)
2108 if (sscanf(buf
, "%63s", msgtype
) != 1)
2111 if (strcmp(msgtype
, "CALLHOMEDISK_MOUNTED") == 0) {
2112 chipset_events
[0] = 1;
2114 } else if (strcmp(msgtype
, "MODULES_LOADED") == 0) {
2115 chipset_events
[1] = 1;
2121 /* The parahotplug/devicedisabled interface gets called by our support script
2122 * when an SR-IOV device has been shut down. The ID is passed to the script
2123 * and then passed back when the device has been removed.
2125 static ssize_t
devicedisabled_store(struct device
*dev
,
2126 struct device_attribute
*attr
,
2127 const char *buf
, size_t count
)
2131 if (kstrtouint(buf
, 10, &id
) != 0)
2134 parahotplug_request_complete(id
, 0);
2138 /* The parahotplug/deviceenabled interface gets called by our support script
2139 * when an SR-IOV device has been recovered. The ID is passed to the script
2140 * and then passed back when the device has been brought back up.
2142 static ssize_t
deviceenabled_store(struct device
*dev
,
2143 struct device_attribute
*attr
,
2144 const char *buf
, size_t count
)
2148 if (kstrtouint(buf
, 10, &id
) != 0)
2151 parahotplug_request_complete(id
, 1);
2156 visorchipset_init(void)
2161 if (!unisys_spar_platform
)
2164 memset(&busdev_server_notifiers
, 0, sizeof(busdev_server_notifiers
));
2165 memset(&busdev_client_notifiers
, 0, sizeof(busdev_client_notifiers
));
2166 memset(&controlvm_payload_info
, 0, sizeof(controlvm_payload_info
));
2167 memset(&livedump_info
, 0, sizeof(livedump_info
));
2168 atomic_set(&livedump_info
.buffers_in_use
, 0);
2170 if (visorchipset_testvnic
) {
2171 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC
, x
, DIAG_SEVERITY_ERR
);
2176 addr
= controlvm_get_channel_address();
2179 visorchannel_create_with_lock
2181 sizeof(struct spar_controlvm_channel_protocol
),
2182 spar_controlvm_channel_protocol_uuid
);
2183 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2184 visorchannel_get_header(controlvm_channel
))) {
2185 initialize_controlvm_payload();
2187 visorchannel_destroy(controlvm_channel
);
2188 controlvm_channel
= NULL
;
2195 major_dev
= MKDEV(visorchipset_major
, 0);
2196 rc
= visorchipset_file_init(major_dev
, &controlvm_channel
);
2198 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC
, DIAG_SEVERITY_ERR
);
2202 memset(&g_diag_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2204 memset(&g_chipset_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2206 memset(&g_del_dump_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2208 putfile_buffer_list_pool
=
2209 kmem_cache_create(putfile_buffer_list_pool_name
,
2210 sizeof(struct putfile_buffer_entry
),
2211 0, SLAB_HWCACHE_ALIGN
, NULL
);
2212 if (!putfile_buffer_list_pool
) {
2213 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC
, DIAG_SEVERITY_ERR
);
2217 if (!visorchipset_disable_controlvm
) {
2218 /* if booting in a crash kernel */
2219 if (is_kdump_kernel())
2220 INIT_DELAYED_WORK(&periodic_controlvm_work
,
2221 setup_crash_devices_work_queue
);
2223 INIT_DELAYED_WORK(&periodic_controlvm_work
,
2224 controlvm_periodic_work
);
2225 periodic_controlvm_workqueue
=
2226 create_singlethread_workqueue("visorchipset_controlvm");
2228 if (!periodic_controlvm_workqueue
) {
2229 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC
,
2234 most_recent_message_jiffies
= jiffies
;
2235 poll_jiffies
= POLLJIFFIES_CONTROLVMCHANNEL_FAST
;
2236 rc
= queue_delayed_work(periodic_controlvm_workqueue
,
2237 &periodic_controlvm_work
, poll_jiffies
);
2239 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC
,
2245 visorchipset_platform_device
.dev
.devt
= major_dev
;
2246 if (platform_device_register(&visorchipset_platform_device
) < 0) {
2247 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC
, DIAG_SEVERITY_ERR
);
2251 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC
, POSTCODE_SEVERITY_INFO
);
2255 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC
, rc
,
2256 POSTCODE_SEVERITY_ERR
);
2262 visorchipset_exit(void)
2264 POSTCODE_LINUX_2(DRIVER_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
2266 if (visorchipset_disable_controlvm
) {
2269 cancel_delayed_work(&periodic_controlvm_work
);
2270 flush_workqueue(periodic_controlvm_workqueue
);
2271 destroy_workqueue(periodic_controlvm_workqueue
);
2272 periodic_controlvm_workqueue
= NULL
;
2273 destroy_controlvm_payload_info(&controlvm_payload_info
);
2275 if (putfile_buffer_list_pool
) {
2276 kmem_cache_destroy(putfile_buffer_list_pool
);
2277 putfile_buffer_list_pool
= NULL
;
2280 cleanup_controlvm_structures();
2282 memset(&g_diag_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2284 memset(&g_chipset_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2286 memset(&g_del_dump_msg_hdr
, 0, sizeof(struct controlvm_message_header
));
2288 visorchannel_destroy(controlvm_channel
);
2290 visorchipset_file_cleanup(visorchipset_platform_device
.dev
.devt
);
2291 POSTCODE_LINUX_2(DRIVER_EXIT_PC
, POSTCODE_SEVERITY_INFO
);
2294 module_param_named(testvnic
, visorchipset_testvnic
, int, S_IRUGO
);
2295 MODULE_PARM_DESC(visorchipset_testvnic
, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2296 module_param_named(testvnicclient
, visorchipset_testvnicclient
, int, S_IRUGO
);
2297 MODULE_PARM_DESC(visorchipset_testvnicclient
, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2298 module_param_named(testmsg
, visorchipset_testmsg
, int, S_IRUGO
);
2299 MODULE_PARM_DESC(visorchipset_testmsg
,
2300 "1 to manufacture the chipset, bus, and switch messages");
2301 module_param_named(major
, visorchipset_major
, int, S_IRUGO
);
2302 MODULE_PARM_DESC(visorchipset_major
,
2303 "major device number to use for the device node");
2304 module_param_named(serverregwait
, visorchipset_serverregwait
, int, S_IRUGO
);
2305 MODULE_PARM_DESC(visorchipset_serverreqwait
,
2306 "1 to have the module wait for the visor bus to register");
2307 module_param_named(clientregwait
, visorchipset_clientregwait
, int, S_IRUGO
);
2308 MODULE_PARM_DESC(visorchipset_clientregwait
, "1 to have the module wait for the visorclientbus to register");
2309 module_param_named(testteardown
, visorchipset_testteardown
, int, S_IRUGO
);
2310 MODULE_PARM_DESC(visorchipset_testteardown
,
2311 "1 to test teardown of the chipset, bus, and switch");
2312 module_param_named(disable_controlvm
, visorchipset_disable_controlvm
, int,
2314 MODULE_PARM_DESC(visorchipset_disable_controlvm
,
2315 "1 to disable polling of controlVm channel");
2316 module_param_named(holdchipsetready
, visorchipset_holdchipsetready
,
2318 MODULE_PARM_DESC(visorchipset_holdchipsetready
,
2319 "1 to hold response to CHIPSET_READY");
2321 module_init(visorchipset_init
);
2322 module_exit(visorchipset_exit
);
2324 MODULE_AUTHOR("Unisys");
2325 MODULE_LICENSE("GPL");
2326 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2328 MODULE_VERSION(VERSION
);