staging: unisys: fix CamelCased Parahotplug globals
drivers/staging/unisys/visorchipset/visorchipset_main.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include "globals.h"
#include "visorchipset.h"
#include "procobjecttree.h"
#include "visorchannel.h"
#include "periodic_work.h"
#include "file.h"
#include "parser.h"
#include "uisutils.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"

#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
#define TEST_VNIC_PHYSITF "eth0"	/* physical network itf for
					 * vnic loopback test */
#define TEST_VNIC_SWITCHNO 1
#define TEST_VNIC_BUSNO 9

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static ulong most_recent_message_jiffies;	/* when we got our last
						 * controlvm message */
static inline char *
NONULLSTR(char *s)
{
	if (s)
		return s;
	return "";
}

static int serverregistered;
static int clientregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct controlvm_message_header g_diag_msg_hdr;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_header g_del_dump_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static ulong g_diagpool_bus_no = 0xffffff;
static ulong g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

/* Only VNIC and VHBA channels are sent to visorclientbus (aka
 * "visorhackbus")
 */
#define FOR_VISORHACKBUS(channel_type_guid) \
	(((uuid_le_cmp(channel_type_guid,\
		       spar_vnic_channel_protocol_uuid) == 0) ||\
	  (uuid_le_cmp(channel_type_guid,\
		       spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
static struct controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
} controlvm_payload_info;

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
static struct livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	BOOL gettextdump_outstanding;
	u32 crc32;
	ulong length;
	atomic_t buffers_in_use;
	ulong destination;
} livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static BOOL controlvm_pending_msg_valid = FALSE;

/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
 * TRANSMIT_FILE PutFile payloads.
 */
static struct kmem_cache *putfile_buffer_list_pool;
static const char putfile_buffer_list_pool_name[] =
	"controlvm_putfile_buffer_list_pool";

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set while holding req_list_lock, and if you
	 * set it to <0, it is your responsibility to also free up all of the
	 * other objects in this struct (like input_buffer_list and
	 * active_buf.parser_ctx) before releasing the lock
	 */
	int completion_status;
};
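
/* A minimal sketch of the failure-path discipline documented above for
 * completion_status, assuming a hypothetical helper free_putfile_buffers()
 * (not defined in this file) that drains input_buffer_list and releases
 * active_buf.parser_ctx:
 *
 *	spin_lock(&req->req_list_lock);
 *	req->completion_status = -1;	// mark the request as failed
 *	free_putfile_buffers(req);	// free owned objects BEFORE unlocking
 *	spin_unlock(&req->req_list_lock);
 */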

static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Notifier callback sets registered by the visorbus server and client
 * drivers via visorchipset_register_busdev_server() and
 * visorchipset_register_busdev_client() below.
 */
static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;

static void bus_create_response(ulong busNo, int response);
static void bus_destroy_response(ulong busNo, int response);
static void device_create_response(ulong busNo, ulong devNo, int response);
static void device_destroy_response(ulong busNo, ulong devNo, int response);
static void device_resume_response(ulong busNo, ulong devNo, int response);

static struct visorchipset_busdev_responders BusDev_Responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t MajorDev = -1;	/**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};
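
/* For reference, the attribute groups above produce the following sysfs
 * layout under /sys/devices/platform/visorchipset (derived from the group
 * and attribute names; shown for illustration only):
 *
 *	install/toolaction		(read/write)
 *	install/boottotool		(read/write)
 *	install/error			(read/write)
 *	install/textid			(read/write)
 *	install/remaining_steps		(read/write)
 *	guest/chipsetready		(write-only)
 *	parahotplug/devicedisabled	(write-only)
 *	parahotplug/deviceenabled	(write-only)
 */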

/* /sys/devices/platform/visorchipset */
static struct platform_device Visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msgHdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msgHdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msgHdr, int response,
		struct spar_segment_state state);

static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 toolAction;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &toolAction, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 toolAction;
	int ret;

	if (kstrtou8(buf, 10, &toolAction) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol, tool_action),
		&toolAction, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efiSparIndication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efiSparIndication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efiSparIndication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efiSparIndication;

	if (kstrtoint(buf, 10, &val) != 0)
		return -EINVAL;

	efiSparIndication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 efi_spar_ind),
			&(efiSparIndication),
			sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel, offsetof(
		struct spar_controlvm_channel_protocol, installation_error),
		&error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 installation_error),
			&error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 textId;

	visorchannel_read(controlvm_channel, offsetof(
		struct spar_controlvm_channel_protocol, installation_text_id),
		&textId, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 textId;
	int ret;

	if (kstrtou32(buf, 10, &textId) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 installation_text_id),
			&textId, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remainingSteps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remainingSteps,
			  sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remainingSteps;
	int ret;

	if (kstrtou16(buf, 10, &remainingSteps) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 installation_remaining_steps),
			&remainingSteps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);

	kfree(p->name);
	p->name = NULL;

	kfree(p->description);
	p->description = NULL;

	p->state.created = 0;
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *)(v);

	p->state.created = 0;
	memset(p, 0, sizeof(struct visorchipset_device_info));
}

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;
	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;
	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

void
visorchipset_register_busdev_server(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&BusDev_Server_Notifiers, 0,
		       sizeof(BusDev_Server_Notifiers));
		serverregistered = 0;	/* clear flag */
	} else {
		BusDev_Server_Notifiers = *notifiers;
		serverregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = BusDev_Responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
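
/* A minimal usage sketch for a server bus driver registering with this
 * module.  The my_-prefixed names are hypothetical caller variables; the
 * structure members shown are the ones used elsewhere in this file:
 *
 *	static struct visorchipset_busdev_notifiers my_notifiers = {
 *		.bus_create = my_bus_create,
 *		.bus_destroy = my_bus_destroy,
 *	};
 *	static struct visorchipset_busdev_responders my_responders;
 *	static struct ultra_vbus_deviceinfo my_driver_info;
 *
 *	visorchipset_register_busdev_server(&my_notifiers, &my_responders,
 *					    &my_driver_info);
 *
 * Once a notifier such as my_bus_create(bus_no) has finished its work, it
 * is expected to complete the handshake by calling the matching responder,
 * e.g. my_responders.bus_create(bus_no, response), which lands in
 * bus_responder() below.
 */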

void
visorchipset_register_busdev_client(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&BusDev_Client_Notifiers, 0,
		       sizeof(BusDev_Client_Notifiers));
		clientregistered = 0;	/* clear flag */
	} else {
		BusDev_Client_Notifiers = *notifiers;
		clientregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = BusDev_Responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset(bolts)",
				     "visorchipset", VERSION, NULL);
	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msgHdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msgHdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msgHdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet.
	 */
	if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msgHdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msgHdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msgHdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 crash_msg_offset;
	u16 crash_msg_count;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);
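
/* A hypothetical crash-path caller of visorchipset_save_message(), for
 * illustration only; CRASH_BUS is referenced above, while any sibling
 * values of enum crash_obj_type are assumed rather than shown in this file:
 *
 *	struct controlvm_message bus_msg;
 *
 *	// stash the BUS_CREATE message in the channel's saved-crash-message
 *	// area so it can be replayed after a crash (see
 *	// setup_crash_devices_work_queue() below)
 *	visorchipset_save_message(&bus_msg, CRASH_BUS);
 */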

static void
bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
{
	struct visorchipset_bus_info *p = NULL;
	BOOL need_clear = FALSE;

	p = findbus(&bus_info_list, bus_no);
	if (!p)
		return;

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			delbusdevices(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = TRUE;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		delbusdevices(&dev_info_list, bus_no);
	}
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     ulong bus_no, ulong dev_no, int response,
			     struct spar_segment_state response_state)
{
	struct visorchipset_device_info *p = NULL;
	struct controlvm_message outmsg;

	p = finddevice(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
		 int response)
{
	struct visorchipset_device_info *p = NULL;
	BOOL need_clear = FALSE;

	p = finddevice(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = TRUE;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

static void
bus_epilog(u32 bus_no,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, BOOL need_response)
{
	BOOL notified = FALSE;

	struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
							 bus_no);

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			/* We can't tell from the bus_create
			 * information which of our 2 bus flavors the
			 * devices on this bus will ultimately end up
			 * on.  FORTUNATELY, it turns out it is harmless
			 * to send the bus_create to both of them.  We
			 * can narrow things down a little bit, though,
			 * because we know:
			 * - BusDev_Server can handle either server or
			 *   client devices
			 * - BusDev_Client can handle ONLY client devices
			 */
			if (BusDev_Server_Notifiers.bus_create) {
				(*BusDev_Server_Notifiers.bus_create) (bus_no);
				notified = TRUE;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    BusDev_Client_Notifiers.bus_create) {
				(*BusDev_Client_Notifiers.bus_create) (bus_no);
				notified = TRUE;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (BusDev_Server_Notifiers.bus_destroy) {
				(*BusDev_Server_Notifiers.bus_destroy) (bus_no);
				notified = TRUE;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    BusDev_Client_Notifiers.bus_destroy) {
				(*BusDev_Client_Notifiers.bus_destroy) (bus_no);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_no, response);
	up(&notifier_lock);
}

static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      BOOL need_response, BOOL for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers = NULL;
	BOOL notified = FALSE;

	struct visorchipset_device_info *dev_info =
		finddevice(&dev_info_list, bus_no, dev_no);
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;

	if (for_visorbus)
		notifiers = &BusDev_Server_Notifiers;
	else
		notifiers = &BusDev_Client_Notifiers;
	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (bus_no, dev_no);
				notified = TRUE;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
			    segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (bus_no,
								     dev_no);
					notified = TRUE;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is the standby case,
				 * where the server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (bus_no,
								    dev_no);
					notified = TRUE;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is the "lite pause" case, where the
				 * channel is still valid; we merely pause
				 * use of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env(
					    &Visorchipset_platform_device.dev.kobj,
					    KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (bus_no, dev_no);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, bus_no, dev_no, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info = NULL;

	bus_info = findbus(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;
	bus_info->dev_no = cmd->create_bus.dev_count;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = findbus(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->configure_bus.bus_no;
	struct visorchipset_bus_info *bus_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = findbus(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->create_device.bus_no;
	ulong dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info = NULL;
	struct visorchipset_bus_info *bus_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = findbus(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	dev_info->chan_info.intr = cmd->create_device.intr;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(bus_no, dev_no, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1,
		      FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->device_change_state.bus_no;
	ulong dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					dev_info->chan_info.channel_type_uuid));
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong bus_no = cmd->destroy_device.bus_no;
	ulong dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = finddevice(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					dev_info->chan_info.channel_type_uuid));
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success or a negative CONTROLVM_RESP_ERROR_* code for failure.
 */
static int
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
				  struct controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
	HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);

int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);

static void
chipset_ready(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msgHdr, rc);
	if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been
		 * loaded and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msgHdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected)
		controlvm_respond(msgHdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected)
		controlvm_respond(msgHdr, rc);
}

/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static BOOL
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return FALSE;
		return TRUE;
	}
	return FALSE;
}

/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
 * to get called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */
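
/* For illustration, a sketch of the userspace half of this handshake.  The
 * exact format written to /proc/visorchipset/parahotplug is parsed by the
 * proc handler, which is outside this excerpt, so the "id,active" format
 * below is an assumption:
 *
 *	// invoked by udev with the SPAR_PARAHOTPLUG_* variables set
 *	FILE *f = fopen("/proc/visorchipset/parahotplug", "w");
 *	if (f) {
 *		// echo back the request id plus the resulting state so
 *		// parahotplug_request_complete() can match and respond
 *		fprintf(f, "%s,0", getenv("SPAR_PARAHOTPLUG_ID"));
 *		fclose(f);
 *	}
 */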

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	     env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}

/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
				CONTROLVM_RESP_SUCCESS,
				inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}

/* Process a controlvm message.
 * Return result:
 *    FALSE - this function will return FALSE only in the case where the
 *	      controlvm message was NOT processed, but processing must be
 *	      retried before reading the next controlvm message; a
 *	      scenario where this can occur is when we need to throttle
 *	      the allocation of memory in which to copy out controlvm
 *	      payload data
 *    TRUE  - processing of the controlvm message completed,
 *	      either successfully or with an error.
 */
static BOOL
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr = 0;
	u32 parm_bytes = 0;
	struct parser_context *parser_ctx = NULL;
	bool local_addr = false;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return TRUE;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr != 0 && parm_bytes != 0) {
		BOOL retry = FALSE;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return FALSE;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			g_diag_msg_hdr = inmsg.hdr;
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a response that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return TRUE;
}

static HOSTADDRESS controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}

static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	BOOL got_command = FALSE;
	BOOL handle_command_failed = FALSE;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;
	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
1767 if (visorchipset_clientregwait && !clientregistered)
1768 goto cleanup;
1769
1770 poll_count++;
1771 if (poll_count >= 250)
1772 ; /* keep going */
1773 else
1774 goto cleanup;
1775
1776 /* Check events to determine if response to CHIPSET_READY
1777 * should be sent
1778 */
1779 if (visorchipset_holdchipsetready &&
1780 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1781 if (check_chipset_events() == 1) {
1782 controlvm_respond(&g_chipset_msg_hdr, 0);
1783 clear_chipset_events();
1784 memset(&g_chipset_msg_hdr, 0,
1785 sizeof(struct controlvm_message_header));
1786 }
1787 }
1788
1789 while (visorchannel_signalremove(controlvm_channel,
1790 CONTROLVM_QUEUE_RESPONSE,
1791 &inmsg))
1792 ; /* drain and discard any queued responses */
1793 if (!got_command) {
1794 if (controlvm_pending_msg_valid) {
1795 /* we throttled processing of a prior
1796 * msg, so try to process it again
1797 * rather than reading a new one
1798 */
1799 inmsg = controlvm_pending_msg;
1800 controlvm_pending_msg_valid = FALSE;
1801 got_command = TRUE;
1802 } else {
1803 got_command = read_controlvm_event(&inmsg);
1804 }
1805 }
1806
1807 handle_command_failed = FALSE;
1808 while (got_command && (!handle_command_failed)) {
1809 most_recent_message_jiffies = jiffies;
1810 if (handle_command(inmsg,
1811 visorchannel_get_physaddr(controlvm_channel))) {
1812 got_command = read_controlvm_event(&inmsg);
1813 } else {
1815 /* this is a scenario where throttling
1816 * is required, but probably NOT an
1817 * error...; we stash the current
1818 * controlvm msg so we will attempt to
1819 * reprocess it on our next loop
1820 */
1821 handle_command_failed = TRUE;
1822 controlvm_pending_msg = inmsg;
1823 controlvm_pending_msg_valid = TRUE;
1824 }
1825 }
1826
1827 /* parahotplug_worker */
1828 parahotplug_process_list();
1829
1830 cleanup:
1831
1832 if (time_after(jiffies,
1833 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1834 /* it's been longer than MIN_IDLE_SECONDS since we
1835 * processed our last controlvm message; slow down the
1836 * polling
1837 */
1838 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1839 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1840 } else {
1841 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1842 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1843 }
1844
1845 queue_delayed_work(periodic_controlvm_workqueue,
1846 &periodic_controlvm_work, poll_jiffies);
1847 }
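
/*
 * Rough polling cadence implied by the constants above (illustrative
 * arithmetic only; HZ is kernel-configuration dependent, 250 assumed):
 *
 *   fast: POLLJIFFIES_CONTROLVMCHANNEL_FAST = 1 jiffy     ->   ~4 ms
 *   slow: POLLJIFFIES_CONTROLVMCHANNEL_SLOW = 100 jiffies -> ~400 ms
 *
 * Slow mode starts after HZ * MIN_IDLE_SECONDS (10 s) without a
 * controlvm message; the first new message flips polling back to fast.
 */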
1848
1849 static void
1850 setup_crash_devices_work_queue(struct work_struct *work)
1851 {
1852 struct controlvm_message local_crash_bus_msg;
1853 struct controlvm_message local_crash_dev_msg;
1854 struct controlvm_message msg;
1855 u32 local_crash_msg_offset;
1856 u16 local_crash_msg_count;
1857
1858 /* make sure visorbus server is registered for controlvm callbacks */
1859 if (visorchipset_serverregwait && !serverregistered)
1860 goto cleanup;
1861
1862 /* make sure visorclientbus server is registered for controlvm
1863 * callbacks
1864 */
1865 if (visorchipset_clientregwait && !clientregistered)
1866 goto cleanup;
1867
1868 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1869
1870 /* send init chipset msg */
1871 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1872 msg.cmd.init_chipset.bus_count = 23;
1873 msg.cmd.init_chipset.switch_count = 0;
1874
1875 chipset_init(&msg);
1876
1877 /* get saved message count */
1878 if (visorchannel_read(controlvm_channel,
1879 offsetof(struct spar_controlvm_channel_protocol,
1880 saved_crash_message_count),
1881 &local_crash_msg_count, sizeof(u16)) < 0) {
1882 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1883 POSTCODE_SEVERITY_ERR);
1884 return;
1885 }
1886
1887 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1888 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1889 local_crash_msg_count,
1890 POSTCODE_SEVERITY_ERR);
1891 return;
1892 }
1893
1894 /* get saved crash message offset */
1895 if (visorchannel_read(controlvm_channel,
1896 offsetof(struct spar_controlvm_channel_protocol,
1897 saved_crash_message_offset),
1898 &local_crash_msg_offset, sizeof(u32)) < 0) {
1899 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1900 POSTCODE_SEVERITY_ERR);
1901 return;
1902 }
1903
1904 /* read create device message for storage bus offset */
1905 if (visorchannel_read(controlvm_channel,
1906 local_crash_msg_offset,
1907 &local_crash_bus_msg,
1908 sizeof(struct controlvm_message)) < 0) {
1909 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1910 POSTCODE_SEVERITY_ERR);
1911 return;
1912 }
1913
1914 /* read create device message for storage device */
1915 if (visorchannel_read(controlvm_channel,
1916 local_crash_msg_offset +
1917 sizeof(struct controlvm_message),
1918 &local_crash_dev_msg,
1919 sizeof(struct controlvm_message)) < 0) {
1920 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1921 POSTCODE_SEVERITY_ERR);
1922 return;
1923 }
1924
1925 /* reuse IOVM create bus message */
1926 if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
1927 bus_create(&local_crash_bus_msg);
1928 } else {
1929 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1930 POSTCODE_SEVERITY_ERR);
1931 return;
1932 }
1933
1934 /* reuse create device message for storage device */
1935 if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
1936 my_device_create(&local_crash_dev_msg);
1937 } else {
1938 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1939 POSTCODE_SEVERITY_ERR);
1940 return;
1941 }
1942 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1943 return;
1944
1945 cleanup:
1946
1947 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1948
1949 queue_delayed_work(periodic_controlvm_workqueue,
1950 &periodic_controlvm_work, poll_jiffies);
1951 }
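
/*
 * Channel layout assumed by the reads above: the saved crash messages
 * sit back to back at saved_crash_message_offset, bus-create first,
 * device-create second (a sketch of the expected contents, matching the
 * visorchannel_read() offsets, not a separate contract):
 *
 *   saved_crash_message_offset + 0
 *       struct controlvm_message   CONTROLVM_BUS_CREATE
 *   saved_crash_message_offset + sizeof(struct controlvm_message)
 *       struct controlvm_message   CONTROLVM_DEVICE_CREATE
 */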
1952
1953 static void
1954 bus_create_response(ulong bus_no, int response)
1955 {
1956 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
1957 }
1958
1959 static void
1960 bus_destroy_response(ulong bus_no, int response)
1961 {
1962 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
1963 }
1964
1965 static void
1966 device_create_response(ulong bus_no, ulong dev_no, int response)
1967 {
1968 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
1969 }
1970
1971 static void
1972 device_destroy_response(ulong bus_no, ulong dev_no, int response)
1973 {
1974 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
1975 }
1976
1977 void
1978 visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
1979 {
1980 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1981 bus_no, dev_no, response,
1982 segment_state_standby);
1983 }
1984 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
1985
1986 static void
1987 device_resume_response(ulong bus_no, ulong dev_no, int response)
1988 {
1989 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1990 bus_no, dev_no, response,
1991 segment_state_running);
1992 }
1993
1994 BOOL
1995 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
1996 {
1997 void *p = findbus(&bus_info_list, bus_no);
1998
1999 if (!p)
2000 return FALSE;
2001 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2002 return TRUE;
2003 }
2004 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2005
2006 BOOL
2007 visorchipset_set_bus_context(ulong bus_no, void *context)
2008 {
2009 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2010
2011 if (!p)
2012 return FALSE;
2013 p->bus_driver_context = context;
2014 return TRUE;
2015 }
2016 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2017
2018 BOOL
2019 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2020 struct visorchipset_device_info *dev_info)
2021 {
2022 void *p = finddevice(&dev_info_list, bus_no, dev_no);
2023
2024 if (!p)
2025 return FALSE;
2026 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2027 return TRUE;
2028 }
2029 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2030
2031 BOOL
2032 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2033 {
2034 struct visorchipset_device_info *p =
2035 finddevice(&dev_info_list, bus_no, dev_no);
2036
2037 if (!p)
2038 return FALSE;
2039 p->bus_driver_context = context;
2040 return TRUE;
2041 }
2042 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
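
/*
 * Hypothetical sketch of how a bus driver might pair the context
 * accessors above; mydrv_state and mydrv_attach() are illustrative
 * names only, not part of this driver:
 */
#if 0
static int mydrv_attach(ulong bus_no)
{
	struct visorchipset_bus_info bi;
	struct mydrv_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;
	/* stash driver-private data with the bus... */
	if (!visorchipset_set_bus_context(bus_no, state)) {
		kfree(state);
		return -ENODEV;	/* no such bus */
	}
	/* ...and recover it later from a copy of the bus info */
	if (visorchipset_get_bus_info(bus_no, &bi))
		state = bi.bus_driver_context;
	return 0;
}
#endif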
2043
2044 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2045 */
2046 void *
2047 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2048 char *fn, int ln)
2049 {
2050 gfp_t gfp;
2051 void *p;
2052
2053 if (ok_to_block)
2054 gfp = GFP_KERNEL;
2055 else
2056 gfp = GFP_ATOMIC;
2057 /* __GFP_NORETRY means "ok to fail", meaning
2058 * kmem_cache_alloc() can return NULL, implying the caller CAN
2059 * cope with failure. If you do NOT specify __GFP_NORETRY,
2060 * Linux will go to extreme measures to get memory for you
2061 * (like, invoke oom killer), which will probably cripple the
2062 * system.
2063 */
2064 gfp |= __GFP_NORETRY;
2065 p = kmem_cache_alloc(pool, gfp);
2066 if (!p)
2067 return NULL;
2068
2069 atomic_inc(&visorchipset_cache_buffers_in_use);
2070 return p;
2071 }
2072
2073 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2074 */
2075 void
2076 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2077 {
2078 if (!p)
2079 return;
2080
2081 atomic_dec(&visorchipset_cache_buffers_in_use);
2082 kmem_cache_free(pool, p);
2083 }
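
/*
 * Typical pairing of the cache wrappers above (a sketch; "pool" stands
 * for any kmem_cache this driver creates, e.g. putfile_buffer_list_pool):
 */
#if 0
	void *buf;

	/* ok_to_block == FALSE selects GFP_ATOMIC for atomic context;
	 * __GFP_NORETRY (added internally) makes a NULL return an
	 * expected, tolerable outcome rather than an OOM-killer trigger.
	 */
	buf = visorchipset_cache_alloc(pool, FALSE, __FILE__, __LINE__);
	if (buf) {
		/* ... use buf ... */
		visorchipset_cache_free(pool, buf, __FILE__, __LINE__);
	}
#endif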
2084
2085 static ssize_t chipsetready_store(struct device *dev,
2086 struct device_attribute *attr, const char *buf, size_t count)
2087 {
2088 char msgtype[64];
2089
2090 if (sscanf(buf, "%63s", msgtype) != 1)
2091 return -EINVAL;
2092
2093 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2094 chipset_events[0] = 1;
2095 return count;
2096 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2097 chipset_events[1] = 1;
2098 return count;
2099 }
2100 return -EINVAL;
2101 }
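
/*
 * Example interaction with the attribute above (path hedged; the exact
 * sysfs location depends on how the attribute group is registered):
 *
 *   echo MODULES_LOADED > .../visorchipset/.../chipsetready
 *   echo CALLHOMEDISK_MOUNTED > .../chipsetready
 *
 * Each recognized string latches one slot in chipset_events[]; once the
 * required events are present and visorchipset_holdchipsetready is set,
 * the held CHIPSET_READY response is released from
 * controlvm_periodic_work().
 */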
2102
2103 /* The parahotplug/devicedisabled interface gets called by our support script
2104 * when an SR-IOV device has been shut down. The ID is passed to the script
2105 * and then passed back when the device has been removed.
2106 */
2107 static ssize_t devicedisabled_store(struct device *dev,
2108 struct device_attribute *attr, const char *buf, size_t count)
2109 {
2110 uint id;
2111
2112 if (kstrtouint(buf, 10, &id) != 0)
2113 return -EINVAL;
2114
2115 parahotplug_request_complete(id, 0);
2116 return count;
2117 }
2118
2119 /* The parahotplug/deviceenabled interface gets called by our support script
2120 * when an SR-IOV device has been recovered. The ID is passed to the script
2121 * and then passed back when the device has been brought back up.
2122 */
2123 static ssize_t deviceenabled_store(struct device *dev,
2124 struct device_attribute *attr, const char *buf, size_t count)
2125 {
2126 uint id;
2127
2128 if (kstrtouint(buf, 10, &id) != 0)
2129 return -EINVAL;
2130
2131 parahotplug_request_complete(id, 1);
2132 return count;
2133 }
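
/*
 * The parahotplug round trip these two attributes complete, from the
 * support script's point of view (sysfs paths hedged as above):
 *
 *   1. the driver gets CONTROLVM_DEVICE_CHANGESTATE with
 *      flags.phys_device set and queues a request under a unique id
 *   2. the script shuts down or recovers the SR-IOV device
 *   3. the script echoes the id back:
 *        echo $id > .../parahotplug/devicedisabled   (device is gone)
 *        echo $id > .../parahotplug/deviceenabled    (device is back)
 *   4. parahotplug_request_complete(id, 0 or 1) finishes the request so
 *      the controlvm response can be sent
 */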
2134
2135 static int __init
2136 visorchipset_init(void)
2137 {
2138 int rc = 0;
2139 HOSTADDRESS addr;
2140
2141 if (!unisys_spar_platform)
2142 return -ENODEV;
2143
2144 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2145 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2146 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2147 memset(&livedump_info, 0, sizeof(livedump_info));
2148 atomic_set(&livedump_info.buffers_in_use, 0);
2149
2150 if (visorchipset_testvnic) {
2151 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc, DIAG_SEVERITY_ERR);
2152 rc = -ENODEV;
2153 goto cleanup;
2154 }
2155
2156 addr = controlvm_get_channel_address();
2157 if (addr != 0) {
2158 controlvm_channel =
2159 visorchannel_create_with_lock
2160 (addr,
2161 sizeof(struct spar_controlvm_channel_protocol),
2162 spar_controlvm_channel_protocol_uuid);
2163 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2164 visorchannel_get_header(controlvm_channel))) {
2165 initialize_controlvm_payload();
2166 } else {
2167 visorchannel_destroy(controlvm_channel);
2168 controlvm_channel = NULL;
2169 return -ENODEV;
2170 }
2171 } else {
2172 return -ENODEV;
2173 }
2174
2175 MajorDev = MKDEV(visorchipset_major, 0);
2176 rc = visorchipset_file_init(MajorDev, &controlvm_channel);
2177 if (rc < 0) {
2178 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2179 goto cleanup;
2180 }
2181
2182 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2183
2184 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2185
2186 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2187
2188 putfile_buffer_list_pool =
2189 kmem_cache_create(putfile_buffer_list_pool_name,
2190 sizeof(struct putfile_buffer_entry),
2191 0, SLAB_HWCACHE_ALIGN, NULL);
2192 if (!putfile_buffer_list_pool) {
2193 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2194 rc = -ENOMEM;
2195 goto cleanup;
2196 }
2197 if (!visorchipset_disable_controlvm) {
2198 /* if booting in a crash kernel */
2199 if (visorchipset_crash_kernel)
2200 INIT_DELAYED_WORK(&periodic_controlvm_work,
2201 setup_crash_devices_work_queue);
2202 else
2203 INIT_DELAYED_WORK(&periodic_controlvm_work,
2204 controlvm_periodic_work);
2205 periodic_controlvm_workqueue =
2206 create_singlethread_workqueue("visorchipset_controlvm");
2207
2208 if (!periodic_controlvm_workqueue) {
2209 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2210 DIAG_SEVERITY_ERR);
2211 rc = -ENOMEM;
2212 goto cleanup;
2213 }
2214 most_recent_message_jiffies = jiffies;
2215 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2216 rc = queue_delayed_work(periodic_controlvm_workqueue,
2217 &periodic_controlvm_work, poll_jiffies);
2218 if (rc < 0) {
2219 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2220 DIAG_SEVERITY_ERR);
2221 goto cleanup;
2222 }
2223 }
2224
2225 Visorchipset_platform_device.dev.devt = MajorDev;
2226 rc = platform_device_register(&Visorchipset_platform_device);
2227 if (rc < 0) {
2228 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2229 goto cleanup;
2230 }
2231 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2232 rc = 0;
2233 cleanup:
2234 if (rc) {
2235 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2236 POSTCODE_SEVERITY_ERR);
2237 }
2238 return rc;
2239 }
2240
2241 static void
2242 visorchipset_exit(void)
2243 {
2244 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2245
2246 if (!visorchipset_disable_controlvm) {
2249 cancel_delayed_work(&periodic_controlvm_work);
2250 flush_workqueue(periodic_controlvm_workqueue);
2251 destroy_workqueue(periodic_controlvm_workqueue);
2252 periodic_controlvm_workqueue = NULL;
2253 destroy_controlvm_payload_info(&controlvm_payload_info);
2254 }
2255 if (putfile_buffer_list_pool) {
2256 kmem_cache_destroy(putfile_buffer_list_pool);
2257 putfile_buffer_list_pool = NULL;
2258 }
2259
2260 cleanup_controlvm_structures();
2261
2262 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2263
2264 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2265
2266 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2267
2268 visorchannel_destroy(controlvm_channel);
2269
2270 visorchipset_file_cleanup();
2271 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2272 }
2273
2274 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2275 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2276 int visorchipset_testvnic = 0;
2277
2278 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2279 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2280 int visorchipset_testvnicclient = 0;
2281
2282 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2283 MODULE_PARM_DESC(visorchipset_testmsg,
2284 "1 to manufacture the chipset, bus, and switch messages");
2285 int visorchipset_testmsg = 0;
2286
2287 module_param_named(major, visorchipset_major, int, S_IRUGO);
2288 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2289 int visorchipset_major = 0;
2290
2291 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2292 MODULE_PARM_DESC(visorchipset_serverregwait,
2293 "1 to have the module wait for the visor bus to register");
2294 int visorchipset_serverregwait = 0; /* default is off */
2295 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2296 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2297 int visorchipset_clientregwait = 1; /* default is on */
2298 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2299 MODULE_PARM_DESC(visorchipset_testteardown,
2300 "1 to test teardown of the chipset, bus, and switch");
2301 int visorchipset_testteardown = 0; /* default is off */
2302 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2303 S_IRUGO);
2304 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2305 "1 to disable polling of controlVm channel");
2306 int visorchipset_disable_controlvm = 0; /* default is off */
2307 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2308 MODULE_PARM_DESC(visorchipset_crash_kernel,
2309 "1 means we are running in crash kernel");
2310 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2311 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2312 int, S_IRUGO);
2313 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2314 "1 to hold response to CHIPSET_READY");
2315 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2316 * response immediately */
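
/*
 * Example module load using a few of the parameters above (the names
 * are the module_param_named() aliases, not the C variable names):
 *
 *   modprobe visorchipset major=0 holdchipsetready=1 clientregwait=1
 */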
2317 module_init(visorchipset_init);
2318 module_exit(visorchipset_exit);
2319
2320 MODULE_AUTHOR("Unisys");
2321 MODULE_LICENSE("GPL");
2322 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2323 VERSION);
2324 MODULE_VERSION(VERSION);