staging: unisys: fix CamelCase "Away" goto label
deliverable/linux.git: drivers/staging/unisys/visorchipset/visorchipset_main.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include "globals.h"
#include "visorchipset.h"
#include "procobjecttree.h"
#include "visorchannel.h"
#include "periodic_work.h"
#include "file.h"
#include "parser.h"
#include "uisutils.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"

#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
#define TEST_VNIC_PHYSITF "eth0"	/* physical network itf for
					 * vnic loopback test */
#define TEST_VNIC_SWITCHNO 1
#define TEST_VNIC_BUSNO 9

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static ulong most_recent_message_jiffies;	/* when we got our last
						 * controlvm message */
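
/* Convenience helper: returns its argument, or "" when the argument is
 * NULL, so callers can hand possibly-NULL strings to printf-style code.
 */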
static inline char *
NONULLSTR(char *s)
{
	if (s)
		return s;
	return "";
}

static int serverregistered;
static int clientregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct controlvm_message_header g_diag_msg_hdr;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_header g_del_dump_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static ulong g_diagpool_bus_no = 0xffffff;
static ulong g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

/* Only VNIC and VHBA channels are sent to visorclientbus (aka
 * "visorhackbus")
 */
#define FOR_VISORHACKBUS(channel_type_guid) \
	(((uuid_le_cmp(channel_type_guid,\
		       spar_vnic_channel_protocol_uuid) == 0) ||\
	(uuid_le_cmp(channel_type_guid,\
		     spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
static struct controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
} controlvm_payload_info;
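
/* The request payload pool itself is mapped with ioremap_cache() by
 * initialize_controlvm_payload_info() and unmapped again by
 * destroy_controlvm_payload_info(), both below.
 */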

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
static struct livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	BOOL gettextdump_outstanding;
	u32 crc32;
	ulong length;
	atomic_t buffers_in_use;
	ulong destination;
} livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message ControlVm_Pending_Msg;
static BOOL ControlVm_Pending_Msg_Valid = FALSE;

/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
 * TRANSMIT_FILE PutFile payloads.
 */
static struct kmem_cache *Putfile_buffer_list_pool;
static const char Putfile_buffer_list_pool_name[] =
	"controlvm_putfile_buffer_list_pool";
/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(Putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;	/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock, and if you set <0,
	 * it is your responsibility to also free up all of the other objects
	 * in this struct (like input_buffer_list, active_buf.parser_ctx)
	 * before releasing the lock
	 */
	int completion_status;
};

static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(Parahotplug_request_list);
static DEFINE_SPINLOCK(Parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Sets of bus/device notifier callbacks, filled in when the visorbus
 * (server) and visorclientbus (client) drivers register via
 * visorchipset_register_busdev_server() / _client() below.
 */
static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;

static void bus_create_response(ulong busNo, int response);
static void bus_destroy_response(ulong busNo, int response);
static void device_create_response(ulong busNo, ulong devNo, int response);
static void device_destroy_response(ulong busNo, ulong devNo, int response);
static void device_resume_response(ulong busNo, ulong devNo, int response);

static struct visorchipset_busdev_responders BusDev_Responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t MajorDev = -1;	/**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};
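
/* Given the platform device name "visorchipset" (see below) and the named
 * attribute groups above, these attributes appear in sysfs as, for example:
 *   /sys/devices/platform/visorchipset/install/toolaction
 *   /sys/devices/platform/visorchipset/guest/chipsetready
 *   /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */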

/* /sys/devices/platform/visorchipset */
static struct platform_device Visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msgHdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msgHdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msgHdr, int response,
		struct spar_segment_state state);

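/* Illustrative shell usage for the attributes that follow (the value
 * semantics are defined by the back end, not by this driver):
 *   cat /sys/devices/platform/visorchipset/install/toolaction
 *   echo 2 > /sys/devices/platform/visorchipset/install/toolaction
 * Each _store parses a decimal value and writes it into the corresponding
 * controlvm channel field; each _show reads that field back.
 */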
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 toolAction;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &toolAction, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 toolAction;
	int ret;

	if (kstrtou8(buf, 10, &toolAction) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol, tool_action),
		&toolAction, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efiSparIndication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efiSparIndication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efiSparIndication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efiSparIndication;

	if (kstrtoint(buf, 10, &val) != 0)
		return -EINVAL;

	efiSparIndication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 efi_spar_ind),
			&(efiSparIndication),
			sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel, offsetof(
		struct spar_controlvm_channel_protocol, installation_error),
		&error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 installation_error),
			&error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 textId;

	visorchannel_read(controlvm_channel, offsetof(
		struct spar_controlvm_channel_protocol, installation_text_id),
		&textId, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 textId;
	int ret;

	if (kstrtou32(buf, 10, &textId) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 installation_text_id),
			&textId, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remainingSteps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remainingSteps,
			  sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remainingSteps;
	int ret;

	if (kstrtou16(buf, 10, &remainingSteps) != 0)
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 installation_remaining_steps),
			&remainingSteps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);

	kfree(p->name);
	p->name = NULL;

	kfree(p->description);
	p->description = NULL;

	p->state.created = 0;
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *)(v);

	p->state.created = 0;
	memset(p, 0, sizeof(struct visorchipset_device_info));
}

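/* send_msg starts at 1 and is ANDed with each entry, so this returns 1
 * only when every slot in chipset_events[] has been set.
 */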
static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;
	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;
	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

void
visorchipset_register_busdev_server(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&BusDev_Server_Notifiers, 0,
		       sizeof(BusDev_Server_Notifiers));
		serverregistered = 0;	/* clear flag */
	} else {
		BusDev_Server_Notifiers = *notifiers;
		serverregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = BusDev_Responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);

void
visorchipset_register_busdev_client(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&BusDev_Client_Notifiers, 0,
		       sizeof(BusDev_Client_Notifiers));
		clientregistered = 0;	/* clear flag */
	} else {
		BusDev_Client_Notifiers = *notifiers;
		clientregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = BusDev_Responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset(bolts)",
				     "visorchipset", VERSION, NULL);
	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it). */
	features =
	    inmsg->cmd.init_chipset.features &
	    ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver. */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msgHdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msgHdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msgHdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet. */
	if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msgHdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msgHdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msgHdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

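/* Stash a crash-time controlvm message in the channel's saved-message
 * area.  Per the writes below, the CRASH_BUS message lives at
 * saved_crash_message_offset and the device message immediately follows
 * it (offset + sizeof(struct controlvm_message)); the same count/offset
 * fields are consulted by setup_crash_devices_work_queue() below.
 */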
void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 localSavedCrashMsgOffset;
	u16 localSavedCrashMsgCount;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &localSavedCrashMsgCount, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 localSavedCrashMsgCount,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       localSavedCrashMsgOffset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		if (visorchannel_write(controlvm_channel,
				       localSavedCrashMsgOffset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);

static void
bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
{
	struct visorchipset_bus_info *p = NULL;
	BOOL need_clear = FALSE;

	p = findbus(&bus_info_list, busNo);
	if (!p)
		return;

	if (response < 0) {
		if ((cmdId == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			delbusdevices(&dev_info_list, busNo);
	} else {
		if (cmdId == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmdId == CONTROLVM_BUS_DESTROY)
			need_clear = TRUE;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32) cmdId)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		delbusdevices(&dev_info_list, busNo);
	}
}

static void
device_changestate_responder(enum controlvm_id cmdId,
			     ulong busNo, ulong devNo, int response,
			     struct spar_segment_state responseState)
{
	struct visorchipset_device_info *p = NULL;
	struct controlvm_message outmsg;

	p = finddevice(&dev_info_list, busNo, devNo);
	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmdId)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = busNo;
	outmsg.cmd.device_change_state.dev_no = devNo;
	outmsg.cmd.device_change_state.state = responseState;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
		 int response)
{
	struct visorchipset_device_info *p = NULL;
	BOOL need_clear = FALSE;

	p = finddevice(&dev_info_list, busNo, devNo);
	if (!p)
		return;
	if (response >= 0) {
		if (cmdId == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmdId == CONTROLVM_DEVICE_DESTROY)
			need_clear = TRUE;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32) cmdId)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

static void
bus_epilog(u32 busNo,
	   u32 cmd, struct controlvm_message_header *msgHdr,
	   int response, BOOL needResponse)
{
	BOOL notified = FALSE;

	struct visorchipset_bus_info *pBusInfo = findbus(&bus_info_list, busNo);

	if (!pBusInfo)
		return;

	if (needResponse) {
		memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
		       sizeof(struct controlvm_message_header));
	} else
		pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			/* We can't tell from the bus_create
			 * information which of our 2 bus flavors the
			 * devices on this bus will ultimately end up.
			 * FORTUNATELY, it turns out it is harmless to
			 * send the bus_create to both of them.  We can
			 * narrow things down a little bit, though,
			 * because we know:
			 * - BusDev_Server can handle either server or
			 *   client devices
			 * - BusDev_Client can handle ONLY client devices
			 */
			if (BusDev_Server_Notifiers.bus_create) {
				(*BusDev_Server_Notifiers.bus_create) (busNo);
				notified = TRUE;
			}
			if ((!pBusInfo->flags.server) /*client */ &&
			    BusDev_Client_Notifiers.bus_create) {
				(*BusDev_Client_Notifiers.bus_create) (busNo);
				notified = TRUE;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (BusDev_Server_Notifiers.bus_destroy) {
				(*BusDev_Server_Notifiers.bus_destroy) (busNo);
				notified = TRUE;
			}
			if ((!pBusInfo->flags.server) /*client */ &&
			    BusDev_Client_Notifiers.bus_destroy) {
				(*BusDev_Client_Notifiers.bus_destroy) (busNo);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, busNo, response);
	up(&notifier_lock);
}

static void
device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msgHdr, int response,
	      BOOL needResponse, BOOL for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers = NULL;
	BOOL notified = FALSE;

	struct visorchipset_device_info *pDevInfo =
		finddevice(&dev_info_list, busNo, devNo);
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!pDevInfo)
		return;

	if (for_visorbus)
		notifiers = &BusDev_Server_Notifiers;
	else
		notifiers = &BusDev_Client_Notifiers;
	if (needResponse) {
		memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
		       sizeof(struct controlvm_message_header));
	} else
		pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (busNo, devNo);
				notified = TRUE;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (busNo,
								     devNo);
					notified = TRUE;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (busNo,
								    devNo);
					notified = TRUE;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (busNo == g_diagpool_bus_no &&
				    devNo == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env
					    (&Visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (busNo, devNo);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, busNo, devNo, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong busNo = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *pBusInfo = NULL;

	pBusInfo = findbus(&bus_info_list, busNo);
	if (pBusInfo && (pBusInfo->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto Away;
	}
	pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
	if (!pBusInfo) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto Away;
	}

	INIT_LIST_HEAD(&pBusInfo->entry);
	pBusInfo->bus_no = busNo;
	pBusInfo->dev_no = cmd->create_bus.dev_count;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	pBusInfo->flags.server = inmsg->hdr.flags.server;
	pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
	pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	pBusInfo->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&pBusInfo->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);

Away:
	bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong busNo = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *pBusInfo;
	int rc = CONTROLVM_RESP_SUCCESS;

	pBusInfo = findbus(&bus_info_list, busNo);
	if (!pBusInfo) {
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto Away;
	}
	if (pBusInfo->state.created == 0) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto Away;
	}

Away:
	bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong busNo = cmd->configure_bus.bus_no;
	struct visorchipset_bus_info *pBusInfo = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	busNo = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);

	pBusInfo = findbus(&bus_info_list, busNo);
	if (!pBusInfo) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto Away;
	}
	if (pBusInfo->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto Away;
	}
	/* TBD - add this check to other commands also... */
	if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		goto Away;
	}

	pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
	pBusInfo->partition_uuid = parser_id_get(parser_ctx);
	parser_param_start(parser_ctx, PARSERSTRING_NAME);
	pBusInfo->name = parser_string_get(parser_ctx);

	visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
	POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
Away:
	bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong busNo = cmd->create_device.bus_no;
	ulong devNo = cmd->create_device.dev_no;
	struct visorchipset_device_info *pDevInfo = NULL;
	struct visorchipset_bus_info *pBusInfo = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	pDevInfo = finddevice(&dev_info_list, busNo, devNo);
	if (pDevInfo && (pDevInfo->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto Away;
	}
	pBusInfo = findbus(&bus_info_list, busNo);
	if (!pBusInfo) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto Away;
	}
	if (pBusInfo->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto Away;
	}
	pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
	if (!pDevInfo) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto Away;
	}

	INIT_LIST_HEAD(&pDevInfo->entry);
	pDevInfo->bus_no = busNo;
	pDevInfo->dev_no = devNo;
	pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
	pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	pDevInfo->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	pDevInfo->chan_info.intr = cmd->create_device.intr;
	list_add(&pDevInfo->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
			 POSTCODE_SEVERITY_INFO);
Away:
	/* get the bus and devNo for DiagPool channel */
	if (pDevInfo &&
	    is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = busNo;
		g_diagpool_dev_no = devNo;
	}
	device_epilog(busNo, devNo, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1,
		      FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong busNo = cmd->device_change_state.bus_no;
	ulong devNo = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *pDevInfo = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	pDevInfo = finddevice(&dev_info_list, busNo, devNo);
	if (!pDevInfo) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
		goto Away;
	}
	if (pDevInfo->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
Away:
	if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
		device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
			      &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					pDevInfo->chan_info.channel_type_uuid));
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	ulong busNo = cmd->destroy_device.bus_no;
	ulong devNo = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *pDevInfo = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	pDevInfo = finddevice(&dev_info_list, busNo, devNo);
	if (!pDevInfo) {
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
		goto Away;
	}
	if (pDevInfo->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

Away:
	if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
		device_epilog(busNo, devNo, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					pDevInfo->chan_info.channel_type_uuid));
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success, or a negative CONTROLVM_RESP_ERROR_* value on failure.
 */
static int
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
				  struct controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto Away;
	}
	memset(info, 0, sizeof(struct controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto Away;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto Away;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

Away:
	if (rc < 0) {
		if (payload != NULL) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct controlvm_payload_info *info)
{
	if (info->ptr != NULL) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
	HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payloadOffset = 0;
	u32 payloadBytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payloadOffset, sizeof(payloadOffset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payloadBytes, sizeof(payloadBytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payloadOffset, payloadBytes,
					  &controlvm_payload_info);
}

/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);

int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);

static void
chipset_ready(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msgHdr, rc);
	if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been
		 * loaded and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msgHdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected)
		controlvm_respond(msgHdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected)
		controlvm_respond(msgHdr, rc);
}

/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static BOOL
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return FALSE;
		return TRUE;
	}
	return FALSE;
}

/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
 * to get called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
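/* Note: per the sprintf() calls below, dev_no packs a PCI-style address,
 * with dev_no >> 3 as the device number and the low three bits
 * (dev_no & 0x7) as the function number.
 */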
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&Parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (time_after_eq(jiffies, req->expiration)) {
			list_del(pos);
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr,
					CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
		}
	}

	spin_unlock(&Parahotplug_request_list_lock);
}

/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&Parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&Parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&Parahotplug_request_list_lock);
	return -1;
}

/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
				CONTROLVM_RESP_SUCCESS, inmsg->cmd.
				device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&Parahotplug_request_list_lock);
		list_add_tail(&(req->list), &Parahotplug_request_list);
		spin_unlock(&Parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}

/* Process a controlvm message.
 * Return result:
 *    FALSE - this function will return FALSE only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    TRUE  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static BOOL
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parametersAddr = 0;
	u32 parametersBytes = 0;
	struct parser_context *parser_ctx = NULL;
	BOOL isLocalAddr = FALSE;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	isLocalAddr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return TRUE;
	parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
	parametersBytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parametersAddr != 0 && parametersBytes != 0) {
		BOOL retry = FALSE;

		parser_ctx =
		    parser_init_byte_stream(parametersAddr, parametersBytes,
					    isLocalAddr, &retry);
		if (!parser_ctx && retry)
			return FALSE;
	}

	if (!isLocalAddr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			g_diag_msg_hdr = inmsg.hdr;
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now; just send a success response */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return TRUE;
}

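/* Ask the hypervisor, via a vmcall, for the physical address of the
 * controlvm channel; returns 0 if the vmcall fails.  The size returned by
 * the vmcall is ignored here.
 */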
static HOSTADDRESS controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}

static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	BOOL gotACommand = FALSE;
	BOOL handle_command_failed = FALSE;
	static u64 Poll_Count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto Away;
	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
1780 if (visorchipset_clientregwait && !clientregistered)
1781 goto Away;
1782
1783 Poll_Count++;
1784 if (Poll_Count >= 250)
1785 ; /* keep going */
1786 else
1787 goto Away;
1788
1789 /* Check events to determine if response to CHIPSET_READY
1790 * should be sent
1791 */
1792 if (visorchipset_holdchipsetready &&
1793 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1794 if (check_chipset_events() == 1) {
1795 controlvm_respond(&g_chipset_msg_hdr, 0);
1796 clear_chipset_events();
1797 memset(&g_chipset_msg_hdr, 0,
1798 sizeof(struct controlvm_message_header));
1799 }
1800 }
1801
1802 while (visorchannel_signalremove(controlvm_channel,
1803 CONTROLVM_QUEUE_RESPONSE,
1804 &inmsg))
1805 ;
1806 if (!gotACommand) {
1807 if (ControlVm_Pending_Msg_Valid) {
1808 /* we throttled processing of a prior
1809 * msg, so try to process it again
1810 * rather than reading a new one
1811 */
1812 inmsg = ControlVm_Pending_Msg;
1813 ControlVm_Pending_Msg_Valid = FALSE;
1814 gotACommand = TRUE;
1815 } else
1816 gotACommand = read_controlvm_event(&inmsg);
1817 }
1818
1819 handle_command_failed = FALSE;
1820 while (gotACommand && (!handle_command_failed)) {
1821 most_recent_message_jiffies = jiffies;
1822 if (handle_command(inmsg,
1823 visorchannel_get_physaddr
1824 (controlvm_channel)))
1825 gotACommand = read_controlvm_event(&inmsg);
1826 else {
1827 /* this is a scenario where throttling
1828 * is required, but probably NOT an
1829 * error...; we stash the current
1830 * controlvm msg so we will attempt to
1831 * reprocess it on our next loop
1832 */
1833 handle_command_failed = TRUE;
1834 ControlVm_Pending_Msg = inmsg;
1835 ControlVm_Pending_Msg_Valid = TRUE;
1836 }
1837 }
1838
1839 /* parahotplug_worker */
1840 parahotplug_process_list();
1841
1842 Away:
1843
1844 if (time_after(jiffies,
1845 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1846 /* it's been longer than MIN_IDLE_SECONDS since we
1847 * processed our last controlvm message; slow down the
1848 * polling
1849 */
1850 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1851 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1852 } else {
1853 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1854 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1855 }
1856
1857 queue_delayed_work(periodic_controlvm_workqueue,
1858 &periodic_controlvm_work, poll_jiffies);
1859 }
1860
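/* One-shot work item used when booting a crash (kdump) kernel: replay
 * the CHIPSET_INIT message, then the bus-create and device-create
 * messages that were saved in the controlvm channel, so the storage
 * device needed for the dump comes back up.
 */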
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;

	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

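/* The following *_response() helpers relay the completion status of a
 * bus or device operation back to Command via bus_responder(),
 * device_responder(), or device_changestate_responder().
 */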
static void
bus_create_response(ulong bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
}

static void
bus_destroy_response(ulong bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
}

static void
device_create_response(ulong bus_no, ulong dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
}

static void
device_destroy_response(ulong bus_no, ulong dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
}

void
visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);

static void
device_resume_response(ulong bus_no, ulong dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_running);
}

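/* Copy the tracked state for bus bus_no into the caller's buffer;
 * returns FALSE if the bus is unknown.
 */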
BOOL
visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
{
	void *p = findbus(&bus_info_list, bus_no);

	if (!p)
		return FALSE;
	memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);

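/* Stash a bus-driver-private pointer in the tracked state for bus
 * bus_no; returns FALSE if the bus is unknown.
 */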
BOOL
visorchipset_set_bus_context(ulong bus_no, void *context)
{
	struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);

	if (!p)
		return FALSE;
	p->bus_driver_context = context;
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);

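/* Copy the tracked state for device (bus_no, dev_no) into the caller's
 * buffer; returns FALSE if the device is unknown.
 */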
BOOL
visorchipset_get_device_info(ulong bus_no, ulong dev_no,
			     struct visorchipset_device_info *dev_info)
{
	void *p = finddevice(&dev_info_list, bus_no, dev_no);

	if (!p)
		return FALSE;
	memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);

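/* Stash a bus-driver-private pointer in the tracked state for device
 * (bus_no, dev_no); returns FALSE if the device is unknown.
 */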
BOOL
visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
{
	struct visorchipset_device_info *p =
	    finddevice(&dev_info_list, bus_no, dev_no);

	if (!p)
		return FALSE;
	p->bus_driver_context = context;
	return TRUE;
}
EXPORT_SYMBOL_GPL(visorchipset_set_device_context);

/* Generic wrapper function for allocating memory from a kmem_cache pool.
 */
void *
visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
			 char *fn, int ln)
{
	gfp_t gfp;
	void *p;

	if (ok_to_block)
		gfp = GFP_KERNEL;
	else
		gfp = GFP_ATOMIC;
	/* __GFP_NORETRY means "ok to fail", meaning
	 * kmem_cache_alloc() can return NULL, implying the caller CAN
	 * cope with failure.  If you do NOT specify __GFP_NORETRY,
	 * Linux will go to extreme measures to get memory for you
	 * (like, invoke oom killer), which will probably cripple the
	 * system.
	 */
	gfp |= __GFP_NORETRY;
	p = kmem_cache_alloc(pool, gfp);
	if (!p)
		return NULL;

	atomic_inc(&Visorchipset_cache_buffers_in_use);
	return p;
}

/* Generic wrapper function for freeing memory from a kmem_cache pool.
 */
void
visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
{
	if (!p)
		return;

	atomic_dec(&Visorchipset_cache_buffers_in_use);
	kmem_cache_free(pool, p);
}

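/* sysfs store for the chipsetready attribute: userspace writes
 * "CALLHOMEDISK_MOUNTED" or "MODULES_LOADED" here to record the
 * corresponding chipset event (consumed by check_chipset_events()).
 */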
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
		chipset_events[0] = 1;
		return count;
	} else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}

/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	uint id;

	if (kstrtouint(buf, 10, &id) != 0)
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}

/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	uint id;

	if (kstrtouint(buf, 10, &id) != 0)
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}

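/* Module entry point: locate and validate the controlvm channel, set up
 * the character device and the periodic (or crash-recovery) workqueue,
 * and register the platform device.
 */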
static int __init
visorchipset_init(void)
{
	int rc = 0;
	HOSTADDRESS addr;

	if (!unisys_spar_platform)
		return -ENODEV;

	memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
	memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
	memset(&livedump_info, 0, sizeof(livedump_info));
	atomic_set(&livedump_info.buffers_in_use, 0);

	if (visorchipset_testvnic) {
		rc = -ENODEV;
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	addr = controlvm_get_channel_address();
	if (addr != 0) {
		controlvm_channel =
		    visorchannel_create_with_lock
		    (addr,
		     sizeof(struct spar_controlvm_channel_protocol),
		     spar_controlvm_channel_protocol_uuid);
		if (!controlvm_channel)
			return -ENODEV;
		if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
				visorchannel_get_header(controlvm_channel))) {
			initialize_controlvm_payload();
		} else {
			visorchannel_destroy(controlvm_channel);
			controlvm_channel = NULL;
			return -ENODEV;
		}
	} else {
		return -ENODEV;
	}

	MajorDev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(MajorDev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

	Putfile_buffer_list_pool =
	    kmem_cache_create(Putfile_buffer_list_pool_name,
			      sizeof(struct putfile_buffer_entry),
			      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!Putfile_buffer_list_pool) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	if (!visorchipset_disable_controlvm) {
		/* if booting in a crash kernel */
		if (visorchipset_crash_kernel)
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  setup_crash_devices_work_queue);
		else
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  controlvm_periodic_work);
		periodic_controlvm_workqueue =
		    create_singlethread_workqueue("visorchipset_controlvm");

		if (!periodic_controlvm_workqueue) {
			POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
					 DIAG_SEVERITY_ERR);
			rc = -ENOMEM;
			goto cleanup;
		}
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
		/* queue_delayed_work() returns a bool, not a negative
		 * errno, so failure must be tested with !, not < 0
		 */
		if (!queue_delayed_work(periodic_controlvm_workqueue,
					&periodic_controlvm_work,
					poll_jiffies)) {
			POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
					 DIAG_SEVERITY_ERR);
			rc = -EINVAL;
			goto cleanup;
		}
	}

	Visorchipset_platform_device.dev.devt = MajorDev;
	rc = platform_device_register(&Visorchipset_platform_device);
	if (rc < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC,
				 DIAG_SEVERITY_ERR);
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}

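/* Module exit: stop controlvm polling, release the caches and payload
 * info, and destroy the controlvm channel.
 */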
static void
visorchipset_exit(void)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	if (!visorchipset_disable_controlvm) {
		cancel_delayed_work(&periodic_controlvm_work);
		flush_workqueue(periodic_controlvm_workqueue);
		destroy_workqueue(periodic_controlvm_workqueue);
		periodic_controlvm_workqueue = NULL;
		destroy_controlvm_payload_info(&controlvm_payload_info);
	}
	if (Putfile_buffer_list_pool) {
		kmem_cache_destroy(Putfile_buffer_list_pool);
		Putfile_buffer_list_pool = NULL;
	}

	cleanup_controlvm_structures();

	memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup();
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
}

module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
int visorchipset_testvnic = 0;

module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
int visorchipset_testvnicclient = 0;

module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testmsg,
		 "1 to manufacture the chipset, bus, and switch messages");
int visorchipset_testmsg = 0;

module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");
int visorchipset_major = 0;

module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_serverregwait,
		 "1 to have the module wait for the visor bus to register");
int visorchipset_serverregwait = 0;	/* default is off */

module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_clientregwait,
		 "1 to have the module wait for the visorclientbus to register");
int visorchipset_clientregwait = 1;	/* default is on */

module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testteardown,
		 "1 to test teardown of the chipset, bus, and switch");
int visorchipset_testteardown = 0;	/* default is off */

module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
		   S_IRUGO);
MODULE_PARM_DESC(visorchipset_disable_controlvm,
		 "1 to disable polling of the controlvm channel");
int visorchipset_disable_controlvm = 0;	/* default is off */

module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_crash_kernel,
		 "1 means we are running in crash kernel");
int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */

module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
		 "1 to hold response to CHIPSET_READY");
int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
					* response immediately */

module_init(visorchipset_init);
module_exit(visorchipset_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);