staging: unisys: remove DBGINF, DBGVER, DEBUGDEV, and DEBUGDRV macros
[deliverable/linux.git] / drivers / staging / unisys / visorchipset / visorchipset_main.c
1 /* visorchipset_main.c
2 *
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18 #include "globals.h"
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
23 #include "file.h"
24 #include "parser.h"
25 #include "uniklog.h"
26 #include "uisutils.h"
27 #include "controlvmcompletionstatus.h"
28 #include "guestlinuxdebug.h"
29
30 #include <linux/nls.h>
31 #include <linux/netdevice.h>
32 #include <linux/platform_device.h>
33 #include <linux/uuid.h>
34
35 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
37 * vnic loopback test */
38 #define TEST_VNIC_SWITCHNO 1
39 #define TEST_VNIC_BUSNO 9
40
41 #define MAX_NAME_SIZE 128
42 #define MAX_IP_SIZE 50
43 #define MAXOUTSTANDINGCHANNELCOMMAND 256
44 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
47 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
48 * we switch to slow polling mode. As soon as we get a controlvm
49 * message, we switch back to fast polling mode.
50 */
51 #define MIN_IDLE_SECONDS 10
52 static ulong Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
53 static ulong Most_recent_message_jiffies; /* when we got our last
54 * controlvm message */
/* Return s unchanged, or the empty string when s is NULL; never NULL. */
static inline char *
NONULLSTR(char *s)
{
	return s ? s : "";
}
62
63 static int serverregistered;
64 static int clientregistered;
65
66 #define MAX_CHIPSET_EVENTS 2
67 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
68
69 static struct delayed_work Periodic_controlvm_work;
70 static struct workqueue_struct *Periodic_controlvm_workqueue;
71 static DEFINE_SEMAPHORE(NotifierLock);
72
73 static struct controlvm_message_header g_DiagMsgHdr;
74 static struct controlvm_message_header g_ChipSetMsgHdr;
75 static struct controlvm_message_header g_DelDumpMsgHdr;
76 static const uuid_le UltraDiagPoolChannelProtocolGuid =
77 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
78 /* 0xffffff is an invalid Bus/Device number */
79 static ulong g_diagpoolBusNo = 0xffffff;
80 static ulong g_diagpoolDevNo = 0xffffff;
81 static struct controlvm_message_packet g_DeviceChangeStatePacket;
82
83 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
84 * "visorhackbus")
85 */
86 #define FOR_VISORHACKBUS(channel_type_guid) \
87 (((uuid_le_cmp(channel_type_guid,\
88 spar_vnic_channel_protocol_uuid) == 0)\
89 || (uuid_le_cmp(channel_type_guid,\
90 spar_vhba_channel_protocol_uuid) == 0)))
91 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
92
93 #define is_diagpool_channel(channel_type_guid) \
94 (uuid_le_cmp(channel_type_guid, UltraDiagPoolChannelProtocolGuid) == 0)
95
96 static LIST_HEAD(BusInfoList);
97 static LIST_HEAD(DevInfoList);
98
99 static struct visorchannel *ControlVm_channel;
100
/* Describes the memory-mapped payload pool associated with the controlvm
 * channel (used to carry request payloads that do not fit in a message).
 */
struct controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};
107
108 /* Manages the request payload in the controlvm channel */
109 static struct controlvm_payload_info ControlVm_payload_info;
110
111 static struct channel_header *Test_Vnic_channel;
112
/* Per-conversation state for a live dump; the saved headers let us send
 * controlvm responses for each phase of the conversation later on.
 */
struct livedump_info {
	struct controlvm_message_header Dumpcapture_header;	/* saved CAPTURESTATE request hdr */
	struct controlvm_message_header Gettextdump_header;	/* saved GETTEXTDUMP request hdr */
	struct controlvm_message_header Dumpcomplete_header;	/* saved DUMP_COMPLETE request hdr */
	BOOL Gettextdump_outstanding;	/* TRUE while a GETTEXTDUMP is in flight */
	u32 crc32;	/* NOTE(review): presumably a running CRC of dump data;
			 * the DUMP_* handlers are outside this view — confirm */
	ulong length;	/* NOTE(review): presumably total dump length — confirm */
	atomic_t buffers_in_use;
	ulong destination;
};
123 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
124 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
125 */
126 static struct livedump_info LiveDump_info;
127
128 /* The following globals are used to handle the scenario where we are unable to
129 * offload the payload from a controlvm message due to memory requirements. In
130 * this scenario, we simply stash the controlvm message, then attempt to
131 * process it again the next time controlvm_periodic_work() runs.
132 */
133 static struct controlvm_message ControlVm_Pending_Msg;
134 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
135
136 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
137 * TRANSMIT_FILE PutFile payloads.
138 */
139 static struct kmem_cache *Putfile_buffer_list_pool;
140 static const char Putfile_buffer_list_pool_name[] =
141 "controlvm_putfile_buffer_list_pool";
142
143 /* This identifies a data buffer that has been received via a controlvm messages
144 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
145 */
146 struct putfile_buffer_entry {
147 struct list_head next; /* putfile_buffer_entry list */
148 PARSER_CONTEXT *parser_ctx; /* points to buffer containing input data */
149 };
150
151 /* List of struct putfile_request *, via next_putfile_request member.
152 * Each entry in this list identifies an outstanding TRANSMIT_FILE
153 * conversation.
154 */
155 static LIST_HEAD(Putfile_request_list);
156
157 /* This describes a buffer and its current state of transfer (e.g., how many
158 * bytes have already been supplied as putfile data, and how many bytes are
159 * remaining) for a putfile_request.
160 */
161 struct putfile_active_buffer {
162 /* a payload from a controlvm message, containing a file data buffer */
163 PARSER_CONTEXT *parser_ctx;
164 /* points within data area of parser_ctx to next byte of data */
165 u8 *pnext;
166 /* # bytes left from <pnext> to the end of this data buffer */
167 size_t bytes_remaining;
168 };
169
170 #define PUTFILE_REQUEST_SIG 0x0906101302281211
171 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
172 * conversation. Structs of this type are dynamically linked into
173 * <Putfile_request_list>.
174 */
175 struct putfile_request {
176 u64 sig; /* PUTFILE_REQUEST_SIG */
177
178 /* header from original TransmitFile request */
179 struct controlvm_message_header controlvm_header;
180 u64 file_request_number; /* from original TransmitFile request */
181
182 /* link to next struct putfile_request */
183 struct list_head next_putfile_request;
184
185 /* most-recent sequence number supplied via a controlvm message */
186 u64 data_sequence_number;
187
188 /* head of putfile_buffer_entry list, which describes the data to be
189 * supplied as putfile data;
190 * - this list is added to when controlvm messages come in that supply
191 * file data
192 * - this list is removed from via the hotplug program that is actually
193 * consuming these buffers to write as file data */
194 struct list_head input_buffer_list;
195 spinlock_t req_list_lock; /* lock for input_buffer_list */
196
197 /* waiters for input_buffer_list to go non-empty */
198 wait_queue_head_t input_buffer_wq;
199
200 /* data not yet read within current putfile_buffer_entry */
201 struct putfile_active_buffer active_buf;
202
203 /* <0 = failed, 0 = in-progress, >0 = successful; */
204 /* note that this must be set with req_list_lock, and if you set <0, */
205 /* it is your responsibility to also free up all of the other objects */
206 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
207 /* before releasing the lock */
208 int completion_status;
209 };
210
211 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
212
213 struct parahotplug_request {
214 struct list_head list;
215 int id;
216 unsigned long expiration;
217 struct controlvm_message msg;
218 };
219
220 static LIST_HEAD(Parahotplug_request_list);
221 static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
222 static void parahotplug_process_list(void);
223
224 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
225 * CONTROLVM_REPORTEVENT.
226 */
227 static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
228 static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
229
230 static void bus_create_response(ulong busNo, int response);
231 static void bus_destroy_response(ulong busNo, int response);
232 static void device_create_response(ulong busNo, ulong devNo, int response);
233 static void device_destroy_response(ulong busNo, ulong devNo, int response);
234 static void device_resume_response(ulong busNo, ulong devNo, int response);
235
236 static struct visorchipset_busdev_responders BusDev_Responders = {
237 .bus_create = bus_create_response,
238 .bus_destroy = bus_destroy_response,
239 .device_create = device_create_response,
240 .device_destroy = device_destroy_response,
241 .device_pause = visorchipset_device_pause_response,
242 .device_resume = device_resume_response,
243 };
244
245 /* info for /dev/visorchipset */
246 static dev_t MajorDev = -1; /**< indicates major num for device */
247
248 /* prototypes for attributes */
249 static ssize_t toolaction_show(struct device *dev,
250 struct device_attribute *attr, char *buf);
251 static ssize_t toolaction_store(struct device *dev,
252 struct device_attribute *attr, const char *buf, size_t count);
253 static DEVICE_ATTR_RW(toolaction);
254
255 static ssize_t boottotool_show(struct device *dev,
256 struct device_attribute *attr, char *buf);
257 static ssize_t boottotool_store(struct device *dev,
258 struct device_attribute *attr, const char *buf, size_t count);
259 static DEVICE_ATTR_RW(boottotool);
260
261 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
262 char *buf);
263 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
264 const char *buf, size_t count);
265 static DEVICE_ATTR_RW(error);
266
267 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
268 char *buf);
269 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
270 const char *buf, size_t count);
271 static DEVICE_ATTR_RW(textid);
272
273 static ssize_t remaining_steps_show(struct device *dev,
274 struct device_attribute *attr, char *buf);
275 static ssize_t remaining_steps_store(struct device *dev,
276 struct device_attribute *attr, const char *buf, size_t count);
277 static DEVICE_ATTR_RW(remaining_steps);
278
279 static ssize_t chipsetready_store(struct device *dev,
280 struct device_attribute *attr, const char *buf, size_t count);
281 static DEVICE_ATTR_WO(chipsetready);
282
283 static ssize_t devicedisabled_store(struct device *dev,
284 struct device_attribute *attr, const char *buf, size_t count);
285 static DEVICE_ATTR_WO(devicedisabled);
286
287 static ssize_t deviceenabled_store(struct device *dev,
288 struct device_attribute *attr, const char *buf, size_t count);
289 static DEVICE_ATTR_WO(deviceenabled);
290
291 static struct attribute *visorchipset_install_attrs[] = {
292 &dev_attr_toolaction.attr,
293 &dev_attr_boottotool.attr,
294 &dev_attr_error.attr,
295 &dev_attr_textid.attr,
296 &dev_attr_remaining_steps.attr,
297 NULL
298 };
299
300 static struct attribute_group visorchipset_install_group = {
301 .name = "install",
302 .attrs = visorchipset_install_attrs
303 };
304
305 static struct attribute *visorchipset_guest_attrs[] = {
306 &dev_attr_chipsetready.attr,
307 NULL
308 };
309
310 static struct attribute_group visorchipset_guest_group = {
311 .name = "guest",
312 .attrs = visorchipset_guest_attrs
313 };
314
315 static struct attribute *visorchipset_parahotplug_attrs[] = {
316 &dev_attr_devicedisabled.attr,
317 &dev_attr_deviceenabled.attr,
318 NULL
319 };
320
321 static struct attribute_group visorchipset_parahotplug_group = {
322 .name = "parahotplug",
323 .attrs = visorchipset_parahotplug_attrs
324 };
325
326 static const struct attribute_group *visorchipset_dev_groups[] = {
327 &visorchipset_install_group,
328 &visorchipset_guest_group,
329 &visorchipset_parahotplug_group,
330 NULL
331 };
332
333 /* /sys/devices/platform/visorchipset */
334 static struct platform_device Visorchipset_platform_device = {
335 .name = "visorchipset",
336 .id = -1,
337 .dev.groups = visorchipset_dev_groups,
338 };
339
340 /* Function prototypes */
341 static void controlvm_respond(struct controlvm_message_header *msgHdr,
342 int response);
343 static void controlvm_respond_chipset_init(
344 struct controlvm_message_header *msgHdr, int response,
345 enum ultra_chipset_feature features);
346 static void controlvm_respond_physdev_changestate(
347 struct controlvm_message_header *msgHdr, int response,
348 struct spar_segment_state state);
349
350 static ssize_t toolaction_show(struct device *dev,
351 struct device_attribute *attr,
352 char *buf)
353 {
354 u8 toolAction;
355
356 visorchannel_read(ControlVm_channel,
357 offsetof(struct spar_controlvm_channel_protocol,
358 tool_action), &toolAction, sizeof(u8));
359 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
360 }
361
362 static ssize_t toolaction_store(struct device *dev,
363 struct device_attribute *attr,
364 const char *buf, size_t count)
365 {
366 u8 toolAction;
367 int ret;
368
369 if (kstrtou8(buf, 10, &toolAction) != 0)
370 return -EINVAL;
371
372 ret = visorchannel_write(ControlVm_channel,
373 offsetof(struct spar_controlvm_channel_protocol, tool_action),
374 &toolAction, sizeof(u8));
375
376 if (ret)
377 return ret;
378 return count;
379 }
380
381 static ssize_t boottotool_show(struct device *dev,
382 struct device_attribute *attr,
383 char *buf)
384 {
385 struct efi_spar_indication efiSparIndication;
386
387 visorchannel_read(ControlVm_channel,
388 offsetof(struct spar_controlvm_channel_protocol,
389 efi_spar_ind), &efiSparIndication,
390 sizeof(struct efi_spar_indication));
391 return scnprintf(buf, PAGE_SIZE, "%u\n",
392 efiSparIndication.boot_to_tool);
393 }
394
395 static ssize_t boottotool_store(struct device *dev,
396 struct device_attribute *attr,
397 const char *buf, size_t count)
398 {
399 int val, ret;
400 struct efi_spar_indication efiSparIndication;
401
402 if (kstrtoint(buf, 10, &val) != 0)
403 return -EINVAL;
404
405 efiSparIndication.boot_to_tool = val;
406 ret = visorchannel_write(ControlVm_channel,
407 offsetof(struct spar_controlvm_channel_protocol,
408 efi_spar_ind),
409 &(efiSparIndication),
410 sizeof(struct efi_spar_indication));
411
412 if (ret)
413 return ret;
414 return count;
415 }
416
417 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
418 char *buf)
419 {
420 u32 error;
421
422 visorchannel_read(ControlVm_channel, offsetof(
423 struct spar_controlvm_channel_protocol, installation_error),
424 &error, sizeof(u32));
425 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
426 }
427
428 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
429 const char *buf, size_t count)
430 {
431 u32 error;
432 int ret;
433
434 if (kstrtou32(buf, 10, &error) != 0)
435 return -EINVAL;
436
437 ret = visorchannel_write(ControlVm_channel,
438 offsetof(struct spar_controlvm_channel_protocol,
439 installation_error),
440 &error, sizeof(u32));
441 if (ret)
442 return ret;
443 return count;
444 }
445
446 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
447 char *buf)
448 {
449 u32 textId;
450
451 visorchannel_read(ControlVm_channel, offsetof(
452 struct spar_controlvm_channel_protocol, installation_text_id),
453 &textId, sizeof(u32));
454 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
455 }
456
457 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
458 const char *buf, size_t count)
459 {
460 u32 textId;
461 int ret;
462
463 if (kstrtou32(buf, 10, &textId) != 0)
464 return -EINVAL;
465
466 ret = visorchannel_write(ControlVm_channel,
467 offsetof(struct spar_controlvm_channel_protocol,
468 installation_text_id),
469 &textId, sizeof(u32));
470 if (ret)
471 return ret;
472 return count;
473 }
474
475
476 static ssize_t remaining_steps_show(struct device *dev,
477 struct device_attribute *attr, char *buf)
478 {
479 u16 remainingSteps;
480
481 visorchannel_read(ControlVm_channel,
482 offsetof(struct spar_controlvm_channel_protocol,
483 installation_remaining_steps),
484 &remainingSteps,
485 sizeof(u16));
486 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
487 }
488
489 static ssize_t remaining_steps_store(struct device *dev,
490 struct device_attribute *attr, const char *buf, size_t count)
491 {
492 u16 remainingSteps;
493 int ret;
494
495 if (kstrtou16(buf, 10, &remainingSteps) != 0)
496 return -EINVAL;
497
498 ret = visorchannel_write(ControlVm_channel,
499 offsetof(struct spar_controlvm_channel_protocol,
500 installation_remaining_steps),
501 &remainingSteps, sizeof(u16));
502 if (ret)
503 return ret;
504 return count;
505 }
506
#if 0
/* Ad-hoc sanity check of the kernel's utf8 conversion helpers; compiled
 * out, kept only as a developer note.
 */
static void
testUnicode(void)
{
	wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
	char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
	wchar_t unicode2[99];
	int chrs;	/* BUGFIX: was used below without ever being declared */

	/* NOTE: Either due to a bug, or feature I don't understand, the
	 * kernel utf8_mbstowcs() and utf_wcstombs() do NOT copy the
	 * trailed NUL byte!! REALLY!!!!! Arrrrgggghhhhh
	 */

	LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
	LOGINF("utf8_wcstombs=%d",
	       chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
	if (chrs >= 0)
		s[chrs] = '\0';	/* GRRRRRRRR */
	LOGINF("s='%s'", s);
	/* BUGFIX: bound was 100, but unicode2 only has 99 elements */
	LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 98));
	if (chrs >= 0)
		unicode2[chrs] = 0;	/* GRRRRRRRR */
	if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
		LOGINF("strings match... good");
	else
		LOGINF("strings did not match!!");
}
#endif
535
536 static void
537 busInfo_clear(void *v)
538 {
539 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
540
541 if (p->proc_object) {
542 visor_proc_DestroyObject(p->proc_object);
543 p->proc_object = NULL;
544 }
545 kfree(p->name);
546 p->name = NULL;
547
548 kfree(p->description);
549 p->description = NULL;
550
551 p->state.created = 0;
552 memset(p, 0, sizeof(struct visorchipset_bus_info));
553 }
554
555 static void
556 devInfo_clear(void *v)
557 {
558 struct visorchipset_device_info *p =
559 (struct visorchipset_device_info *)(v);
560
561 p->state.created = 0;
562 memset(p, 0, sizeof(struct visorchipset_device_info));
563 }
564
565 static u8
566 check_chipset_events(void)
567 {
568 int i;
569 u8 send_msg = 1;
570 /* Check events to determine if response should be sent */
571 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
572 send_msg &= chipset_events[i];
573 return send_msg;
574 }
575
576 static void
577 clear_chipset_events(void)
578 {
579 int i;
580 /* Clear chipset_events */
581 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
582 chipset_events[i] = 0;
583 }
584
585 void
586 visorchipset_register_busdev_server(
587 struct visorchipset_busdev_notifiers *notifiers,
588 struct visorchipset_busdev_responders *responders,
589 struct ultra_vbus_deviceinfo *driver_info)
590 {
591 down(&NotifierLock);
592 if (notifiers == NULL) {
593 memset(&BusDev_Server_Notifiers, 0,
594 sizeof(BusDev_Server_Notifiers));
595 serverregistered = 0; /* clear flag */
596 } else {
597 BusDev_Server_Notifiers = *notifiers;
598 serverregistered = 1; /* set flag */
599 }
600 if (responders)
601 *responders = BusDev_Responders;
602 if (driver_info)
603 bus_device_info_init(driver_info, "chipset", "visorchipset",
604 VERSION, NULL);
605
606 up(&NotifierLock);
607 }
608 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
609
610 void
611 visorchipset_register_busdev_client(
612 struct visorchipset_busdev_notifiers *notifiers,
613 struct visorchipset_busdev_responders *responders,
614 struct ultra_vbus_deviceinfo *driver_info)
615 {
616 down(&NotifierLock);
617 if (notifiers == NULL) {
618 memset(&BusDev_Client_Notifiers, 0,
619 sizeof(BusDev_Client_Notifiers));
620 clientregistered = 0; /* clear flag */
621 } else {
622 BusDev_Client_Notifiers = *notifiers;
623 clientregistered = 1; /* set flag */
624 }
625 if (responders)
626 *responders = BusDev_Responders;
627 if (driver_info)
628 bus_device_info_init(driver_info, "chipset(bolts)",
629 "visorchipset", VERSION, NULL);
630 up(&NotifierLock);
631 }
632 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
633
634 static void
635 cleanup_controlvm_structures(void)
636 {
637 struct visorchipset_bus_info *bi, *tmp_bi;
638 struct visorchipset_device_info *di, *tmp_di;
639
640 list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
641 busInfo_clear(bi);
642 list_del(&bi->entry);
643 kfree(bi);
644 }
645
646 list_for_each_entry_safe(di, tmp_di, &DevInfoList, entry) {
647 devInfo_clear(di);
648 list_del(&di->entry);
649 kfree(di);
650 }
651 }
652
653 static void
654 chipset_init(struct controlvm_message *inmsg)
655 {
656 static int chipset_inited;
657 enum ultra_chipset_feature features = 0;
658 int rc = CONTROLVM_RESP_SUCCESS;
659
660 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
661 if (chipset_inited) {
662 LOGERR("CONTROLVM_CHIPSET_INIT Failed: Already Done.");
663 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
664 goto Away;
665 }
666 chipset_inited = 1;
667 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
668
669 /* Set features to indicate we support parahotplug (if Command
670 * also supports it). */
671 features =
672 inmsg->cmd.init_chipset.
673 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
674
675 /* Set the "reply" bit so Command knows this is a
676 * features-aware driver. */
677 features |= ULTRA_CHIPSET_FEATURE_REPLY;
678
679 Away:
680 if (rc < 0)
681 cleanup_controlvm_structures();
682 if (inmsg->hdr.flags.response_expected)
683 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
684 }
685
686 static void
687 controlvm_init_response(struct controlvm_message *msg,
688 struct controlvm_message_header *msgHdr, int response)
689 {
690 memset(msg, 0, sizeof(struct controlvm_message));
691 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
692 msg->hdr.payload_bytes = 0;
693 msg->hdr.payload_vm_offset = 0;
694 msg->hdr.payload_max_bytes = 0;
695 if (response < 0) {
696 msg->hdr.flags.failed = 1;
697 msg->hdr.completion_status = (u32) (-response);
698 }
699 }
700
701 static void
702 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
703 {
704 struct controlvm_message outmsg;
705
706 controlvm_init_response(&outmsg, msgHdr, response);
707 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
708 * back the deviceChangeState structure in the packet. */
709 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE
710 && g_DeviceChangeStatePacket.device_change_state.bus_no ==
711 g_diagpoolBusNo
712 && g_DeviceChangeStatePacket.device_change_state.dev_no ==
713 g_diagpoolDevNo)
714 outmsg.cmd = g_DeviceChangeStatePacket;
715 if (outmsg.hdr.flags.test_message == 1) {
716 LOGINF("%s controlvm_msg=0x%x response=%d for test message",
717 __func__, outmsg.hdr.id, response);
718 return;
719 }
720 if (!visorchannel_signalinsert(ControlVm_channel,
721 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
722 LOGERR("signalinsert failed!");
723 return;
724 }
725 }
726
727 static void
728 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
729 int response,
730 enum ultra_chipset_feature features)
731 {
732 struct controlvm_message outmsg;
733
734 controlvm_init_response(&outmsg, msgHdr, response);
735 outmsg.cmd.init_chipset.features = features;
736 if (!visorchannel_signalinsert(ControlVm_channel,
737 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
738 LOGERR("signalinsert failed!");
739 return;
740 }
741 }
742
743 static void controlvm_respond_physdev_changestate(
744 struct controlvm_message_header *msgHdr, int response,
745 struct spar_segment_state state)
746 {
747 struct controlvm_message outmsg;
748
749 controlvm_init_response(&outmsg, msgHdr, response);
750 outmsg.cmd.device_change_state.state = state;
751 outmsg.cmd.device_change_state.flags.phys_device = 1;
752 if (!visorchannel_signalinsert(ControlVm_channel,
753 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
754 LOGERR("signalinsert failed!");
755 return;
756 }
757 }
758
/* Stash a controlvm message in the channel's "saved crash message" area
 * (presumably so it can be replayed after an OS crash — the replay path
 * is not in this view).  CRASH_BUS messages go at the saved-message
 * offset; any other type goes in the slot immediately after it.
 */
void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 localSavedCrashMsgOffset;
	u16 localSavedCrashMsgCount;

	/* get saved message count */
	if (visorchannel_read(ControlVm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &localSavedCrashMsgCount, sizeof(u16)) < 0) {
		LOGERR("failed to get Saved Message Count");
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* the channel must advertise exactly the slot count we expect */
	if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
		LOGERR("Saved Message Count incorrect %d",
		       localSavedCrashMsgCount);
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 localSavedCrashMsgCount,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(ControlVm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
		LOGERR("failed to get Saved Message Offset");
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		/* bus message occupies the first slot */
		if (visorchannel_write(ControlVm_channel,
				       localSavedCrashMsgOffset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			LOGERR("SAVE_MSG_BUS_FAILURE: Failed to write CrashCreateBusMsg!");
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		/* device message occupies the second slot */
		if (visorchannel_write(ControlVm_channel,
				       localSavedCrashMsgOffset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			LOGERR("SAVE_MSG_DEV_FAILURE: Failed to write CrashCreateDevMsg!");
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);
820
/* Send the controlvm response for a bus command (cmdId) on bus busNo.
 * A failed BUS_CREATE (other than "already done") tears down the device
 * rows just created; a successful BUS_DESTROY clears the bus info and
 * its devices AFTER the response has been sent.
 */
static void
bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
{
	struct visorchipset_bus_info *p = NULL;
	BOOL need_clear = FALSE;

	p = findbus(&BusInfoList, busNo);
	if (!p) {
		LOGERR("internal error busNo=%lu", busNo);
		return;
	}
	if (response < 0) {
		if ((cmdId == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			delbusdevices(&DevInfoList, busNo);
	} else {
		if (cmdId == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmdId == CONTROLVM_BUS_DESTROY)
			need_clear = TRUE;
	}

	/* only respond when a request header was stashed by bus_epilog() */
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
		LOGERR("bus_responder no pending msg");
		return;		/* no controlvm response needed */
	}
	if (p->pending_msg_hdr.id != (u32) cmdId) {
		LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
		return;
	}
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		/* successful BUS_DESTROY: forget this bus and its devices */
		busInfo_clear(p);
		delbusdevices(&DevInfoList, busNo);
	}
}
859
/* Send a DEVICE_CHANGESTATE response for device (busNo, devNo), echoing
 * the bus/device numbers and responseState in a full
 * device_change_state packet (unlike the generic controlvm_respond()
 * path, which sends only the header).
 */
static void
device_changestate_responder(enum controlvm_id cmdId,
			     ulong busNo, ulong devNo, int response,
			     struct spar_segment_state responseState)
{
	struct visorchipset_device_info *p = NULL;
	struct controlvm_message outmsg;

	p = finddevice(&DevInfoList, busNo, devNo);
	if (!p) {
		LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
		return;
	}
	/* only respond when a request header was stashed by device_epilog() */
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
		LOGERR("device_responder no pending msg");
		return;		/* no controlvm response needed */
	}
	if (p->pending_msg_hdr.id != cmdId) {
		LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
		return;
	}

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = busNo;
	outmsg.cmd.device_change_state.dev_no = devNo;
	outmsg.cmd.device_change_state.state = responseState;

	if (!visorchannel_signalinsert(ControlVm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		LOGERR("signalinsert failed!");
		return;
	}

	/* mark the pending request as answered */
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}
896
/* Send the controlvm response for a device command (cmdId) on device
 * (busNo, devNo).  A successful DEVICE_CREATE marks the entry created;
 * a successful DEVICE_DESTROY clears its tracking info AFTER the
 * response has been sent.
 */
static void
device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
		 int response)
{
	struct visorchipset_device_info *p = NULL;
	BOOL need_clear = FALSE;

	p = finddevice(&DevInfoList, busNo, devNo);
	if (!p) {
		LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
		return;
	}
	if (response >= 0) {
		if (cmdId == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmdId == CONTROLVM_DEVICE_DESTROY)
			need_clear = TRUE;
	}

	/* only respond when a request header was stashed by device_epilog() */
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID) {
		LOGERR("device_responder no pending msg");
		return;		/* no controlvm response needed */
	}
	if (p->pending_msg_hdr.id != (u32) cmdId) {
		LOGERR("expected=%d, found=%d", cmdId, p->pending_msg_hdr.id);
		return;
	}
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		devInfo_clear(p);
}
929
/* Finish processing a BUS_CREATE/BUS_DESTROY message: stash the request
 * header for a later response (when needResponse), invoke the
 * registered server/client bus notifiers on success, and — when no
 * notifier took ownership of the response — respond immediately via
 * bus_responder().  All notifier dispatch happens under NotifierLock.
 */
static void
bus_epilog(u32 busNo,
	   u32 cmd, struct controlvm_message_header *msgHdr,
	   int response, BOOL needResponse)
{
	BOOL notified = FALSE;

	struct visorchipset_bus_info *pBusInfo = findbus(&BusInfoList, busNo);

	if (!pBusInfo) {
		LOGERR("HUH? bad busNo=%d", busNo);
		return;
	}
	if (needResponse) {
		/* remember the request so bus_responder() can reply later */
		memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
		       sizeof(struct controlvm_message_header));
	} else
		pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;

	down(&NotifierLock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			/* We can't tell from the bus_create
			 * information which of our 2 bus flavors the
			 * devices on this bus will ultimately end up.
			 * FORTUNATELY, it turns out it is harmless to
			 * send the bus_create to both of them. We can
			 * narrow things down a little bit, though,
			 * because we know: - BusDev_Server can handle
			 * either server or client devices
			 * - BusDev_Client can handle ONLY client
			 * devices */
			if (BusDev_Server_Notifiers.bus_create) {
				(*BusDev_Server_Notifiers.bus_create) (busNo);
				notified = TRUE;
			}
			if ((!pBusInfo->flags.server) /*client */ &&
			    BusDev_Client_Notifiers.bus_create) {
				(*BusDev_Client_Notifiers.bus_create) (busNo);
				notified = TRUE;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (BusDev_Server_Notifiers.bus_destroy) {
				(*BusDev_Server_Notifiers.bus_destroy) (busNo);
				notified = TRUE;
			}
			if ((!pBusInfo->flags.server) /*client */ &&
			    BusDev_Client_Notifiers.bus_destroy) {
				(*BusDev_Client_Notifiers.bus_destroy) (busNo);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, busNo, response);
	up(&NotifierLock);
}
996
/* Complete the processing of a CONTROLVM device message: optionally stash
 * the message header so a response can be sent later, then (when the
 * command succeeded so far) invoke the registered server- or client-bus
 * notifier for the command.  If a notifier ran, it is responsible for
 * sending the response (via the visorchipset_busdev_responders, which end
 * up in device_responder()); otherwise we respond directly here.
 * NotifierLock serializes notifier invocation with respect to other
 * epilog callers.
 */
static void
device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msgHdr, int response,
	      BOOL needResponse, BOOL for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers = NULL;
	BOOL notified = FALSE;

	struct visorchipset_device_info *pDevInfo =
	    finddevice(&DevInfoList, busNo, devNo);
	/* environment passed to the KOBJ_ONLINE uevent fired for the
	 * DiagPool "lite pause" case below
	 */
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!pDevInfo) {
		LOGERR("HUH? bad busNo=%d, devNo=%d", busNo, devNo);
		return;
	}
	/* pick the notifier set matching the bus flavor this device uses */
	if (for_visorbus)
		notifiers = &BusDev_Server_Notifiers;
	else
		notifiers = &BusDev_Client_Notifiers;
	if (needResponse) {
		/* remember the header so the eventual responder can reply */
		memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
		       sizeof(struct controlvm_message_header));
	} else
		pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;

	down(&NotifierLock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (busNo, devNo);
				notified = TRUE;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* map the requested segment state onto
			 * resume / pause / lite-pause actions
			 */
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (busNo,
								     devNo);
					notified = TRUE;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (busNo,
								    devNo);
					notified = TRUE;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (busNo == g_diagpoolBusNo
				    && devNo == g_diagpoolDevNo) {
					LOGINF("DEVICE_CHANGESTATE(DiagpoolChannel busNo=%d devNo=%d is pausing...)",
					       busNo, devNo);
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env
					    (&Visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (busNo, devNo);
				notified = TRUE;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, busNo, devNo, response);
	up(&NotifierLock);
}
1095
1096 static void
1097 bus_create(struct controlvm_message *inmsg)
1098 {
1099 struct controlvm_message_packet *cmd = &inmsg->cmd;
1100 ulong busNo = cmd->create_bus.bus_no;
1101 int rc = CONTROLVM_RESP_SUCCESS;
1102 struct visorchipset_bus_info *pBusInfo = NULL;
1103
1104
1105 pBusInfo = findbus(&BusInfoList, busNo);
1106 if (pBusInfo && (pBusInfo->state.created == 1)) {
1107 LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu already exists",
1108 busNo);
1109 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1110 POSTCODE_SEVERITY_ERR);
1111 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1112 goto Away;
1113 }
1114 pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
1115 if (pBusInfo == NULL) {
1116 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1117 POSTCODE_SEVERITY_ERR);
1118 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1119 goto Away;
1120 }
1121
1122 INIT_LIST_HEAD(&pBusInfo->entry);
1123 pBusInfo->bus_no = busNo;
1124 pBusInfo->dev_no = cmd->create_bus.dev_count;
1125
1126 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1127
1128 if (inmsg->hdr.flags.test_message == 1)
1129 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1130 else
1131 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1132
1133 pBusInfo->flags.server = inmsg->hdr.flags.server;
1134 pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1135 pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1136 pBusInfo->chan_info.channel_type_uuid =
1137 cmd->create_bus.bus_data_type_uuid;
1138 pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1139
1140 list_add(&pBusInfo->entry, &BusInfoList);
1141
1142 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1143
1144 Away:
1145 bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1146 rc, inmsg->hdr.flags.response_expected == 1);
1147 }
1148
1149 static void
1150 bus_destroy(struct controlvm_message *inmsg)
1151 {
1152 struct controlvm_message_packet *cmd = &inmsg->cmd;
1153 ulong busNo = cmd->destroy_bus.bus_no;
1154 struct visorchipset_bus_info *pBusInfo;
1155 int rc = CONTROLVM_RESP_SUCCESS;
1156
1157 pBusInfo = findbus(&BusInfoList, busNo);
1158 if (!pBusInfo) {
1159 LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu invalid", busNo);
1160 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1161 goto Away;
1162 }
1163 if (pBusInfo->state.created == 0) {
1164 LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu already destroyed",
1165 busNo);
1166 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1167 goto Away;
1168 }
1169
1170 Away:
1171 bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1172 rc, inmsg->hdr.flags.response_expected == 1);
1173 }
1174
1175 static void
1176 bus_configure(struct controlvm_message *inmsg, PARSER_CONTEXT *parser_ctx)
1177 {
1178 struct controlvm_message_packet *cmd = &inmsg->cmd;
1179 ulong busNo = cmd->configure_bus.bus_no;
1180 struct visorchipset_bus_info *pBusInfo = NULL;
1181 int rc = CONTROLVM_RESP_SUCCESS;
1182 char s[99];
1183
1184 busNo = cmd->configure_bus.bus_no;
1185 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1186
1187 pBusInfo = findbus(&BusInfoList, busNo);
1188 if (!pBusInfo) {
1189 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu invalid",
1190 busNo);
1191 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1192 POSTCODE_SEVERITY_ERR);
1193 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1194 goto Away;
1195 }
1196 if (pBusInfo->state.created == 0) {
1197 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: Invalid bus %lu - not created yet",
1198 busNo);
1199 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1200 POSTCODE_SEVERITY_ERR);
1201 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1202 goto Away;
1203 }
1204 /* TBD - add this check to other commands also... */
1205 if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1206 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu MsgId=%u outstanding",
1207 busNo, (uint) pBusInfo->pending_msg_hdr.id);
1208 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1209 POSTCODE_SEVERITY_ERR);
1210 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1211 goto Away;
1212 }
1213
1214 pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1215 pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1216 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1217 pBusInfo->name = parser_string_get(parser_ctx);
1218
1219 visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1220 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1221 Away:
1222 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1223 rc, inmsg->hdr.flags.response_expected == 1);
1224 }
1225
1226 static void
1227 my_device_create(struct controlvm_message *inmsg)
1228 {
1229 struct controlvm_message_packet *cmd = &inmsg->cmd;
1230 ulong busNo = cmd->create_device.bus_no;
1231 ulong devNo = cmd->create_device.dev_no;
1232 struct visorchipset_device_info *pDevInfo = NULL;
1233 struct visorchipset_bus_info *pBusInfo = NULL;
1234 int rc = CONTROLVM_RESP_SUCCESS;
1235
1236 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1237 if (pDevInfo && (pDevInfo->state.created == 1)) {
1238 LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu already exists",
1239 busNo, devNo);
1240 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1241 POSTCODE_SEVERITY_ERR);
1242 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1243 goto Away;
1244 }
1245 pBusInfo = findbus(&BusInfoList, busNo);
1246 if (!pBusInfo) {
1247 LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - out of range",
1248 busNo);
1249 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1250 POSTCODE_SEVERITY_ERR);
1251 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1252 goto Away;
1253 }
1254 if (pBusInfo->state.created == 0) {
1255 LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - not created yet",
1256 busNo);
1257 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1258 POSTCODE_SEVERITY_ERR);
1259 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1260 goto Away;
1261 }
1262 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
1263 if (pDevInfo == NULL) {
1264 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1265 POSTCODE_SEVERITY_ERR);
1266 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1267 goto Away;
1268 }
1269
1270 INIT_LIST_HEAD(&pDevInfo->entry);
1271 pDevInfo->bus_no = busNo;
1272 pDevInfo->dev_no = devNo;
1273 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1274 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1275 POSTCODE_SEVERITY_INFO);
1276
1277 if (inmsg->hdr.flags.test_message == 1)
1278 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1279 else
1280 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1281 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1282 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1283 pDevInfo->chan_info.channel_type_uuid =
1284 cmd->create_device.data_type_uuid;
1285 pDevInfo->chan_info.intr = cmd->create_device.intr;
1286 list_add(&pDevInfo->entry, &DevInfoList);
1287 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1288 POSTCODE_SEVERITY_INFO);
1289 Away:
1290 /* get the bus and devNo for DiagPool channel */
1291 if (pDevInfo &&
1292 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1293 g_diagpoolBusNo = busNo;
1294 g_diagpoolDevNo = devNo;
1295 LOGINF("CONTROLVM_DEVICE_CREATE for DiagPool channel: busNo=%lu, devNo=%lu",
1296 g_diagpoolBusNo, g_diagpoolDevNo);
1297 }
1298 device_epilog(busNo, devNo, segment_state_running,
1299 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1300 inmsg->hdr.flags.response_expected == 1,
1301 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
1302 }
1303
1304 static void
1305 my_device_changestate(struct controlvm_message *inmsg)
1306 {
1307 struct controlvm_message_packet *cmd = &inmsg->cmd;
1308 ulong busNo = cmd->device_change_state.bus_no;
1309 ulong devNo = cmd->device_change_state.dev_no;
1310 struct spar_segment_state state = cmd->device_change_state.state;
1311 struct visorchipset_device_info *pDevInfo = NULL;
1312 int rc = CONTROLVM_RESP_SUCCESS;
1313
1314 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1315 if (!pDevInfo) {
1316 LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (doesn't exist)",
1317 busNo, devNo);
1318 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1319 POSTCODE_SEVERITY_ERR);
1320 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1321 goto Away;
1322 }
1323 if (pDevInfo->state.created == 0) {
1324 LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (not created)",
1325 busNo, devNo);
1326 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1327 POSTCODE_SEVERITY_ERR);
1328 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1329 }
1330 Away:
1331 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1332 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1333 &inmsg->hdr, rc,
1334 inmsg->hdr.flags.response_expected == 1,
1335 FOR_VISORBUS(
1336 pDevInfo->chan_info.channel_type_uuid));
1337 }
1338
1339 static void
1340 my_device_destroy(struct controlvm_message *inmsg)
1341 {
1342 struct controlvm_message_packet *cmd = &inmsg->cmd;
1343 ulong busNo = cmd->destroy_device.bus_no;
1344 ulong devNo = cmd->destroy_device.dev_no;
1345 struct visorchipset_device_info *pDevInfo = NULL;
1346 int rc = CONTROLVM_RESP_SUCCESS;
1347
1348 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1349 if (!pDevInfo) {
1350 LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu invalid",
1351 busNo, devNo);
1352 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1353 goto Away;
1354 }
1355 if (pDevInfo->state.created == 0) {
1356 LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu already destroyed",
1357 busNo, devNo);
1358 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1359 }
1360
1361 Away:
1362 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1363 device_epilog(busNo, devNo, segment_state_running,
1364 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1365 inmsg->hdr.flags.response_expected == 1,
1366 FOR_VISORBUS(
1367 pDevInfo->chan_info.channel_type_uuid));
1368 }
1369
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success, or a negative CONTROLVM_RESP_ERROR_* code for failure.
 */
static int
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
				  struct controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	/* programmer-error guard: an output struct must be supplied */
	if (info == NULL) {
		LOGERR("HUH ? CONTROLVM_PAYLOAD_INIT Failed : Programmer check at %s:%d",
		       __FILE__, __LINE__);
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto Away;
	}
	memset(info, 0, sizeof(struct controlvm_payload_info));
	/* a zero offset or length means the channel advertises no payload */
	if ((offset == 0) || (bytes == 0)) {
		LOGERR("CONTROLVM_PAYLOAD_INIT Failed: request_payload_offset=%llu request_payload_bytes=%llu!",
		       (u64) offset, (u64) bytes);
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto Away;
	}
	/* map the payload region so it can be accessed directly */
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (payload == NULL) {
		LOGERR("CONTROLVM_PAYLOAD_INIT Failed: ioremap_cache %llu for %llu bytes failed",
		       (u64) offset, (u64) bytes);
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto Away;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;
	LOGINF("offset=%llu, bytes=%lu, ptr=%p",
	       (u64) (info->offset), (ulong) (info->bytes), info->ptr);

Away:
	if (rc < 0) {
		/* undo the mapping on failure; with the current flow no
		 * error occurs after a successful ioremap, so this is
		 * purely defensive
		 */
		if (payload != NULL) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}
1419
1420 static void
1421 destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1422 {
1423 if (info->ptr != NULL) {
1424 iounmap(info->ptr);
1425 info->ptr = NULL;
1426 }
1427 memset(info, 0, sizeof(struct controlvm_payload_info));
1428 }
1429
1430 static void
1431 initialize_controlvm_payload(void)
1432 {
1433 HOSTADDRESS phys_addr = visorchannel_get_physaddr(ControlVm_channel);
1434 u64 payloadOffset = 0;
1435 u32 payloadBytes = 0;
1436
1437 if (visorchannel_read(ControlVm_channel,
1438 offsetof(struct spar_controlvm_channel_protocol,
1439 request_payload_offset),
1440 &payloadOffset, sizeof(payloadOffset)) < 0) {
1441 LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
1442 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1443 POSTCODE_SEVERITY_ERR);
1444 return;
1445 }
1446 if (visorchannel_read(ControlVm_channel,
1447 offsetof(struct spar_controlvm_channel_protocol,
1448 request_payload_bytes),
1449 &payloadBytes, sizeof(payloadBytes)) < 0) {
1450 LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
1451 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1452 POSTCODE_SEVERITY_ERR);
1453 return;
1454 }
1455 initialize_controlvm_payload_info(phys_addr,
1456 payloadOffset, payloadBytes,
1457 &ControlVm_payload_info);
1458 }
1459
1460 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1461 * Returns CONTROLVM_RESP_xxx code.
1462 */
int
visorchipset_chipset_ready(void)
{
	/* fire ACTION=online uevent on the visorchipset platform device */
	kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1470
1471 int
1472 visorchipset_chipset_selftest(void)
1473 {
1474 char env_selftest[20];
1475 char *envp[] = { env_selftest, NULL };
1476
1477 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1478 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1479 envp);
1480 return CONTROLVM_RESP_SUCCESS;
1481 }
1482 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1483
1484 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1485 * Returns CONTROLVM_RESP_xxx code.
1486 */
int
visorchipset_chipset_notready(void)
{
	/* fire ACTION=offline uevent on the visorchipset platform device */
	kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1494
1495 static void
1496 chipset_ready(struct controlvm_message_header *msgHdr)
1497 {
1498 int rc = visorchipset_chipset_ready();
1499
1500 if (rc != CONTROLVM_RESP_SUCCESS)
1501 rc = -rc;
1502 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1503 controlvm_respond(msgHdr, rc);
1504 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1505 /* Send CHIPSET_READY response when all modules have been loaded
1506 * and disks mounted for the partition
1507 */
1508 g_ChipSetMsgHdr = *msgHdr;
1509 LOGINF("Holding CHIPSET_READY response");
1510 }
1511 }
1512
/* Handle a CONTROLVM_CHIPSET_SELFTEST message: kick off the selftest
 * uevent and, if the sender asked for one, respond with the result.
 */
static void
chipset_selftest(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_selftest();

	/* CONTROLVM_RESP error codes are reported as negative values */
	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected)
		controlvm_respond(msgHdr, rc);
}
1523
/* Handle a CONTROLVM_CHIPSET_STOP message: fire the offline uevent and,
 * if the sender asked for one, respond with the result.
 */
static void
chipset_notready(struct controlvm_message_header *msgHdr)
{
	int rc = visorchipset_chipset_notready();

	/* CONTROLVM_RESP error codes are reported as negative values */
	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msgHdr->flags.response_expected)
		controlvm_respond(msgHdr, rc);
}
1534
1535 /* This is your "one-stop" shop for grabbing the next message from the
1536 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1537 */
1538 static BOOL
1539 read_controlvm_event(struct controlvm_message *msg)
1540 {
1541 if (visorchannel_signalremove(ControlVm_channel,
1542 CONTROLVM_QUEUE_EVENT, msg)) {
1543 /* got a message */
1544 if (msg->hdr.flags.test_message == 1) {
1545 LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)",
1546 msg->hdr.id);
1547 return FALSE;
1548 }
1549 return TRUE;
1550 }
1551 return FALSE;
1552 }
1553
1554 /*
1555 * The general parahotplug flow works as follows. The visorchipset
1556 * driver receives a DEVICE_CHANGESTATE message from Command
1557 * specifying a physical device to enable or disable. The CONTROLVM
1558 * message handler calls parahotplug_process_message, which then adds
1559 * the message to a global list and kicks off a udev event which
1560 * causes a user level script to enable or disable the specified
1561 * device. The udev script then writes to
1562 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1563 * to get called, at which point the appropriate CONTROLVM message is
1564 * retrieved from the list and responded to.
1565 */
1566
1567 #define PARAHOTPLUG_TIMEOUT_MS 2000
1568
1569 /*
1570 * Generate unique int to match an outstanding CONTROLVM message with a
1571 * udev script /proc response
1572 */
static int
parahotplug_next_id(void)
{
	/* atomic counter so concurrent callers never get duplicate ids */
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
1580
1581 /*
1582 * Returns the time (in jiffies) when a CONTROLVM message on the list
1583 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1584 */
static unsigned long
parahotplug_next_expiration(void)
{
	/* expiry is PARAHOTPLUG_TIMEOUT_MS from now, expressed in jiffies */
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
1590
1591 /*
1592 * Create a parahotplug_request, which is basically a wrapper for a
1593 * CONTROLVM_MESSAGE that we can stick on a list
1594 */
1595 static struct parahotplug_request *
1596 parahotplug_request_create(struct controlvm_message *msg)
1597 {
1598 struct parahotplug_request *req;
1599
1600 req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
1601 if (req == NULL)
1602 return NULL;
1603
1604 req->id = parahotplug_next_id();
1605 req->expiration = parahotplug_next_expiration();
1606 req->msg = *msg;
1607
1608 return req;
1609 }
1610
1611 /*
1612 * Free a parahotplug_request.
1613 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	/* req was allocated by parahotplug_request_create() */
	kfree(req);
}
1619
1620 /*
1621 * Cause uevent to run the user level script to do the disable/enable
1622 * specified in (the CONTROLVM message in) the specified
1623 * parahotplug_request
1624 */
1625 static void
1626 parahotplug_request_kickoff(struct parahotplug_request *req)
1627 {
1628 struct controlvm_message_packet *cmd = &req->msg.cmd;
1629 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1630 env_func[40];
1631 char *envp[] = {
1632 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1633 };
1634
1635 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1636 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1637 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1638 cmd->device_change_state.state.active);
1639 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1640 cmd->device_change_state.bus_no);
1641 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1642 cmd->device_change_state.dev_no >> 3);
1643 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1644 cmd->device_change_state.dev_no & 0x7);
1645
1646 LOGINF("parahotplug_request_kickoff: state=%d, bdf=%d/%d/%d, id=%u\n",
1647 cmd->device_change_state.state.active,
1648 cmd->device_change_state.bus_no,
1649 cmd->device_change_state.dev_no >> 3,
1650 cmd->device_change_state.dev_no & 7, req->id);
1651
1652 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1653 envp);
1654 }
1655
1656 /*
1657 * Remove any request from the list that's been on there too long and
1658 * respond with an error.
1659 */
1660 static void
1661 parahotplug_process_list(void)
1662 {
1663 struct list_head *pos = NULL;
1664 struct list_head *tmp = NULL;
1665
1666 spin_lock(&Parahotplug_request_list_lock);
1667
1668 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1669 struct parahotplug_request *req =
1670 list_entry(pos, struct parahotplug_request, list);
1671 if (time_after_eq(jiffies, req->expiration)) {
1672 list_del(pos);
1673 if (req->msg.hdr.flags.response_expected)
1674 controlvm_respond_physdev_changestate(
1675 &req->msg.hdr,
1676 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1677 req->msg.cmd.device_change_state.state);
1678 parahotplug_request_destroy(req);
1679 }
1680 }
1681
1682 spin_unlock(&Parahotplug_request_list_lock);
1683 }
1684
1685 /*
1686 * Called from the /proc handler, which means the user script has
1687 * finished the enable/disable. Find the matching identifier, and
1688 * respond to the CONTROLVM message with success.
1689 */
/* Complete the parahotplug request identified by id: remove it from the
 * outstanding list, respond to its CONTROLVM message with SUCCESS (and
 * the supplied active state), and free it.  Returns 0 when a matching
 * request was found, -1 otherwise.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;

	spin_lock(&Parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match. Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			/* req is off the list, so it is exclusively ours;
			 * safe to drop the lock before responding
			 */
			spin_unlock(&Parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&Parahotplug_request_list_lock);
	/* no outstanding request carried this id */
	return -1;
}
1721
1722 /*
1723 * Enables or disables a PCI device by kicking off a udev script
1724 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (req == NULL) {
		LOGERR("parahotplug_process_message: couldn't allocate request");
		return;
	}

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
				CONTROLVM_RESP_SUCCESS, inmsg->cmd.
				device_change_state.state);
		/* req served its purpose (enable path); free it now */
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		/* req ownership passes to the list; it is freed later by
		 * parahotplug_request_complete() or, on timeout, by
		 * parahotplug_process_list()
		 */
		spin_lock(&Parahotplug_request_list_lock);
		list_add_tail(&(req->list), &Parahotplug_request_list);
		spin_unlock(&Parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
1765
1766 /* Process a controlvm message.
1767 * Return result:
1768 * FALSE - this function will return FALSE only in the case where the
1769 * controlvm message was NOT processed, but processing must be
1770 * retried before reading the next controlvm message; a
1771 * scenario where this can occur is when we need to throttle
1772 * the allocation of memory in which to copy out controlvm
1773 * payload data
1774 * TRUE - processing of the controlvm message completed,
1775 * either successfully or with an error.
1776 */
static BOOL
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parametersAddr = 0;
	u32 parametersBytes = 0;
	PARSER_CONTEXT *parser_ctx = NULL;
	BOOL isLocalAddr = FALSE;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	isLocalAddr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0) {
		LOGERR("HUH? channel_addr is 0!");
		return TRUE;
	}
	parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
	parametersBytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parametersAddr != 0 && parametersBytes != 0) {
		BOOL retry = FALSE;

		parser_ctx =
		    parser_init_byteStream(parametersAddr, parametersBytes,
					   isLocalAddr, &retry);
		if (!parser_ctx) {
			if (retry) {
				/* memory pressure: caller must retry this
				 * same message later
				 */
				LOGWRN("throttling to copy payload");
				return FALSE;
			}
			LOGWRN("parsing failed");
			LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.id);
			LOGWRN("parametersAddr=0x%llx", (u64) parametersAddr);
			LOGWRN("parametersBytes=%lu", (ulong) parametersBytes);
			LOGWRN("isLocalAddr=%d", isLocalAddr);
		}
	}

	/* ACK receipt on the ACK queue for real (non-test) messages */
	if (!isLocalAddr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if ((ControlVm_channel)
		    &&
		    (!visorchannel_signalinsert
		     (ControlVm_channel, CONTROLVM_QUEUE_ACK, &ackmsg)))
			LOGWRN("failed to send ACK failed");
	}
	/* dispatch to the per-command handler */
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		LOGINF("CHIPSET_INIT(#busses=%lu,#switches=%lu)",
		       (ulong) inmsg.cmd.init_chipset.bus_count,
		       (ulong) inmsg.cmd.init_chipset.switch_count);
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		LOGINF("BUS_CREATE(%lu,#devs=%lu)",
		       (ulong) cmd->create_bus.bus_no,
		       (ulong) cmd->create_bus.dev_count);
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroy_bus.bus_no);
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configure_bus.bus_no);
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		LOGINF("DEVICE_CREATE(%lu,%lu)",
		       (ulong) cmd->create_device.bus_no,
		       (ulong) cmd->create_device.dev_no);
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		/* physical devices go through the parahotplug/udev path;
		 * virtual devices through the device_epilog path
		 */
		if (cmd->device_change_state.flags.phys_device) {
			LOGINF("DEVICE_CHANGESTATE for physical device (%lu,%lu, active=%lu)",
			       (ulong) cmd->device_change_state.bus_no,
			       (ulong) cmd->device_change_state.dev_no,
			       (ulong) cmd->device_change_state.state.active);
			parahotplug_process_message(&inmsg);
		} else {
			LOGINF("DEVICE_CHANGESTATE for virtual device (%lu,%lu, state.Alive=0x%lx)",
			       (ulong) cmd->device_change_state.bus_no,
			       (ulong) cmd->device_change_state.dev_no,
			       (ulong) cmd->device_change_state.state.alive);
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_DiagMsgHdr = inmsg.hdr;
			g_DeviceChangeStatePacket = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		LOGINF("DEVICE_DESTROY(%lu,%lu)",
		       (ulong) cmd->destroy_device.bus_no,
		       (ulong) cmd->destroy_device.dev_no);
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		LOGINF("DEVICE_CONFIGURE(%lu,%lu)",
		       (ulong) cmd->configure_device.bus_no,
		       (ulong) cmd->configure_device.dev_no);
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		LOGINF("CHIPSET_READY");
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		LOGINF("CHIPSET_SELFTEST");
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		LOGINF("CHIPSET_STOP");
		chipset_notready(&inmsg.hdr);
		break;
	default:
		LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.id);
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	/* release the payload parsing context, if one was created */
	if (parser_ctx != NULL) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return TRUE;
}
1915
1916 static HOSTADDRESS controlvm_get_channel_address(void)
1917 {
1918 u64 addr = 0;
1919 u32 size = 0;
1920
1921 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size))) {
1922 ERRDRV("%s - vmcall to determine controlvm channel addr failed",
1923 __func__);
1924 return 0;
1925 }
1926 INFODRV("controlvm addr=%Lx", addr);
1927 return addr;
1928 }
1929
1930 static void
1931 controlvm_periodic_work(struct work_struct *work)
1932 {
1933 struct controlvm_message inmsg;
1934 BOOL gotACommand = FALSE;
1935 BOOL handle_command_failed = FALSE;
1936 static u64 Poll_Count;
1937
1938 /* make sure visorbus server is registered for controlvm callbacks */
1939 if (visorchipset_serverregwait && !serverregistered)
1940 goto Away;
1941 /* make sure visorclientbus server is regsitered for controlvm
1942 * callbacks
1943 */
1944 if (visorchipset_clientregwait && !clientregistered)
1945 goto Away;
1946
1947 Poll_Count++;
1948 if (Poll_Count >= 250)
1949 ; /* keep going */
1950 else
1951 goto Away;
1952
1953 /* Check events to determine if response to CHIPSET_READY
1954 * should be sent
1955 */
1956 if (visorchipset_holdchipsetready
1957 && (g_ChipSetMsgHdr.id != CONTROLVM_INVALID)) {
1958 if (check_chipset_events() == 1) {
1959 LOGINF("Sending CHIPSET_READY response");
1960 controlvm_respond(&g_ChipSetMsgHdr, 0);
1961 clear_chipset_events();
1962 memset(&g_ChipSetMsgHdr, 0,
1963 sizeof(struct controlvm_message_header));
1964 }
1965 }
1966
1967 while (visorchannel_signalremove(ControlVm_channel,
1968 CONTROLVM_QUEUE_RESPONSE,
1969 &inmsg)) {
1970 if (inmsg.hdr.payload_max_bytes != 0) {
1971 LOGERR("Payload of size %lu returned @%lu with unexpected message id %d.",
1972 (ulong) inmsg.hdr.payload_max_bytes,
1973 (ulong) inmsg.hdr.payload_vm_offset,
1974 inmsg.hdr.id);
1975 }
1976 }
1977 if (!gotACommand) {
1978 if (ControlVm_Pending_Msg_Valid) {
1979 /* we throttled processing of a prior
1980 * msg, so try to process it again
1981 * rather than reading a new one
1982 */
1983 inmsg = ControlVm_Pending_Msg;
1984 ControlVm_Pending_Msg_Valid = FALSE;
1985 gotACommand = TRUE;
1986 } else
1987 gotACommand = read_controlvm_event(&inmsg);
1988 }
1989
1990 handle_command_failed = FALSE;
1991 while (gotACommand && (!handle_command_failed)) {
1992 Most_recent_message_jiffies = jiffies;
1993 if (handle_command(inmsg,
1994 visorchannel_get_physaddr
1995 (ControlVm_channel)))
1996 gotACommand = read_controlvm_event(&inmsg);
1997 else {
1998 /* this is a scenario where throttling
1999 * is required, but probably NOT an
2000 * error...; we stash the current
2001 * controlvm msg so we will attempt to
2002 * reprocess it on our next loop
2003 */
2004 handle_command_failed = TRUE;
2005 ControlVm_Pending_Msg = inmsg;
2006 ControlVm_Pending_Msg_Valid = TRUE;
2007 }
2008 }
2009
2010 /* parahotplug_worker */
2011 parahotplug_process_list();
2012
2013 Away:
2014
2015 if (time_after(jiffies,
2016 Most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2017 /* it's been longer than MIN_IDLE_SECONDS since we
2018 * processed our last controlvm message; slow down the
2019 * polling
2020 */
2021 if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW) {
2022 LOGINF("switched to slow controlvm polling");
2023 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2024 }
2025 } else {
2026 if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST) {
2027 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2028 LOGINF("switched to fast controlvm polling");
2029 }
2030 }
2031
2032 queue_delayed_work(Periodic_controlvm_workqueue,
2033 &Periodic_controlvm_work, Poll_jiffies);
2034 }
2035
2036 static void
2037 setup_crash_devices_work_queue(struct work_struct *work)
2038 {
2039
2040 struct controlvm_message localCrashCreateBusMsg;
2041 struct controlvm_message localCrashCreateDevMsg;
2042 struct controlvm_message msg;
2043 u32 localSavedCrashMsgOffset;
2044 u16 localSavedCrashMsgCount;
2045
2046 /* make sure visorbus server is registered for controlvm callbacks */
2047 if (visorchipset_serverregwait && !serverregistered)
2048 goto Away;
2049
2050 /* make sure visorclientbus server is regsitered for controlvm
2051 * callbacks
2052 */
2053 if (visorchipset_clientregwait && !clientregistered)
2054 goto Away;
2055
2056 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2057
2058 /* send init chipset msg */
2059 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2060 msg.cmd.init_chipset.bus_count = 23;
2061 msg.cmd.init_chipset.switch_count = 0;
2062
2063 chipset_init(&msg);
2064
2065 /* get saved message count */
2066 if (visorchannel_read(ControlVm_channel,
2067 offsetof(struct spar_controlvm_channel_protocol,
2068 saved_crash_message_count),
2069 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
2070 LOGERR("failed to get Saved Message Count");
2071 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2072 POSTCODE_SEVERITY_ERR);
2073 return;
2074 }
2075
2076 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
2077 LOGERR("Saved Message Count incorrect %d",
2078 localSavedCrashMsgCount);
2079 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2080 localSavedCrashMsgCount,
2081 POSTCODE_SEVERITY_ERR);
2082 return;
2083 }
2084
2085 /* get saved crash message offset */
2086 if (visorchannel_read(ControlVm_channel,
2087 offsetof(struct spar_controlvm_channel_protocol,
2088 saved_crash_message_offset),
2089 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
2090 LOGERR("failed to get Saved Message Offset");
2091 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2092 POSTCODE_SEVERITY_ERR);
2093 return;
2094 }
2095
2096 /* read create device message for storage bus offset */
2097 if (visorchannel_read(ControlVm_channel,
2098 localSavedCrashMsgOffset,
2099 &localCrashCreateBusMsg,
2100 sizeof(struct controlvm_message)) < 0) {
2101 LOGERR("CRASH_DEV_RD_BUS_FAIULRE: Failed to read CrashCreateBusMsg!");
2102 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2103 POSTCODE_SEVERITY_ERR);
2104 return;
2105 }
2106
2107 /* read create device message for storage device */
2108 if (visorchannel_read(ControlVm_channel,
2109 localSavedCrashMsgOffset +
2110 sizeof(struct controlvm_message),
2111 &localCrashCreateDevMsg,
2112 sizeof(struct controlvm_message)) < 0) {
2113 LOGERR("CRASH_DEV_RD_DEV_FAIULRE: Failed to read CrashCreateDevMsg!");
2114 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2115 POSTCODE_SEVERITY_ERR);
2116 return;
2117 }
2118
2119 /* reuse IOVM create bus message */
2120 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
2121 bus_create(&localCrashCreateBusMsg);
2122 else {
2123 LOGERR("CrashCreateBusMsg is null, no dump will be taken");
2124 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2125 POSTCODE_SEVERITY_ERR);
2126 return;
2127 }
2128
2129 /* reuse create device message for storage device */
2130 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
2131 my_device_create(&localCrashCreateDevMsg);
2132 else {
2133 LOGERR("CrashCreateDevMsg is null, no dump will be taken");
2134 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2135 POSTCODE_SEVERITY_ERR);
2136 return;
2137 }
2138 LOGINF("Bus and device ready for dumping");
2139 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2140 return;
2141
2142 Away:
2143
2144 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2145
2146 queue_delayed_work(Periodic_controlvm_workqueue,
2147 &Periodic_controlvm_work, Poll_jiffies);
2148 }
2149
/* Completion callback: report the outcome of a CONTROLVM_BUS_CREATE
 * request via the generic bus responder.
 */
static void
bus_create_response(ulong busNo, int response)
{
	bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
}
2155
/* Completion callback: report the outcome of a CONTROLVM_BUS_DESTROY
 * request via the generic bus responder.
 */
static void
bus_destroy_response(ulong busNo, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
}
2161
/* Completion callback: report the outcome of a CONTROLVM_DEVICE_CREATE
 * request via the generic device responder.
 */
static void
device_create_response(ulong busNo, ulong devNo, int response)
{
	device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
}
2167
/* Completion callback: report the outcome of a CONTROLVM_DEVICE_DESTROY
 * request via the generic device responder.
 */
static void
device_destroy_response(ulong busNo, ulong devNo, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
}
2173
/* Exported completion callback: report the outcome of a
 * CONTROLVM_DEVICE_CHANGESTATE (pause) request, indicating the device
 * is now in the standby segment state.
 */
void
visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
{

	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2183
/* Completion callback: report the outcome of a
 * CONTROLVM_DEVICE_CHANGESTATE (resume) request, indicating the device
 * is now in the running segment state.
 */
static void
device_resume_response(ulong busNo, ulong devNo, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     busNo, devNo, response,
				     segment_state_running);
}
2191
2192 BOOL
2193 visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
2194 {
2195 void *p = findbus(&BusInfoList, bus_no);
2196
2197 if (!p) {
2198 LOGERR("(%lu) failed", bus_no);
2199 return FALSE;
2200 }
2201 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2202 return TRUE;
2203 }
2204 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2205
2206 BOOL
2207 visorchipset_set_bus_context(ulong bus_no, void *context)
2208 {
2209 struct visorchipset_bus_info *p = findbus(&BusInfoList, bus_no);
2210
2211 if (!p) {
2212 LOGERR("(%lu) failed", bus_no);
2213 return FALSE;
2214 }
2215 p->bus_driver_context = context;
2216 return TRUE;
2217 }
2218 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2219
2220 BOOL
2221 visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2222 struct visorchipset_device_info *dev_info)
2223 {
2224 void *p = finddevice(&DevInfoList, bus_no, dev_no);
2225
2226 if (!p) {
2227 LOGERR("(%lu,%lu) failed", bus_no, dev_no);
2228 return FALSE;
2229 }
2230 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2231 return TRUE;
2232 }
2233 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2234
2235 BOOL
2236 visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
2237 {
2238 struct visorchipset_device_info *p =
2239 finddevice(&DevInfoList, bus_no, dev_no);
2240
2241 if (!p) {
2242 LOGERR("(%lu,%lu) failed", bus_no, dev_no);
2243 return FALSE;
2244 }
2245 p->bus_driver_context = context;
2246 return TRUE;
2247 }
2248 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2249
2250 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2251 */
2252 void *
2253 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2254 char *fn, int ln)
2255 {
2256 gfp_t gfp;
2257 void *p;
2258
2259 if (ok_to_block)
2260 gfp = GFP_KERNEL;
2261 else
2262 gfp = GFP_ATOMIC;
2263 /* __GFP_NORETRY means "ok to fail", meaning
2264 * kmem_cache_alloc() can return NULL, implying the caller CAN
2265 * cope with failure. If you do NOT specify __GFP_NORETRY,
2266 * Linux will go to extreme measures to get memory for you
2267 * (like, invoke oom killer), which will probably cripple the
2268 * system.
2269 */
2270 gfp |= __GFP_NORETRY;
2271 p = kmem_cache_alloc(pool, gfp);
2272 if (!p) {
2273 LOGERR("kmem_cache_alloc failed early @%s:%d\n", fn, ln);
2274 return NULL;
2275 }
2276 atomic_inc(&Visorchipset_cache_buffers_in_use);
2277 return p;
2278 }
2279
2280 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2281 */
2282 void
2283 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2284 {
2285 if (!p) {
2286 LOGERR("NULL pointer @%s:%d\n", fn, ln);
2287 return;
2288 }
2289 atomic_dec(&Visorchipset_cache_buffers_in_use);
2290 kmem_cache_free(pool, p);
2291 }
2292
2293 static ssize_t chipsetready_store(struct device *dev,
2294 struct device_attribute *attr, const char *buf, size_t count)
2295 {
2296 char msgtype[64];
2297
2298 if (sscanf(buf, "%63s", msgtype) != 1)
2299 return -EINVAL;
2300
2301 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2302 chipset_events[0] = 1;
2303 return count;
2304 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2305 chipset_events[1] = 1;
2306 return count;
2307 }
2308 return -EINVAL;
2309 }
2310
2311 /* The parahotplug/devicedisabled interface gets called by our support script
2312 * when an SR-IOV device has been shut down. The ID is passed to the script
2313 * and then passed back when the device has been removed.
2314 */
2315 static ssize_t devicedisabled_store(struct device *dev,
2316 struct device_attribute *attr, const char *buf, size_t count)
2317 {
2318 uint id;
2319
2320 if (kstrtouint(buf, 10, &id) != 0)
2321 return -EINVAL;
2322
2323 parahotplug_request_complete(id, 0);
2324 return count;
2325 }
2326
2327 /* The parahotplug/deviceenabled interface gets called by our support script
2328 * when an SR-IOV device has been recovered. The ID is passed to the script
2329 * and then passed back when the device has been brought back up.
2330 */
2331 static ssize_t deviceenabled_store(struct device *dev,
2332 struct device_attribute *attr, const char *buf, size_t count)
2333 {
2334 uint id;
2335
2336 if (kstrtouint(buf, 10, &id) != 0)
2337 return -EINVAL;
2338
2339 parahotplug_request_complete(id, 1);
2340 return count;
2341 }
2342
2343 static int __init
2344 visorchipset_init(void)
2345 {
2346 int rc = 0, x = 0;
2347 char s[64];
2348 HOSTADDRESS addr;
2349
2350 if (!unisys_spar_platform)
2351 return -ENODEV;
2352
2353 LOGINF("chipset driver version %s loaded", VERSION);
2354 /* process module options */
2355 POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2356
2357 LOGINF("option - testvnic=%d", visorchipset_testvnic);
2358 LOGINF("option - testvnicclient=%d", visorchipset_testvnicclient);
2359 LOGINF("option - testmsg=%d", visorchipset_testmsg);
2360 LOGINF("option - testteardown=%d", visorchipset_testteardown);
2361 LOGINF("option - major=%d", visorchipset_major);
2362 LOGINF("option - serverregwait=%d", visorchipset_serverregwait);
2363 LOGINF("option - clientregwait=%d", visorchipset_clientregwait);
2364 LOGINF("option - holdchipsetready=%d", visorchipset_holdchipsetready);
2365
2366 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2367 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2368 memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2369 memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2370 atomic_set(&LiveDump_info.buffers_in_use, 0);
2371
2372 if (visorchipset_testvnic) {
2373 ERRDRV("testvnic option no longer supported: (status = %d)\n",
2374 x);
2375 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2376 rc = x;
2377 goto Away;
2378 }
2379
2380 addr = controlvm_get_channel_address();
2381 if (addr != 0) {
2382 ControlVm_channel =
2383 visorchannel_create_with_lock
2384 (addr,
2385 sizeof(struct spar_controlvm_channel_protocol),
2386 spar_controlvm_channel_protocol_uuid);
2387 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2388 visorchannel_get_header(ControlVm_channel))) {
2389 LOGINF("Channel %s (ControlVm) discovered",
2390 visorchannel_id(ControlVm_channel, s));
2391 initialize_controlvm_payload();
2392 } else {
2393 LOGERR("controlvm channel is invalid");
2394 visorchannel_destroy(ControlVm_channel);
2395 ControlVm_channel = NULL;
2396 return -ENODEV;
2397 }
2398 } else {
2399 LOGERR("no controlvm channel discovered");
2400 return -ENODEV;
2401 }
2402
2403 MajorDev = MKDEV(visorchipset_major, 0);
2404 rc = visorchipset_file_init(MajorDev, &ControlVm_channel);
2405 if (rc < 0) {
2406 ERRDRV("visorchipset_file_init(MajorDev, &ControlVm_channel): error (status=%d)\n", rc);
2407 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2408 goto Away;
2409 }
2410
2411 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2412
2413 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2414
2415 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
2416
2417 Putfile_buffer_list_pool =
2418 kmem_cache_create(Putfile_buffer_list_pool_name,
2419 sizeof(struct putfile_buffer_entry),
2420 0, SLAB_HWCACHE_ALIGN, NULL);
2421 if (!Putfile_buffer_list_pool) {
2422 ERRDRV("failed to alloc Putfile_buffer_list_pool: (status=-1)\n");
2423 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2424 rc = -1;
2425 goto Away;
2426 }
2427 if (visorchipset_disable_controlvm) {
2428 LOGINF("visorchipset_init:controlvm disabled");
2429 } else {
2430 /* if booting in a crash kernel */
2431 if (visorchipset_crash_kernel)
2432 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2433 setup_crash_devices_work_queue);
2434 else
2435 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2436 controlvm_periodic_work);
2437 Periodic_controlvm_workqueue =
2438 create_singlethread_workqueue("visorchipset_controlvm");
2439
2440 if (Periodic_controlvm_workqueue == NULL) {
2441 ERRDRV("cannot create controlvm workqueue: (status=%d)\n",
2442 -ENOMEM);
2443 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2444 DIAG_SEVERITY_ERR);
2445 rc = -ENOMEM;
2446 goto Away;
2447 }
2448 Most_recent_message_jiffies = jiffies;
2449 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2450 rc = queue_delayed_work(Periodic_controlvm_workqueue,
2451 &Periodic_controlvm_work, Poll_jiffies);
2452 if (rc < 0) {
2453 ERRDRV("queue_delayed_work(Periodic_controlvm_workqueue, &Periodic_controlvm_work, Poll_jiffies): error (status=%d)\n", rc);
2454 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2455 DIAG_SEVERITY_ERR);
2456 goto Away;
2457 }
2458
2459 }
2460
2461 Visorchipset_platform_device.dev.devt = MajorDev;
2462 if (platform_device_register(&Visorchipset_platform_device) < 0) {
2463 ERRDRV("platform_device_register(visorchipset) failed: (status=-1)\n");
2464 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2465 rc = -1;
2466 goto Away;
2467 }
2468 LOGINF("visorchipset device created");
2469 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2470 rc = 0;
2471 Away:
2472 if (rc) {
2473 LOGERR("visorchipset_init failed");
2474 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2475 POSTCODE_SEVERITY_ERR);
2476 }
2477 return rc;
2478 }
2479
2480 static void
2481 visorchipset_exit(void)
2482 {
2483 char s[99];
2484
2485 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2486
2487 if (visorchipset_disable_controlvm) {
2488 ;
2489 } else {
2490 cancel_delayed_work(&Periodic_controlvm_work);
2491 flush_workqueue(Periodic_controlvm_workqueue);
2492 destroy_workqueue(Periodic_controlvm_workqueue);
2493 Periodic_controlvm_workqueue = NULL;
2494 destroy_controlvm_payload_info(&ControlVm_payload_info);
2495 }
2496 Test_Vnic_channel = NULL;
2497 if (Putfile_buffer_list_pool) {
2498 kmem_cache_destroy(Putfile_buffer_list_pool);
2499 Putfile_buffer_list_pool = NULL;
2500 }
2501
2502 cleanup_controlvm_structures();
2503
2504 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2505
2506 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2507
2508 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
2509
2510 LOGINF("Channel %s (ControlVm) disconnected",
2511 visorchannel_id(ControlVm_channel, s));
2512 visorchannel_destroy(ControlVm_channel);
2513
2514 visorchipset_file_cleanup();
2515 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2516 LOGINF("chipset driver unloaded");
2517 }
2518
2519 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2520 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2521 int visorchipset_testvnic = 0;
2522
2523 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2524 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2525 int visorchipset_testvnicclient = 0;
2526
2527 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2528 MODULE_PARM_DESC(visorchipset_testmsg,
2529 "1 to manufacture the chipset, bus, and switch messages");
2530 int visorchipset_testmsg = 0;
2531
2532 module_param_named(major, visorchipset_major, int, S_IRUGO);
2533 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2534 int visorchipset_major = 0;
2535
2536 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2537 MODULE_PARM_DESC(visorchipset_serverreqwait,
2538 "1 to have the module wait for the visor bus to register");
2539 int visorchipset_serverregwait = 0; /* default is off */
2540 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2541 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2542 int visorchipset_clientregwait = 1; /* default is on */
2543 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2544 MODULE_PARM_DESC(visorchipset_testteardown,
2545 "1 to test teardown of the chipset, bus, and switch");
2546 int visorchipset_testteardown = 0; /* default is off */
2547 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2548 S_IRUGO);
2549 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2550 "1 to disable polling of controlVm channel");
2551 int visorchipset_disable_controlvm = 0; /* default is off */
2552 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2553 MODULE_PARM_DESC(visorchipset_crash_kernel,
2554 "1 means we are running in crash kernel");
2555 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2556 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2557 int, S_IRUGO);
2558 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2559 "1 to hold response to CHIPSET_READY");
2560 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2561 * response immediately */
2562 module_init(visorchipset_init);
2563 module_exit(visorchipset_exit);
2564
2565 MODULE_AUTHOR("Unisys");
2566 MODULE_LICENSE("GPL");
2567 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2568 VERSION);
2569 MODULE_VERSION(VERSION);
This page took 0.088122 seconds and 6 git commands to generate.