staging: unisys: refactor GUEST_DEVICES
[deliverable/linux.git] drivers/staging/unisys/visorchipset/visorchipset_main.c
1 /* visorchipset_main.c
2 *
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18 #include "globals.h"
19 #include "visorchipset.h"
20 #include "procobjecttree.h"
21 #include "visorchannel.h"
22 #include "periodic_work.h"
23 #include "testing.h"
24 #include "file.h"
25 #include "parser.h"
26 #include "uniklog.h"
27 #include "uisutils.h"
28 #include "controlvmcompletionstatus.h"
29 #include "guestlinuxdebug.h"
30
31 #include <linux/nls.h>
32 #include <linux/netdevice.h>
33 #include <linux/platform_device.h>
34 #include <linux/uuid.h>
35
36 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
37 #define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
38 * vnic loopback test */
39 #define TEST_VNIC_SWITCHNO 1
40 #define TEST_VNIC_BUSNO 9
41
42 #define MAX_NAME_SIZE 128
43 #define MAX_IP_SIZE 50
44 #define MAXOUTSTANDINGCHANNELCOMMAND 256
45 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
46 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
47
48 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
49 * we switch to slow polling mode. As soon as we get a controlvm
50 * message, we switch back to fast polling mode.
51 */
52 #define MIN_IDLE_SECONDS 10
53 static ulong Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
54 static ulong Most_recent_message_jiffies; /* when we got our last
55 * controlvm message */
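/* Illustrative sketch (not part of this driver): the fast/slow polling
 * switch described above.  The periodic work is assumed to re-arm itself
 * using Poll_jiffies, so toggling that variable is all that is needed;
 * example_adjust_poll_rate() is a hypothetical name.
 */
#if 0
static void example_adjust_poll_rate(BOOL got_message)
{
	if (got_message) {
		Most_recent_message_jiffies = jiffies;
		Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	} else if (time_after(jiffies, Most_recent_message_jiffies +
				       MIN_IDLE_SECONDS * HZ)) {
		/* channel idle long enough; back off to the slow rate */
		Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	}
}
#endif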
56 static inline char *
57 NONULLSTR(char *s)
58 {
59 if (s)
60 return s;
61 return "";
62 }
63
64 static int serverregistered;
65 static int clientregistered;
66
67 #define MAX_CHIPSET_EVENTS 2
68 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
69
70 static struct delayed_work Periodic_controlvm_work;
71 static struct workqueue_struct *Periodic_controlvm_workqueue;
72 static DEFINE_SEMAPHORE(NotifierLock);
73
74 typedef struct {
75 struct controlvm_message message;
76 unsigned int crc;
77 } MESSAGE_ENVELOPE;
78
79 static struct controlvm_message_header g_DiagMsgHdr;
80 static struct controlvm_message_header g_ChipSetMsgHdr;
81 static struct controlvm_message_header g_DelDumpMsgHdr;
82 static const uuid_le UltraDiagPoolChannelProtocolGuid =
83 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
84 /* 0xffffff is an invalid Bus/Device number */
85 static ulong g_diagpoolBusNo = 0xffffff;
86 static ulong g_diagpoolDevNo = 0xffffff;
87 static struct controlvm_message_packet g_DeviceChangeStatePacket;
88
89 /* Only VNIC and VHBA channels are sent to visorclientbus (aka
90 * "visorhackbus")
91 */
92 #define FOR_VISORHACKBUS(channel_type_guid) \
93 (((uuid_le_cmp(channel_type_guid,\
94 spar_vnic_channel_protocol_uuid) == 0)\
95 || (uuid_le_cmp(channel_type_guid,\
96 spar_vhba_channel_protocol_uuid) == 0)))
97 #define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
98
99 #define is_diagpool_channel(channel_type_guid) \
100 (uuid_le_cmp(channel_type_guid, UltraDiagPoolChannelProtocolGuid) == 0)
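/* Illustrative sketch (not part of this driver): how the routing macros
 * above are meant to be read.  example_route_channel() is a hypothetical
 * helper; the real decision is made later via device_epilog()'s
 * for_visorbus argument.
 */
#if 0
static const char *example_route_channel(uuid_le channel_type_guid)
{
	if (FOR_VISORHACKBUS(channel_type_guid))
		return "visorclientbus";	/* only VNIC and VHBA land here */
	if (is_diagpool_channel(channel_type_guid))
		return "visorbus (diag pool)";
	return "visorbus";
}
#endif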
101
102 static LIST_HEAD(BusInfoList);
103 static LIST_HEAD(DevInfoList);
104
105 static VISORCHANNEL *ControlVm_channel;
106
107 typedef struct {
108 u8 __iomem *ptr; /* pointer to base address of payload pool */
109 u64 offset; /* offset from beginning of controlvm
110 * channel to beginning of payload pool */
111 u32 bytes; /* number of bytes in payload pool */
112 } CONTROLVM_PAYLOAD_INFO;
113
114 /* Manages the request payload in the controlvm channel */
115 static CONTROLVM_PAYLOAD_INFO ControlVm_payload_info;
116
117 static struct channel_header *Test_Vnic_channel;
118
119 typedef struct {
120 struct controlvm_message_header Dumpcapture_header;
121 struct controlvm_message_header Gettextdump_header;
122 struct controlvm_message_header Dumpcomplete_header;
123 BOOL Gettextdump_outstanding;
124 u32 crc32;
125 ulong length;
126 atomic_t buffers_in_use;
127 ulong destination;
128 } LIVEDUMP_INFO;
129 /* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
130 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
131 */
132 static LIVEDUMP_INFO LiveDump_info;
133
134 /* The following globals are used to handle the scenario where we are unable to
135 * offload the payload from a controlvm message due to memory requirements. In
136 * this scenario, we simply stash the controlvm message, then attempt to
137 * process it again the next time controlvm_periodic_work() runs.
138 */
139 static struct controlvm_message ControlVm_Pending_Msg;
140 static BOOL ControlVm_Pending_Msg_Valid = FALSE;
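/* Illustrative sketch (not part of this driver): the stash-and-retry
 * pattern described above.  handle_command() (defined later in this file)
 * returns FALSE when payload memory could not be allocated, so the caller
 * keeps the message and retries it on the next poll;
 * example_retry_pending_message() is a hypothetical name.
 */
#if 0
static void example_retry_pending_message(HOSTADDRESS channel_addr)
{
	if (ControlVm_Pending_Msg_Valid &&
	    handle_command(ControlVm_Pending_Msg, channel_addr))
		ControlVm_Pending_Msg_Valid = FALSE;	/* finally processed */
}
#endif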
141
142 /* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
143 * TRANSMIT_FILE PutFile payloads.
144 */
145 static struct kmem_cache *Putfile_buffer_list_pool;
146 static const char Putfile_buffer_list_pool_name[] =
147 "controlvm_putfile_buffer_list_pool";
148
149 /* This identifies a data buffer that has been received via a controlvm message
150 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
151 */
152 struct putfile_buffer_entry {
153 struct list_head next; /* putfile_buffer_entry list */
154 PARSER_CONTEXT *parser_ctx; /* points to buffer containing input data */
155 };
156
157 /* List of struct putfile_request *, via next_putfile_request member.
158 * Each entry in this list identifies an outstanding TRANSMIT_FILE
159 * conversation.
160 */
161 static LIST_HEAD(Putfile_request_list);
162
163 /* This describes a buffer and its current state of transfer (e.g., how many
164 * bytes have already been supplied as putfile data, and how many bytes are
165 * remaining) for a putfile_request.
166 */
167 struct putfile_active_buffer {
168 /* a payload from a controlvm message, containing a file data buffer */
169 PARSER_CONTEXT *parser_ctx;
170 /* points within data area of parser_ctx to next byte of data */
171 u8 *pnext;
172 /* # bytes left from <pnext> to the end of this data buffer */
173 size_t bytes_remaining;
174 };
175
176 #define PUTFILE_REQUEST_SIG 0x0906101302281211
177 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
178 * conversation. Structs of this type are dynamically linked into
179 * <Putfile_request_list>.
180 */
181 struct putfile_request {
182 u64 sig; /* PUTFILE_REQUEST_SIG */
183
184 /* header from original TransmitFile request */
185 struct controlvm_message_header controlvm_header;
186 u64 file_request_number; /* from original TransmitFile request */
187
188 /* link to next struct putfile_request */
189 struct list_head next_putfile_request;
190
191 /* most-recent sequence number supplied via a controlvm message */
192 u64 data_sequence_number;
193
194 /* head of putfile_buffer_entry list, which describes the data to be
195 * supplied as putfile data;
196 * - this list is added to when controlvm messages come in that supply
197 * file data
198 * - this list is removed from via the hotplug program that is actually
199 * consuming these buffers to write as file data */
200 struct list_head input_buffer_list;
201 spinlock_t req_list_lock; /* lock for input_buffer_list */
202
203 /* waiters for input_buffer_list to go non-empty */
204 wait_queue_head_t input_buffer_wq;
205
206 /* data not yet read within current putfile_buffer_entry */
207 struct putfile_active_buffer active_buf;
208
209 /* <0 = failed, 0 = in-progress, >0 = successful; */
210 /* note that this must be set while holding req_list_lock, and if you set <0, */
211 /* it is your responsibility to also free up all of the other objects */
212 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
213 /* before releasing the lock */
214 int completion_status;
215 };
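/* Illustrative sketch (not part of this driver): how a consumer of a
 * struct putfile_request might pop the next buffer off input_buffer_list,
 * following the locking rules in the comments above.
 * example_next_putfile_buffer() is a hypothetical helper; the real consumer
 * is the hotplug-driven program that writes the file data out.
 */
#if 0
static struct putfile_buffer_entry *
example_next_putfile_buffer(struct putfile_request *req)
{
	struct putfile_buffer_entry *entry = NULL;

	spin_lock(&req->req_list_lock);
	if (!list_empty(&req->input_buffer_list)) {
		entry = list_first_entry(&req->input_buffer_list,
					 struct putfile_buffer_entry, next);
		list_del(&entry->next);
	}
	spin_unlock(&req->req_list_lock);
	return entry;	/* NULL means sleep on input_buffer_wq and retry */
}
#endif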
216
217 static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
218
219 struct parahotplug_request {
220 struct list_head list;
221 int id;
222 unsigned long expiration;
223 struct controlvm_message msg;
224 };
225
226 static LIST_HEAD(Parahotplug_request_list);
227 static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
228 static void parahotplug_process_list(void);
229
230 /* Notifier callback sets registered by the server and client bus drivers
231 * via visorchipset_register_busdev_server()/_client() below.
232 */
233 static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Server_Notifiers;
234 static VISORCHIPSET_BUSDEV_NOTIFIERS BusDev_Client_Notifiers;
235
236 static void bus_create_response(ulong busNo, int response);
237 static void bus_destroy_response(ulong busNo, int response);
238 static void device_create_response(ulong busNo, ulong devNo, int response);
239 static void device_destroy_response(ulong busNo, ulong devNo, int response);
240 static void device_resume_response(ulong busNo, ulong devNo, int response);
241
242 static VISORCHIPSET_BUSDEV_RESPONDERS BusDev_Responders = {
243 .bus_create = bus_create_response,
244 .bus_destroy = bus_destroy_response,
245 .device_create = device_create_response,
246 .device_destroy = device_destroy_response,
247 .device_pause = visorchipset_device_pause_response,
248 .device_resume = device_resume_response,
249 };
250
251 /* info for /dev/visorchipset */
252 static dev_t MajorDev = -1; /**< indicates major num for device */
253
254 /* prototypes for attributes */
255 static ssize_t toolaction_show(struct device *dev,
256 struct device_attribute *attr, char *buf);
257 static ssize_t toolaction_store(struct device *dev,
258 struct device_attribute *attr, const char *buf, size_t count);
259 static DEVICE_ATTR_RW(toolaction);
260
261 static ssize_t boottotool_show(struct device *dev,
262 struct device_attribute *attr, char *buf);
263 static ssize_t boottotool_store(struct device *dev,
264 struct device_attribute *attr, const char *buf, size_t count);
265 static DEVICE_ATTR_RW(boottotool);
266
267 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
268 char *buf);
269 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
270 const char *buf, size_t count);
271 static DEVICE_ATTR_RW(error);
272
273 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
274 char *buf);
275 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
276 const char *buf, size_t count);
277 static DEVICE_ATTR_RW(textid);
278
279 static ssize_t remaining_steps_show(struct device *dev,
280 struct device_attribute *attr, char *buf);
281 static ssize_t remaining_steps_store(struct device *dev,
282 struct device_attribute *attr, const char *buf, size_t count);
283 static DEVICE_ATTR_RW(remaining_steps);
284
285 static ssize_t chipsetready_store(struct device *dev,
286 struct device_attribute *attr, const char *buf, size_t count);
287 static DEVICE_ATTR_WO(chipsetready);
288
289 static ssize_t devicedisabled_store(struct device *dev,
290 struct device_attribute *attr, const char *buf, size_t count);
291 static DEVICE_ATTR_WO(devicedisabled);
292
293 static ssize_t deviceenabled_store(struct device *dev,
294 struct device_attribute *attr, const char *buf, size_t count);
295 static DEVICE_ATTR_WO(deviceenabled);
296
297 static struct attribute *visorchipset_install_attrs[] = {
298 &dev_attr_toolaction.attr,
299 &dev_attr_boottotool.attr,
300 &dev_attr_error.attr,
301 &dev_attr_textid.attr,
302 &dev_attr_remaining_steps.attr,
303 NULL
304 };
305
306 static struct attribute_group visorchipset_install_group = {
307 .name = "install",
308 .attrs = visorchipset_install_attrs
309 };
310
311 static struct attribute *visorchipset_guest_attrs[] = {
312 &dev_attr_chipsetready.attr,
313 NULL
314 };
315
316 static struct attribute_group visorchipset_guest_group = {
317 .name = "guest",
318 .attrs = visorchipset_guest_attrs
319 };
320
321 static struct attribute *visorchipset_parahotplug_attrs[] = {
322 &dev_attr_devicedisabled.attr,
323 &dev_attr_deviceenabled.attr,
324 NULL
325 };
326
327 static struct attribute_group visorchipset_parahotplug_group = {
328 .name = "parahotplug",
329 .attrs = visorchipset_parahotplug_attrs
330 };
331
332 static const struct attribute_group *visorchipset_dev_groups[] = {
333 &visorchipset_install_group,
334 &visorchipset_guest_group,
335 &visorchipset_parahotplug_group,
336 NULL
337 };
338
339 /* /sys/devices/platform/visorchipset */
340 static struct platform_device Visorchipset_platform_device = {
341 .name = "visorchipset",
342 .id = -1,
343 .dev.groups = visorchipset_dev_groups,
344 };
345
346 /* Function prototypes */
347 static void controlvm_respond(struct controlvm_message_header *msgHdr,
348 int response);
349 static void controlvm_respond_chipset_init(
350 struct controlvm_message_header *msgHdr, int response,
351 enum ultra_chipset_feature features);
352 static void controlvm_respond_physdev_changestate(
353 struct controlvm_message_header *msgHdr, int response,
354 struct spar_segment_state state);
355
356 static ssize_t toolaction_show(struct device *dev,
357 struct device_attribute *attr,
358 char *buf)
359 {
360 u8 toolAction;
361
362 visorchannel_read(ControlVm_channel,
363 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
364 ToolAction), &toolAction, sizeof(u8));
365 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
366 }
367
368 static ssize_t toolaction_store(struct device *dev,
369 struct device_attribute *attr,
370 const char *buf, size_t count)
371 {
372 u8 toolAction;
373 int ret;
374
375 if (kstrtou8(buf, 10, &toolAction) != 0)
376 return -EINVAL;
377
378 ret = visorchannel_write(ControlVm_channel,
379 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL, ToolAction),
380 &toolAction, sizeof(u8));
381
382 if (ret)
383 return ret;
384 return count;
385 }
386
387 static ssize_t boottotool_show(struct device *dev,
388 struct device_attribute *attr,
389 char *buf)
390 {
391 struct efi_spar_indication efiSparIndication;
392
393 visorchannel_read(ControlVm_channel,
394 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
395 EfiSparIndication), &efiSparIndication,
396 sizeof(struct efi_spar_indication));
397 return scnprintf(buf, PAGE_SIZE, "%u\n",
398 efiSparIndication.boot_to_tool);
399 }
400
401 static ssize_t boottotool_store(struct device *dev,
402 struct device_attribute *attr,
403 const char *buf, size_t count)
404 {
405 int val, ret;
406 struct efi_spar_indication efiSparIndication;
407
408 if (kstrtoint(buf, 10, &val) != 0)
409 return -EINVAL;
410
411 efiSparIndication.boot_to_tool = val;
412 ret = visorchannel_write(ControlVm_channel,
413 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
414 EfiSparIndication),
415 &(efiSparIndication),
416 sizeof(struct efi_spar_indication));
417
418 if (ret)
419 return ret;
420 return count;
421 }
422
423 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
424 char *buf)
425 {
426 u32 error;
427
428 visorchannel_read(ControlVm_channel, offsetof(
429 ULTRA_CONTROLVM_CHANNEL_PROTOCOL, InstallationError),
430 &error, sizeof(u32));
431 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
432 }
433
434 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
435 const char *buf, size_t count)
436 {
437 u32 error;
438 int ret;
439
440 if (kstrtou32(buf, 10, &error) != 0)
441 return -EINVAL;
442
443 ret = visorchannel_write(ControlVm_channel,
444 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
445 InstallationError),
446 &error, sizeof(u32));
447 if (ret)
448 return ret;
449 return count;
450 }
451
452 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
453 char *buf)
454 {
455 u32 textId;
456
457 visorchannel_read(ControlVm_channel, offsetof(
458 ULTRA_CONTROLVM_CHANNEL_PROTOCOL, InstallationTextId),
459 &textId, sizeof(u32));
460 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
461 }
462
463 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
464 const char *buf, size_t count)
465 {
466 u32 textId;
467 int ret;
468
469 if (kstrtou32(buf, 10, &textId) != 0)
470 return -EINVAL;
471
472 ret = visorchannel_write(ControlVm_channel,
473 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
474 InstallationTextId),
475 &textId, sizeof(u32));
476 if (ret)
477 return ret;
478 return count;
479 }
480
481
482 static ssize_t remaining_steps_show(struct device *dev,
483 struct device_attribute *attr, char *buf)
484 {
485 u16 remainingSteps;
486
487 visorchannel_read(ControlVm_channel,
488 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
489 InstallationRemainingSteps),
490 &remainingSteps,
491 sizeof(u16));
492 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
493 }
494
495 static ssize_t remaining_steps_store(struct device *dev,
496 struct device_attribute *attr, const char *buf, size_t count)
497 {
498 u16 remainingSteps;
499 int ret;
500
501 if (kstrtou16(buf, 10, &remainingSteps) != 0)
502 return -EINVAL;
503
504 ret = visorchannel_write(ControlVm_channel,
505 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
506 InstallationRemainingSteps),
507 &remainingSteps, sizeof(u16));
508 if (ret)
509 return ret;
510 return count;
511 }
512
513 #if 0
514 static void
515 testUnicode(void)
516 {
517 wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
518 char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
519 wchar_t unicode2[99];
int chrs; /* holds the return values of the utf8 conversions below */
520
521 /* NOTE: Either due to a bug, or a feature I don't understand, the
522 * kernel utf8_mbstowcs() and utf8_wcstombs() do NOT copy the
523 * trailing NUL byte!! REALLY!!!!! Arrrrgggghhhhh
524 */
525
526 LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
527 LOGINF("utf8_wcstombs=%d",
528 chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
529 if (chrs >= 0)
530 s[chrs] = '\0'; /* GRRRRRRRR */
531 LOGINF("s='%s'", s);
532 LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
533 if (chrs >= 0)
534 unicode2[chrs] = 0; /* GRRRRRRRR */
535 if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
536 LOGINF("strings match... good");
537 else
538 LOGINF("strings did not match!!");
539 }
540 #endif
541
542 static void
543 busInfo_clear(void *v)
544 {
545 VISORCHIPSET_BUS_INFO *p = (VISORCHIPSET_BUS_INFO *) (v);
546
547 if (p->procObject) {
548 visor_proc_DestroyObject(p->procObject);
549 p->procObject = NULL;
550 }
551 kfree(p->name);
552 p->name = NULL;
553
554 kfree(p->description);
555 p->description = NULL;
556
557 p->state.created = 0;
558 memset(p, 0, sizeof(VISORCHIPSET_BUS_INFO));
559 }
560
561 static void
562 devInfo_clear(void *v)
563 {
564 VISORCHIPSET_DEVICE_INFO *p = (VISORCHIPSET_DEVICE_INFO *) (v);
565
566 p->state.created = 0;
567 memset(p, 0, sizeof(VISORCHIPSET_DEVICE_INFO));
568 }
569
570 static u8
571 check_chipset_events(void)
572 {
573 int i;
574 u8 send_msg = 1;
575 /* Check events to determine if response should be sent */
576 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
577 send_msg &= chipset_events[i];
578 return send_msg;
579 }
580
581 static void
582 clear_chipset_events(void)
583 {
584 int i;
585 /* Clear chipset_events */
586 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
587 chipset_events[i] = 0;
588 }
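/* Illustrative sketch (not part of this driver): the intent of the two
 * helpers above.  Each slot in chipset_events[] records that one expected
 * event has been seen; check_chipset_events() only returns nonzero once
 * every slot is set.  example_mark_chipset_event() is a hypothetical name
 * for whatever code records an event.
 */
#if 0
static void example_mark_chipset_event(int event_index)
{
	if (event_index < 0 || event_index >= MAX_CHIPSET_EVENTS)
		return;
	chipset_events[event_index] = 1;
	/* once all slots are 1, check_chipset_events() reports that the
	 * gated response may be sent */
}
#endif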
589
590 void
591 visorchipset_register_busdev_server(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
592 VISORCHIPSET_BUSDEV_RESPONDERS *responders,
593 struct ultra_vbus_deviceinfo *driverInfo)
594 {
595 down(&NotifierLock);
596 if (notifiers == NULL) {
597 memset(&BusDev_Server_Notifiers, 0,
598 sizeof(BusDev_Server_Notifiers));
599 serverregistered = 0; /* clear flag */
600 } else {
601 BusDev_Server_Notifiers = *notifiers;
602 serverregistered = 1; /* set flag */
603 }
604 if (responders)
605 *responders = BusDev_Responders;
606 if (driverInfo)
607 bus_device_info_init(driverInfo, "chipset", "visorchipset",
608 VERSION, NULL);
609
610 up(&NotifierLock);
611 }
612 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
613
614 void
615 visorchipset_register_busdev_client(VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers,
616 VISORCHIPSET_BUSDEV_RESPONDERS *responders,
617 struct ultra_vbus_deviceinfo *driverInfo)
618 {
619 down(&NotifierLock);
620 if (notifiers == NULL) {
621 memset(&BusDev_Client_Notifiers, 0,
622 sizeof(BusDev_Client_Notifiers));
623 clientregistered = 0; /* clear flag */
624 } else {
625 BusDev_Client_Notifiers = *notifiers;
626 clientregistered = 1; /* set flag */
627 }
628 if (responders)
629 *responders = BusDev_Responders;
630 if (driverInfo)
631 bus_device_info_init(driverInfo, "chipset(bolts)", "visorchipset",
632 VERSION, NULL);
633 up(&NotifierLock);
634 }
635 EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
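/* Illustrative sketch (not part of this driver): how a client bus driver
 * might use the registration call above.  The example_* names are
 * hypothetical; the notifier/responder field names match the structs used
 * throughout this file.
 */
#if 0
static VISORCHIPSET_BUSDEV_RESPONDERS example_responders;
static struct ultra_vbus_deviceinfo example_driver_info;

static void example_bus_create(ulong bus_no)
{
	/* ...create the bus..., then acknowledge back to visorchipset */
	if (example_responders.bus_create)
		(*example_responders.bus_create) (bus_no, CONTROLVM_RESP_SUCCESS);
}

static void example_register(void)
{
	static VISORCHIPSET_BUSDEV_NOTIFIERS notifiers = {
		.bus_create = example_bus_create,
		/* .bus_destroy, .device_create, .device_destroy,
		 * .device_pause and .device_resume would be set similarly */
	};

	visorchipset_register_busdev_client(&notifiers, &example_responders,
					    &example_driver_info);
}
#endif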
636
637 static void
638 cleanup_controlvm_structures(void)
639 {
640 VISORCHIPSET_BUS_INFO *bi, *tmp_bi;
641 VISORCHIPSET_DEVICE_INFO *di, *tmp_di;
642
643 list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
644 busInfo_clear(bi);
645 list_del(&bi->entry);
646 kfree(bi);
647 }
648
649 list_for_each_entry_safe(di, tmp_di, &DevInfoList, entry) {
650 devInfo_clear(di);
651 list_del(&di->entry);
652 kfree(di);
653 }
654 }
655
656 static void
657 chipset_init(struct controlvm_message *inmsg)
658 {
659 static int chipset_inited;
660 enum ultra_chipset_feature features = 0;
661 int rc = CONTROLVM_RESP_SUCCESS;
662
663 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
664 if (chipset_inited) {
665 LOGERR("CONTROLVM_CHIPSET_INIT Failed: Already Done.");
666 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
667 goto Away;
668 }
669 chipset_inited = 1;
670 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
671
672 /* Set features to indicate we support parahotplug (if Command
673 * also supports it). */
674 features =
675 inmsg->cmd.init_chipset.
676 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
677
678 /* Set the "reply" bit so Command knows this is a
679 * features-aware driver. */
680 features |= ULTRA_CHIPSET_FEATURE_REPLY;
681
682 Away:
683 if (rc < 0)
684 cleanup_controlvm_structures();
685 if (inmsg->hdr.flags.response_expected)
686 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
687 }
688
689 static void
690 controlvm_init_response(struct controlvm_message *msg,
691 struct controlvm_message_header *msgHdr, int response)
692 {
693 memset(msg, 0, sizeof(struct controlvm_message));
694 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
695 msg->hdr.payload_bytes = 0;
696 msg->hdr.payload_vm_offset = 0;
697 msg->hdr.payload_max_bytes = 0;
698 if (response < 0) {
699 msg->hdr.flags.failed = 1;
700 msg->hdr.completion_status = (u32) (-response);
701 }
702 }
703
704 static void
705 controlvm_respond(struct controlvm_message_header *msgHdr, int response)
706 {
707 struct controlvm_message outmsg;
708
709 controlvm_init_response(&outmsg, msgHdr, response);
710 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
711 * back the deviceChangeState structure in the packet. */
712 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE
713 && g_DeviceChangeStatePacket.device_change_state.bus_no ==
714 g_diagpoolBusNo
715 && g_DeviceChangeStatePacket.device_change_state.dev_no ==
716 g_diagpoolDevNo)
717 outmsg.cmd = g_DeviceChangeStatePacket;
718 if (outmsg.hdr.flags.test_message == 1) {
719 LOGINF("%s controlvm_msg=0x%x response=%d for test message",
720 __func__, outmsg.hdr.id, response);
721 return;
722 }
723 if (!visorchannel_signalinsert(ControlVm_channel,
724 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
725 LOGERR("signalinsert failed!");
726 return;
727 }
728 }
729
730 static void
731 controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
732 int response,
733 enum ultra_chipset_feature features)
734 {
735 struct controlvm_message outmsg;
736
737 controlvm_init_response(&outmsg, msgHdr, response);
738 outmsg.cmd.init_chipset.features = features;
739 if (!visorchannel_signalinsert(ControlVm_channel,
740 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
741 LOGERR("signalinsert failed!");
742 return;
743 }
744 }
745
746 static void controlvm_respond_physdev_changestate(
747 struct controlvm_message_header *msgHdr, int response,
748 struct spar_segment_state state)
749 {
750 struct controlvm_message outmsg;
751
752 controlvm_init_response(&outmsg, msgHdr, response);
753 outmsg.cmd.device_change_state.state = state;
754 outmsg.cmd.device_change_state.flags.phys_device = 1;
755 if (!visorchannel_signalinsert(ControlVm_channel,
756 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
757 LOGERR("signalinsert failed!");
758 return;
759 }
760 }
761
762 void
763 visorchipset_save_message(struct controlvm_message *msg, CRASH_OBJ_TYPE type)
764 {
765 u32 localSavedCrashMsgOffset;
766 u16 localSavedCrashMsgCount;
767
768 /* get saved message count */
769 if (visorchannel_read(ControlVm_channel,
770 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
771 SavedCrashMsgCount),
772 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
773 LOGERR("failed to get Saved Message Count");
774 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
775 POSTCODE_SEVERITY_ERR);
776 return;
777 }
778
779 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
780 LOGERR("Saved Message Count incorrect %d",
781 localSavedCrashMsgCount);
782 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
783 localSavedCrashMsgCount,
784 POSTCODE_SEVERITY_ERR);
785 return;
786 }
787
788 /* get saved crash message offset */
789 if (visorchannel_read(ControlVm_channel,
790 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
791 SavedCrashMsgOffset),
792 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
793 LOGERR("failed to get Saved Message Offset");
794 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
795 POSTCODE_SEVERITY_ERR);
796 return;
797 }
798
799 if (type == CRASH_bus) {
800 if (visorchannel_write(ControlVm_channel,
801 localSavedCrashMsgOffset,
802 msg,
803 sizeof(struct controlvm_message)) < 0) {
804 LOGERR("SAVE_MSG_BUS_FAILURE: Failed to write CrashCreateBusMsg!");
805 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
806 POSTCODE_SEVERITY_ERR);
807 return;
808 }
809 } else {
810 if (visorchannel_write(ControlVm_channel,
811 localSavedCrashMsgOffset +
812 sizeof(struct controlvm_message), msg,
813 sizeof(struct controlvm_message)) < 0) {
814 LOGERR("SAVE_MSG_DEV_FAILURE: Failed to write CrashCreateDevMsg!");
815 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
816 POSTCODE_SEVERITY_ERR);
817 return;
818 }
819 }
820 }
821 EXPORT_SYMBOL_GPL(visorchipset_save_message);
822
823 static void
824 bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
825 {
826 VISORCHIPSET_BUS_INFO *p = NULL;
827 BOOL need_clear = FALSE;
828
829 p = findbus(&BusInfoList, busNo);
830 if (!p) {
831 LOGERR("internal error busNo=%lu", busNo);
832 return;
833 }
834 if (response < 0) {
835 if ((cmdId == CONTROLVM_BUS_CREATE) &&
836 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
837 /* undo the row we just created... */
838 delbusdevices(&DevInfoList, busNo);
839 } else {
840 if (cmdId == CONTROLVM_BUS_CREATE)
841 p->state.created = 1;
842 if (cmdId == CONTROLVM_BUS_DESTROY)
843 need_clear = TRUE;
844 }
845
846 if (p->pendingMsgHdr.id == CONTROLVM_INVALID) {
847 LOGERR("bus_responder no pending msg");
848 return; /* no controlvm response needed */
849 }
850 if (p->pendingMsgHdr.id != (u32) cmdId) {
851 LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.id);
852 return;
853 }
854 controlvm_respond(&p->pendingMsgHdr, response);
855 p->pendingMsgHdr.id = CONTROLVM_INVALID;
856 if (need_clear) {
857 busInfo_clear(p);
858 delbusdevices(&DevInfoList, busNo);
859 }
860 }
861
862 static void
863 device_changestate_responder(enum controlvm_id cmdId,
864 ulong busNo, ulong devNo, int response,
865 struct spar_segment_state responseState)
866 {
867 VISORCHIPSET_DEVICE_INFO *p = NULL;
868 struct controlvm_message outmsg;
869
870 p = finddevice(&DevInfoList, busNo, devNo);
871 if (!p) {
872 LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
873 return;
874 }
875 if (p->pendingMsgHdr.id == CONTROLVM_INVALID) {
876 LOGERR("device_responder no pending msg");
877 return; /* no controlvm response needed */
878 }
879 if (p->pendingMsgHdr.id != cmdId) {
880 LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.id);
881 return;
882 }
883
884 controlvm_init_response(&outmsg, &p->pendingMsgHdr, response);
885
886 outmsg.cmd.device_change_state.bus_no = busNo;
887 outmsg.cmd.device_change_state.dev_no = devNo;
888 outmsg.cmd.device_change_state.state = responseState;
889
890 if (!visorchannel_signalinsert(ControlVm_channel,
891 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
892 LOGERR("signalinsert failed!");
893 return;
894 }
895
896 p->pendingMsgHdr.id = CONTROLVM_INVALID;
897 }
898
899 static void
900 device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
901 int response)
902 {
903 VISORCHIPSET_DEVICE_INFO *p = NULL;
904 BOOL need_clear = FALSE;
905
906 p = finddevice(&DevInfoList, busNo, devNo);
907 if (!p) {
908 LOGERR("internal error; busNo=%lu, devNo=%lu", busNo, devNo);
909 return;
910 }
911 if (response >= 0) {
912 if (cmdId == CONTROLVM_DEVICE_CREATE)
913 p->state.created = 1;
914 if (cmdId == CONTROLVM_DEVICE_DESTROY)
915 need_clear = TRUE;
916 }
917
918 if (p->pendingMsgHdr.id == CONTROLVM_INVALID) {
919 LOGERR("device_responder no pending msg");
920 return; /* no controlvm response needed */
921 }
922 if (p->pendingMsgHdr.id != (u32) cmdId) {
923 LOGERR("expected=%d, found=%d", cmdId, p->pendingMsgHdr.id);
924 return;
925 }
926 controlvm_respond(&p->pendingMsgHdr, response);
927 p->pendingMsgHdr.id = CONTROLVM_INVALID;
928 if (need_clear)
929 devInfo_clear(p);
930 }
931
932 static void
933 bus_epilog(u32 busNo,
934 u32 cmd, struct controlvm_message_header *msgHdr,
935 int response, BOOL needResponse)
936 {
937 BOOL notified = FALSE;
938
939 VISORCHIPSET_BUS_INFO *pBusInfo = findbus(&BusInfoList, busNo);
940
941 if (!pBusInfo) {
942 LOGERR("HUH? bad busNo=%d", busNo);
943 return;
944 }
945 if (needResponse) {
946 memcpy(&pBusInfo->pendingMsgHdr, msgHdr,
947 sizeof(struct controlvm_message_header));
948 } else
949 pBusInfo->pendingMsgHdr.id = CONTROLVM_INVALID;
950
951 down(&NotifierLock);
952 if (response == CONTROLVM_RESP_SUCCESS) {
953 switch (cmd) {
954 case CONTROLVM_BUS_CREATE:
955 /* We can't tell from the bus_create
956 * information which of our 2 bus flavors the
957 * devices on this bus will ultimately end up.
958 * FORTUNATELY, it turns out it is harmless to
959 * send the bus_create to both of them. We can
960 * narrow things down a little bit, though,
961 * because we know: - BusDev_Server can handle
962 * either server or client devices
963 * - BusDev_Client can handle ONLY client
964 * devices */
965 if (BusDev_Server_Notifiers.bus_create) {
966 (*BusDev_Server_Notifiers.bus_create) (busNo);
967 notified = TRUE;
968 }
969 if ((!pBusInfo->flags.server) /*client */ &&
970 BusDev_Client_Notifiers.bus_create) {
971 (*BusDev_Client_Notifiers.bus_create) (busNo);
972 notified = TRUE;
973 }
974 break;
975 case CONTROLVM_BUS_DESTROY:
976 if (BusDev_Server_Notifiers.bus_destroy) {
977 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
978 notified = TRUE;
979 }
980 if ((!pBusInfo->flags.server) /*client */ &&
981 BusDev_Client_Notifiers.bus_destroy) {
982 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
983 notified = TRUE;
984 }
985 break;
986 }
987 }
988 if (notified)
989 /* The callback function just called above is responsible
990 * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
991 * function, which will call bus_responder()
992 */
993 ;
994 else
995 bus_responder(cmd, busNo, response);
996 up(&NotifierLock);
997 }
998
999 static void
1000 device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
1001 struct controlvm_message_header *msgHdr, int response,
1002 BOOL needResponse, BOOL for_visorbus)
1003 {
1004 VISORCHIPSET_BUSDEV_NOTIFIERS *notifiers = NULL;
1005 BOOL notified = FALSE;
1006
1007 VISORCHIPSET_DEVICE_INFO *pDevInfo =
1008 finddevice(&DevInfoList, busNo, devNo);
1009 char *envp[] = {
1010 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
1011 NULL
1012 };
1013
1014 if (!pDevInfo) {
1015 LOGERR("HUH? bad busNo=%d, devNo=%d", busNo, devNo);
1016 return;
1017 }
1018 if (for_visorbus)
1019 notifiers = &BusDev_Server_Notifiers;
1020 else
1021 notifiers = &BusDev_Client_Notifiers;
1022 if (needResponse) {
1023 memcpy(&pDevInfo->pendingMsgHdr, msgHdr,
1024 sizeof(struct controlvm_message_header));
1025 } else
1026 pDevInfo->pendingMsgHdr.id = CONTROLVM_INVALID;
1027
1028 down(&NotifierLock);
1029 if (response >= 0) {
1030 switch (cmd) {
1031 case CONTROLVM_DEVICE_CREATE:
1032 if (notifiers->device_create) {
1033 (*notifiers->device_create) (busNo, devNo);
1034 notified = TRUE;
1035 }
1036 break;
1037 case CONTROLVM_DEVICE_CHANGESTATE:
1038 /* ServerReady / ServerRunning / SegmentStateRunning */
1039 if (state.alive == segment_state_running.alive &&
1040 state.operating ==
1041 segment_state_running.operating) {
1042 if (notifiers->device_resume) {
1043 (*notifiers->device_resume) (busNo,
1044 devNo);
1045 notified = TRUE;
1046 }
1047 }
1048 /* ServerNotReady / ServerLost / SegmentStateStandby */
1049 else if (state.alive == segment_state_standby.alive &&
1050 state.operating ==
1051 segment_state_standby.operating) {
1052 /* technically this is standby case
1053 * where server is lost
1054 */
1055 if (notifiers->device_pause) {
1056 (*notifiers->device_pause) (busNo,
1057 devNo);
1058 notified = TRUE;
1059 }
1060 } else if (state.alive == segment_state_paused.alive &&
1061 state.operating ==
1062 segment_state_paused.operating) {
1063 /* this is lite pause where channel is
1064 * still valid just 'pause' of it
1065 */
1066 if (busNo == g_diagpoolBusNo
1067 && devNo == g_diagpoolDevNo) {
1068 LOGINF("DEVICE_CHANGESTATE(DiagpoolChannel busNo=%d devNo=%d is pausing...)",
1069 busNo, devNo);
1070 /* this will trigger the
1071 * diag_shutdown.sh script in
1072 * the visorchipset hotplug */
1073 kobject_uevent_env
1074 (&Visorchipset_platform_device.dev.
1075 kobj, KOBJ_ONLINE, envp);
1076 }
1077 }
1078 break;
1079 case CONTROLVM_DEVICE_DESTROY:
1080 if (notifiers->device_destroy) {
1081 (*notifiers->device_destroy) (busNo, devNo);
1082 notified = TRUE;
1083 }
1084 break;
1085 }
1086 }
1087 if (notified)
1088 /* The callback function just called above is responsible
1089 * for calling the appropriate VISORCHIPSET_BUSDEV_RESPONDERS
1090 * function, which will call device_responder()
1091 */
1092 ;
1093 else
1094 device_responder(cmd, busNo, devNo, response);
1095 up(&NotifierLock);
1096 }
1097
1098 static void
1099 bus_create(struct controlvm_message *inmsg)
1100 {
1101 struct controlvm_message_packet *cmd = &inmsg->cmd;
1102 ulong busNo = cmd->create_bus.bus_no;
1103 int rc = CONTROLVM_RESP_SUCCESS;
1104 VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
1105
1106
1107 pBusInfo = findbus(&BusInfoList, busNo);
1108 if (pBusInfo && (pBusInfo->state.created == 1)) {
1109 LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu already exists",
1110 busNo);
1111 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1112 POSTCODE_SEVERITY_ERR);
1113 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1114 goto Away;
1115 }
1116 pBusInfo = kzalloc(sizeof(VISORCHIPSET_BUS_INFO), GFP_KERNEL);
1117 if (pBusInfo == NULL) {
1118 LOGERR("CONTROLVM_BUS_CREATE Failed: bus %lu kzalloc failed",
1119 busNo);
1120 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1121 POSTCODE_SEVERITY_ERR);
1122 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1123 goto Away;
1124 }
1125
1126 INIT_LIST_HEAD(&pBusInfo->entry);
1127 pBusInfo->busNo = busNo;
1128 pBusInfo->devNo = cmd->create_bus.dev_count;
1129
1130 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1131
1132 if (inmsg->hdr.flags.test_message == 1)
1133 pBusInfo->chanInfo.addrType = ADDRTYPE_localTest;
1134 else
1135 pBusInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
1136
1137 pBusInfo->flags.server = inmsg->hdr.flags.server;
1138 pBusInfo->chanInfo.channelAddr = cmd->create_bus.channel_addr;
1139 pBusInfo->chanInfo.nChannelBytes = cmd->create_bus.channel_bytes;
1140 pBusInfo->chanInfo.channelTypeGuid = cmd->create_bus.bus_data_type_uuid;
1141 pBusInfo->chanInfo.channelInstGuid = cmd->create_bus.bus_inst_uuid;
1142
1143 list_add(&pBusInfo->entry, &BusInfoList);
1144
1145 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1146
1147 Away:
1148 bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1149 rc, inmsg->hdr.flags.response_expected == 1);
1150 }
1151
1152 static void
1153 bus_destroy(struct controlvm_message *inmsg)
1154 {
1155 struct controlvm_message_packet *cmd = &inmsg->cmd;
1156 ulong busNo = cmd->destroy_bus.bus_no;
1157 VISORCHIPSET_BUS_INFO *pBusInfo;
1158 int rc = CONTROLVM_RESP_SUCCESS;
1159
1160 pBusInfo = findbus(&BusInfoList, busNo);
1161 if (!pBusInfo) {
1162 LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu invalid", busNo);
1163 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1164 goto Away;
1165 }
1166 if (pBusInfo->state.created == 0) {
1167 LOGERR("CONTROLVM_BUS_DESTROY Failed: bus %lu already destroyed",
1168 busNo);
1169 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1170 goto Away;
1171 }
1172
1173 Away:
1174 bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1175 rc, inmsg->hdr.flags.response_expected == 1);
1176 }
1177
1178 static void
1179 bus_configure(struct controlvm_message *inmsg, PARSER_CONTEXT *parser_ctx)
1180 {
1181 struct controlvm_message_packet *cmd = &inmsg->cmd;
1182 ulong busNo = cmd->configure_bus.bus_no;
1183 VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
1184 int rc = CONTROLVM_RESP_SUCCESS;
1185 char s[99];
1186
1187 busNo = cmd->configure_bus.bus_no;
1188 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1189
1190 pBusInfo = findbus(&BusInfoList, busNo);
1191 if (!pBusInfo) {
1192 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu invalid",
1193 busNo);
1194 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1195 POSTCODE_SEVERITY_ERR);
1196 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1197 goto Away;
1198 }
1199 if (pBusInfo->state.created == 0) {
1200 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: Invalid bus %lu - not created yet",
1201 busNo);
1202 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1203 POSTCODE_SEVERITY_ERR);
1204 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1205 goto Away;
1206 }
1207 /* TBD - add this check to other commands also... */
1208 if (pBusInfo->pendingMsgHdr.id != CONTROLVM_INVALID) {
1209 LOGERR("CONTROLVM_BUS_CONFIGURE Failed: bus %lu MsgId=%u outstanding",
1210 busNo, (uint) pBusInfo->pendingMsgHdr.id);
1211 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1212 POSTCODE_SEVERITY_ERR);
1213 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1214 goto Away;
1215 }
1216
1217 pBusInfo->partitionHandle = cmd->configure_bus.guest_handle;
1218 pBusInfo->partitionGuid = parser_id_get(parser_ctx);
1219 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1220 pBusInfo->name = parser_string_get(parser_ctx);
1221
1222 visorchannel_uuid_id(&pBusInfo->partitionGuid, s);
1223 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1224 Away:
1225 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1226 rc, inmsg->hdr.flags.response_expected == 1);
1227 }
1228
1229 static void
1230 my_device_create(struct controlvm_message *inmsg)
1231 {
1232 struct controlvm_message_packet *cmd = &inmsg->cmd;
1233 ulong busNo = cmd->create_device.bus_no;
1234 ulong devNo = cmd->create_device.dev_no;
1235 VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
1236 VISORCHIPSET_BUS_INFO *pBusInfo = NULL;
1237 int rc = CONTROLVM_RESP_SUCCESS;
1238
1239 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1240 if (pDevInfo && (pDevInfo->state.created == 1)) {
1241 LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu already exists",
1242 busNo, devNo);
1243 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1244 POSTCODE_SEVERITY_ERR);
1245 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1246 goto Away;
1247 }
1248 pBusInfo = findbus(&BusInfoList, busNo);
1249 if (!pBusInfo) {
1250 LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - out of range",
1251 busNo);
1252 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1253 POSTCODE_SEVERITY_ERR);
1254 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1255 goto Away;
1256 }
1257 if (pBusInfo->state.created == 0) {
1258 LOGERR("CONTROLVM_DEVICE_CREATE Failed: Invalid bus %lu - not created yet",
1259 busNo);
1260 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1261 POSTCODE_SEVERITY_ERR);
1262 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1263 goto Away;
1264 }
1265 pDevInfo = kzalloc(sizeof(VISORCHIPSET_DEVICE_INFO), GFP_KERNEL);
1266 if (pDevInfo == NULL) {
1267 LOGERR("CONTROLVM_DEVICE_CREATE Failed: busNo=%lu, devNo=%lu kmalloc failed",
1268 busNo, devNo);
1269 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1270 POSTCODE_SEVERITY_ERR);
1271 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1272 goto Away;
1273 }
1274
1275 INIT_LIST_HEAD(&pDevInfo->entry);
1276 pDevInfo->busNo = busNo;
1277 pDevInfo->devNo = devNo;
1278 pDevInfo->devInstGuid = cmd->create_device.dev_inst_uuid;
1279 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1280 POSTCODE_SEVERITY_INFO);
1281
1282 if (inmsg->hdr.flags.test_message == 1)
1283 pDevInfo->chanInfo.addrType = ADDRTYPE_localTest;
1284 else
1285 pDevInfo->chanInfo.addrType = ADDRTYPE_localPhysical;
1286 pDevInfo->chanInfo.channelAddr = cmd->create_device.channel_addr;
1287 pDevInfo->chanInfo.nChannelBytes = cmd->create_device.channel_bytes;
1288 pDevInfo->chanInfo.channelTypeGuid = cmd->create_device.data_type_uuid;
1289 pDevInfo->chanInfo.intr = cmd->create_device.intr;
1290 list_add(&pDevInfo->entry, &DevInfoList);
1291 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1292 POSTCODE_SEVERITY_INFO);
1293 Away:
1294 /* get the bus and devNo for DiagPool channel */
1295 /* pDevInfo may still be NULL here if the lookups above failed */
if (pDevInfo && is_diagpool_channel(pDevInfo->chanInfo.channelTypeGuid)) {
1296 g_diagpoolBusNo = busNo;
1297 g_diagpoolDevNo = devNo;
1298 LOGINF("CONTROLVM_DEVICE_CREATE for DiagPool channel: busNo=%lu, devNo=%lu",
1299 g_diagpoolBusNo, g_diagpoolDevNo);
1300 }
1301 if (pDevInfo)
device_epilog(busNo, devNo, segment_state_running,
1302 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1303 inmsg->hdr.flags.response_expected == 1,
1304 FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
1305 }
1306
1307 static void
1308 my_device_changestate(struct controlvm_message *inmsg)
1309 {
1310 struct controlvm_message_packet *cmd = &inmsg->cmd;
1311 ulong busNo = cmd->device_change_state.bus_no;
1312 ulong devNo = cmd->device_change_state.dev_no;
1313 struct spar_segment_state state = cmd->device_change_state.state;
1314 VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
1315 int rc = CONTROLVM_RESP_SUCCESS;
1316
1317 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1318 if (!pDevInfo) {
1319 LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (doesn't exist)",
1320 busNo, devNo);
1321 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1322 POSTCODE_SEVERITY_ERR);
1323 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1324 goto Away;
1325 }
1326 if (pDevInfo->state.created == 0) {
1327 LOGERR("CONTROLVM_DEVICE_CHANGESTATE Failed: busNo=%lu, devNo=%lu invalid (not created)",
1328 busNo, devNo);
1329 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1330 POSTCODE_SEVERITY_ERR);
1331 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1332 }
1333 Away:
1334 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1335 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1336 &inmsg->hdr, rc,
1337 inmsg->hdr.flags.response_expected == 1,
1338 FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
1339 }
1340
1341 static void
1342 my_device_destroy(struct controlvm_message *inmsg)
1343 {
1344 struct controlvm_message_packet *cmd = &inmsg->cmd;
1345 ulong busNo = cmd->destroy_device.bus_no;
1346 ulong devNo = cmd->destroy_device.dev_no;
1347 VISORCHIPSET_DEVICE_INFO *pDevInfo = NULL;
1348 int rc = CONTROLVM_RESP_SUCCESS;
1349
1350 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1351 if (!pDevInfo) {
1352 LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu invalid",
1353 busNo, devNo);
1354 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1355 goto Away;
1356 }
1357 if (pDevInfo->state.created == 0) {
1358 LOGERR("CONTROLVM_DEVICE_DESTROY Failed: busNo=%lu, devNo=%lu already destroyed",
1359 busNo, devNo);
1360 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1361 }
1362
1363 Away:
1364 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1365 device_epilog(busNo, devNo, segment_state_running,
1366 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1367 inmsg->hdr.flags.response_expected == 1,
1368 FOR_VISORBUS(pDevInfo->chanInfo.channelTypeGuid));
1369 }
1370
1371 /* When provided with the physical address of the controlvm channel
1372 * (phys_addr), the offset to the payload area we need to manage
1373 * (offset), and the size of this payload area (bytes), fills in the
1374 * CONTROLVM_PAYLOAD_INFO struct. Returns TRUE for success or FALSE
1375 * for failure.
1376 */
1377 static int
1378 initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1379 CONTROLVM_PAYLOAD_INFO *info)
1380 {
1381 u8 __iomem *payload = NULL;
1382 int rc = CONTROLVM_RESP_SUCCESS;
1383
1384 if (info == NULL) {
1385 LOGERR("HUH ? CONTROLVM_PAYLOAD_INIT Failed : Programmer check at %s:%d",
1386 __FILE__, __LINE__);
1387 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1388 goto Away;
1389 }
1390 memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
1391 if ((offset == 0) || (bytes == 0)) {
1392 LOGERR("CONTROLVM_PAYLOAD_INIT Failed: RequestPayloadOffset=%llu RequestPayloadBytes=%llu!",
1393 (u64) offset, (u64) bytes);
1394 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1395 goto Away;
1396 }
1397 payload = ioremap_cache(phys_addr + offset, bytes);
1398 if (payload == NULL) {
1399 LOGERR("CONTROLVM_PAYLOAD_INIT Failed: ioremap_cache %llu for %llu bytes failed",
1400 (u64) offset, (u64) bytes);
1401 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1402 goto Away;
1403 }
1404
1405 info->offset = offset;
1406 info->bytes = bytes;
1407 info->ptr = payload;
1408 LOGINF("offset=%llu, bytes=%lu, ptr=%p",
1409 (u64) (info->offset), (ulong) (info->bytes), info->ptr);
1410
1411 Away:
1412 if (rc < 0) {
1413 if (payload != NULL) {
1414 iounmap(payload);
1415 payload = NULL;
1416 }
1417 }
1418 return rc;
1419 }
1420
1421 static void
1422 destroy_controlvm_payload_info(CONTROLVM_PAYLOAD_INFO *info)
1423 {
1424 if (info->ptr != NULL) {
1425 iounmap(info->ptr);
1426 info->ptr = NULL;
1427 }
1428 memset(info, 0, sizeof(CONTROLVM_PAYLOAD_INFO));
1429 }
1430
1431 static void
1432 initialize_controlvm_payload(void)
1433 {
1434 HOSTADDRESS phys_addr = visorchannel_get_physaddr(ControlVm_channel);
1435 u64 payloadOffset = 0;
1436 u32 payloadBytes = 0;
1437
1438 if (visorchannel_read(ControlVm_channel,
1439 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
1440 RequestPayloadOffset),
1441 &payloadOffset, sizeof(payloadOffset)) < 0) {
1442 LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
1443 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1444 POSTCODE_SEVERITY_ERR);
1445 return;
1446 }
1447 if (visorchannel_read(ControlVm_channel,
1448 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
1449 RequestPayloadBytes),
1450 &payloadBytes, sizeof(payloadBytes)) < 0) {
1451 LOGERR("CONTROLVM_PAYLOAD_INIT Failed to read controlvm channel!");
1452 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1453 POSTCODE_SEVERITY_ERR);
1454 return;
1455 }
1456 initialize_controlvm_payload_info(phys_addr,
1457 payloadOffset, payloadBytes,
1458 &ControlVm_payload_info);
1459 }
1460
1461 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1462 * Returns CONTROLVM_RESP_xxx code.
1463 */
1464 int
1465 visorchipset_chipset_ready(void)
1466 {
1467 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1468 return CONTROLVM_RESP_SUCCESS;
1469 }
1470 EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1471
1472 int
1473 visorchipset_chipset_selftest(void)
1474 {
1475 char env_selftest[20];
1476 char *envp[] = { env_selftest, NULL };
1477
1478 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1479 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1480 envp);
1481 return CONTROLVM_RESP_SUCCESS;
1482 }
1483 EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1484
1485 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1486 * Returns CONTROLVM_RESP_xxx code.
1487 */
1488 int
1489 visorchipset_chipset_notready(void)
1490 {
1491 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1492 return CONTROLVM_RESP_SUCCESS;
1493 }
1494 EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1495
1496 static void
1497 chipset_ready(struct controlvm_message_header *msgHdr)
1498 {
1499 int rc = visorchipset_chipset_ready();
1500
1501 if (rc != CONTROLVM_RESP_SUCCESS)
1502 rc = -rc;
1503 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
1504 controlvm_respond(msgHdr, rc);
1505 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1506 /* Send CHIPSET_READY response when all modules have been loaded
1507 * and disks mounted for the partition
1508 */
1509 g_ChipSetMsgHdr = *msgHdr;
1510 LOGINF("Holding CHIPSET_READY response");
1511 }
1512 }
1513
1514 static void
1515 chipset_selftest(struct controlvm_message_header *msgHdr)
1516 {
1517 int rc = visorchipset_chipset_selftest();
1518
1519 if (rc != CONTROLVM_RESP_SUCCESS)
1520 rc = -rc;
1521 if (msgHdr->flags.response_expected)
1522 controlvm_respond(msgHdr, rc);
1523 }
1524
1525 static void
1526 chipset_notready(struct controlvm_message_header *msgHdr)
1527 {
1528 int rc = visorchipset_chipset_notready();
1529
1530 if (rc != CONTROLVM_RESP_SUCCESS)
1531 rc = -rc;
1532 if (msgHdr->flags.response_expected)
1533 controlvm_respond(msgHdr, rc);
1534 }
1535
1536 /* This is your "one-stop" shop for grabbing the next message from the
1537 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1538 */
1539 static BOOL
1540 read_controlvm_event(struct controlvm_message *msg)
1541 {
1542 if (visorchannel_signalremove(ControlVm_channel,
1543 CONTROLVM_QUEUE_EVENT, msg)) {
1544 /* got a message */
1545 if (msg->hdr.flags.test_message == 1) {
1546 LOGERR("ignoring bad CONTROLVM_QUEUE_EVENT msg with controlvm_msg_id=0x%x because Flags.testMessage is nonsensical (=1)",
1547 msg->hdr.id);
1548 return FALSE;
1549 }
1550 return TRUE;
1551 }
1552 return FALSE;
1553 }
1554
1555 /*
1556 * The general parahotplug flow works as follows. The visorchipset
1557 * driver receives a DEVICE_CHANGESTATE message from Command
1558 * specifying a physical device to enable or disable. The CONTROLVM
1559 * message handler calls parahotplug_process_message, which then adds
1560 * the message to a global list and kicks off a udev event which
1561 * causes a user level script to enable or disable the specified
1562 * device. The udev script then writes to
1563 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1564 * to get called, at which point the appropriate CONTROLVM message is
1565 * retrieved from the list and responded to.
1566 */
1567
1568 #define PARAHOTPLUG_TIMEOUT_MS 2000
1569
1570 /*
1571 * Generate unique int to match an outstanding CONTROLVM message with a
1572 * udev script /proc response
1573 */
1574 static int
1575 parahotplug_next_id(void)
1576 {
1577 static atomic_t id = ATOMIC_INIT(0);
1578
1579 return atomic_inc_return(&id);
1580 }
1581
1582 /*
1583 * Returns the time (in jiffies) when a CONTROLVM message on the list
1584 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1585 */
1586 static unsigned long
1587 parahotplug_next_expiration(void)
1588 {
1589 return jiffies + PARAHOTPLUG_TIMEOUT_MS * HZ / 1000;
1590 }
1591
1592 /*
1593 * Create a parahotplug_request, which is basically a wrapper for a
1594 * CONTROLVM_MESSAGE that we can stick on a list
1595 */
1596 static struct parahotplug_request *
1597 parahotplug_request_create(struct controlvm_message *msg)
1598 {
1599 struct parahotplug_request *req =
1600 kmalloc(sizeof(struct parahotplug_request),
1601 GFP_KERNEL|__GFP_NORETRY);
1602 if (req == NULL)
1603 return NULL;
1604
1605 req->id = parahotplug_next_id();
1606 req->expiration = parahotplug_next_expiration();
1607 req->msg = *msg;
1608
1609 return req;
1610 }
1611
1612 /*
1613 * Free a parahotplug_request.
1614 */
1615 static void
1616 parahotplug_request_destroy(struct parahotplug_request *req)
1617 {
1618 kfree(req);
1619 }
1620
1621 /*
1622 * Cause uevent to run the user level script to do the disable/enable
1623 * specified in (the CONTROLVM message in) the specified
1624 * parahotplug_request
1625 */
1626 static void
1627 parahotplug_request_kickoff(struct parahotplug_request *req)
1628 {
1629 struct controlvm_message_packet *cmd = &req->msg.cmd;
1630 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1631 env_func[40];
1632 char *envp[] = {
1633 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1634 };
1635
1636 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1637 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1638 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1639 cmd->device_change_state.state.active);
1640 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1641 cmd->device_change_state.bus_no);
1642 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1643 cmd->device_change_state.dev_no >> 3);
1644 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1645 cmd->device_change_state.dev_no & 0x7);
1646
1647 LOGINF("parahotplug_request_kickoff: state=%d, bdf=%d/%d/%d, id=%u\n",
1648 cmd->device_change_state.state.active,
1649 cmd->device_change_state.bus_no,
1650 cmd->device_change_state.dev_no >> 3,
1651 cmd->device_change_state.dev_no & 7, req->id);
1652
1653 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1654 envp);
1655 }
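/* Illustrative sketch (not part of this driver): the dev_no encoding used
 * by the kickoff code above.  The low three bits hold the PCI function and
 * the remaining bits the PCI device/slot, hence dev_no >> 3 and dev_no & 0x7;
 * example_encode_devfn() is a hypothetical helper.
 */
#if 0
static u32 example_encode_devfn(u32 slot, u32 function)
{
	return (slot << 3) | (function & 0x7);
}
#endif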
1656
1657 /*
1658 * Remove any request from the list that's been on there too long and
1659 * respond with an error.
1660 */
1661 static void
1662 parahotplug_process_list(void)
1663 {
1664 struct list_head *pos = NULL;
1665 struct list_head *tmp = NULL;
1666
1667 spin_lock(&Parahotplug_request_list_lock);
1668
1669 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1670 struct parahotplug_request *req =
1671 list_entry(pos, struct parahotplug_request, list);
1672 if (time_after_eq(jiffies, req->expiration)) {
1673 list_del(pos);
1674 if (req->msg.hdr.flags.response_expected)
1675 controlvm_respond_physdev_changestate(
1676 &req->msg.hdr,
1677 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1678 req->msg.cmd.device_change_state.state);
1679 parahotplug_request_destroy(req);
1680 }
1681 }
1682
1683 spin_unlock(&Parahotplug_request_list_lock);
1684 }
1685
1686 /*
1687 * Called from the /proc handler, which means the user script has
1688 * finished the enable/disable. Find the matching identifier, and
1689 * respond to the CONTROLVM message with success.
1690 */
1691 static int
1692 parahotplug_request_complete(int id, u16 active)
1693 {
1694 struct list_head *pos = NULL;
1695 struct list_head *tmp = NULL;
1696
1697 spin_lock(&Parahotplug_request_list_lock);
1698
1699 /* Look for a request matching "id". */
1700 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1701 struct parahotplug_request *req =
1702 list_entry(pos, struct parahotplug_request, list);
1703 if (req->id == id) {
1704 /* Found a match. Remove it from the list and
1705 * respond.
1706 */
1707 list_del(pos);
1708 spin_unlock(&Parahotplug_request_list_lock);
1709 req->msg.cmd.device_change_state.state.active = active;
1710 if (req->msg.hdr.flags.response_expected)
1711 controlvm_respond_physdev_changestate(
1712 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1713 req->msg.cmd.device_change_state.state);
1714 parahotplug_request_destroy(req);
1715 return 0;
1716 }
1717 }
1718
1719 spin_unlock(&Parahotplug_request_list_lock);
1720 return -1;
1721 }
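/* Illustrative sketch (not part of this driver): the completion path
 * described above.  A write handler (such as the devicedisabled /
 * deviceenabled attributes declared earlier) parses the id the udev script
 * writes back and calls parahotplug_request_complete();
 * example_complete_store() is a hypothetical name.
 */
#if 0
static ssize_t example_complete_store(const char *buf, size_t count,
				      u16 active)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id) != 0)
		return -EINVAL;
	if (parahotplug_request_complete((int)id, active) < 0)
		return -EINVAL;	/* no outstanding request with that id */
	return count;
}
#endif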
1722
1723 /*
1724 * Enables or disables a PCI device by kicking off a udev script
1725 */
1726 static void
1727 parahotplug_process_message(struct controlvm_message *inmsg)
1728 {
1729 struct parahotplug_request *req;
1730
1731 req = parahotplug_request_create(inmsg);
1732
1733 if (req == NULL) {
1734 LOGERR("parahotplug_process_message: couldn't allocate request");
1735 return;
1736 }
1737
1738 if (inmsg->cmd.device_change_state.state.active) {
1739 /* For enable messages, just respond with success
1740 * right away. This is a bit of a hack, but there are
1741 * issues with the early enable messages we get (with
1742 * either the udev script not detecting that the device
1743 * is up, or not getting called at all). Fortunately
1744 * the messages that get lost don't matter anyway, as
1745 * devices are automatically enabled at
1746 * initialization.
1747 */
1748 parahotplug_request_kickoff(req);
1749 controlvm_respond_physdev_changestate(&inmsg->hdr,
1750 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1751 device_change_state.state);
1752 parahotplug_request_destroy(req);
1753 } else {
1754 /* For disable messages, add the request to the
1755 * request list before kicking off the udev script. It
1756 * won't get responded to until the script has
1757 * indicated it's done.
1758 */
1759 spin_lock(&Parahotplug_request_list_lock);
1760 list_add_tail(&(req->list), &Parahotplug_request_list);
1761 spin_unlock(&Parahotplug_request_list_lock);
1762
1763 parahotplug_request_kickoff(req);
1764 }
1765 }
1766
1767 /* Process a controlvm message.
1768 * Return result:
1769 * FALSE - this function will return FALSE only in the case where the
1770 * controlvm message was NOT processed, but processing must be
1771 * retried before reading the next controlvm message; a
1772 * scenario where this can occur is when we need to throttle
1773 * the allocation of memory in which to copy out controlvm
1774 * payload data
1775 * TRUE - processing of the controlvm message completed,
1776 * either successfully or with an error.
1777 */
1778 static BOOL
1779 handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1780 {
1781 struct controlvm_message_packet *cmd = &inmsg.cmd;
1782 u64 parametersAddr = 0;
1783 u32 parametersBytes = 0;
1784 PARSER_CONTEXT *parser_ctx = NULL;
1785 BOOL isLocalAddr = FALSE;
1786 struct controlvm_message ackmsg;
1787
1788 /* create parsing context if necessary */
1789 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
1790 if (channel_addr == 0) {
1791 LOGERR("HUH? channel_addr is 0!");
1792 return TRUE;
1793 }
1794 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1795 parametersBytes = inmsg.hdr.payload_bytes;
1796
1797 /* Parameter and channel addresses within test messages actually lie
1798 * within our OS-controlled memory. We need to know that, because it
1799 * makes a difference in how we compute the virtual address.
1800 */
1801 if (parametersAddr != 0 && parametersBytes != 0) {
1802 BOOL retry = FALSE;
1803
1804 parser_ctx =
1805 parser_init_byteStream(parametersAddr, parametersBytes,
1806 isLocalAddr, &retry);
1807 if (!parser_ctx) {
1808 if (retry) {
1809 LOGWRN("throttling to copy payload");
1810 return FALSE;
1811 }
1812 LOGWRN("parsing failed");
1813 LOGWRN("inmsg.hdr.Id=0x%lx", (ulong) inmsg.hdr.id);
1814 LOGWRN("parametersAddr=0x%llx", (u64) parametersAddr);
1815 LOGWRN("parametersBytes=%lu", (ulong) parametersBytes);
1816 LOGWRN("isLocalAddr=%d", isLocalAddr);
1817 }
1818 }
1819
1820 if (!isLocalAddr) {
1821 controlvm_init_response(&ackmsg, &inmsg.hdr,
1822 CONTROLVM_RESP_SUCCESS);
1823 if ((ControlVm_channel)
1824 &&
1825 (!visorchannel_signalinsert
1826 (ControlVm_channel, CONTROLVM_QUEUE_ACK, &ackmsg)))
1827 LOGWRN("failed to send controlvm ACK");
1828 }
1829 switch (inmsg.hdr.id) {
1830 case CONTROLVM_CHIPSET_INIT:
1831 LOGINF("CHIPSET_INIT(#busses=%lu,#switches=%lu)",
1832 (ulong) inmsg.cmd.init_chipset.bus_count,
1833 (ulong) inmsg.cmd.init_chipset.switch_count);
1834 chipset_init(&inmsg);
1835 break;
1836 case CONTROLVM_BUS_CREATE:
1837 LOGINF("BUS_CREATE(%lu,#devs=%lu)",
1838 (ulong) cmd->create_bus.bus_no,
1839 (ulong) cmd->create_bus.dev_count);
1840 bus_create(&inmsg);
1841 break;
1842 case CONTROLVM_BUS_DESTROY:
1843 LOGINF("BUS_DESTROY(%lu)", (ulong) cmd->destroy_bus.bus_no);
1844 bus_destroy(&inmsg);
1845 break;
1846 case CONTROLVM_BUS_CONFIGURE:
1847 LOGINF("BUS_CONFIGURE(%lu)", (ulong) cmd->configure_bus.bus_no);
1848 bus_configure(&inmsg, parser_ctx);
1849 break;
1850 case CONTROLVM_DEVICE_CREATE:
1851 LOGINF("DEVICE_CREATE(%lu,%lu)",
1852 (ulong) cmd->create_device.bus_no,
1853 (ulong) cmd->create_device.dev_no);
1854 my_device_create(&inmsg);
1855 break;
1856 case CONTROLVM_DEVICE_CHANGESTATE:
1857 if (cmd->device_change_state.flags.phys_device) {
1858 LOGINF("DEVICE_CHANGESTATE for physical device (%lu,%lu, active=%lu)",
1859 (ulong) cmd->device_change_state.bus_no,
1860 (ulong) cmd->device_change_state.dev_no,
1861 (ulong) cmd->device_change_state.state.active);
1862 parahotplug_process_message(&inmsg);
1863 } else {
1864 LOGINF("DEVICE_CHANGESTATE for virtual device (%lu,%lu, state.Alive=0x%lx)",
1865 (ulong) cmd->device_change_state.bus_no,
1866 (ulong) cmd->device_change_state.dev_no,
1867 (ulong) cmd->device_change_state.state.alive);
1868 /* save the hdr and cmd structures for later use */
1869 /* when sending back the response to Command */
1870 my_device_changestate(&inmsg);
1871 g_DiagMsgHdr = inmsg.hdr;
1872 g_DeviceChangeStatePacket = inmsg.cmd;
1873 break;
1874 }
1875 break;
1876 case CONTROLVM_DEVICE_DESTROY:
1877 LOGINF("DEVICE_DESTROY(%lu,%lu)",
1878 (ulong) cmd->destroy_device.bus_no,
1879 (ulong) cmd->destroy_device.dev_no);
1880 my_device_destroy(&inmsg);
1881 break;
1882 case CONTROLVM_DEVICE_CONFIGURE:
1883 LOGINF("DEVICE_CONFIGURE(%lu,%lu)",
1884 (ulong) cmd->configure_device.bus_no,
1885 (ulong) cmd->configure_device.dev_no);
1886 /* no op for now, just send a respond that we passed */
1887 if (inmsg.hdr.flags.response_expected)
1888 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1889 break;
1890 case CONTROLVM_CHIPSET_READY:
1891 LOGINF("CHIPSET_READY");
1892 chipset_ready(&inmsg.hdr);
1893 break;
1894 case CONTROLVM_CHIPSET_SELFTEST:
1895 LOGINF("CHIPSET_SELFTEST");
1896 chipset_selftest(&inmsg.hdr);
1897 break;
1898 case CONTROLVM_CHIPSET_STOP:
1899 LOGINF("CHIPSET_STOP");
1900 chipset_notready(&inmsg.hdr);
1901 break;
1902 default:
1903 LOGERR("unrecognized controlvm cmd=%d", (int) inmsg.hdr.id);
1904 if (inmsg.hdr.flags.response_expected)
1905 controlvm_respond(&inmsg.hdr,
1906 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1907 break;
1908 }
1909
1910 if (parser_ctx != NULL) {
1911 parser_done(parser_ctx);
1912 parser_ctx = NULL;
1913 }
1914 return TRUE;
1915 }
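
/* When handle_command() returns FALSE (payload copy throttled), the caller
 * stashes the message in ControlVm_Pending_Msg and retries it on its next
 * pass instead of reading a new event; see controlvm_periodic_work() below.
 */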
1916
1917 static HOSTADDRESS controlvm_get_channel_address(void)
1918 {
1919 u64 addr = 0;
1920 u32 size = 0;
1921
1922 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size))) {
1923 ERRDRV("%s - vmcall to determine controlvm channel addr failed",
1924 __func__);
1925 return 0;
1926 }
1927 INFODRV("controlvm addr=%Lx", addr);
1928 return addr;
1929 }
1930
1931 static void
1932 controlvm_periodic_work(struct work_struct *work)
1933 {
1934 VISORCHIPSET_CHANNEL_INFO chanInfo;
1935 struct controlvm_message inmsg;
1936 BOOL gotACommand = FALSE;
1937 BOOL handle_command_failed = FALSE;
1938 static u64 Poll_Count;
1939
1940 /* make sure visorbus server is registered for controlvm callbacks */
1941 if (visorchipset_serverregwait && !serverregistered)
1942 goto Away;
1943 /* make sure visorclientbus server is registered for controlvm
1944 * callbacks
1945 */
1946 if (visorchipset_clientregwait && !clientregistered)
1947 goto Away;
1948
1949 memset(&chanInfo, 0, sizeof(VISORCHIPSET_CHANNEL_INFO));
1950
1951 Poll_Count++;
1952 
1953 /* wait until we have been polled 250 times before doing real work */
1954 if (Poll_Count < 250)
1955 goto Away;
1956
1957 /* Check events to determine if response to CHIPSET_READY
1958 * should be sent
1959 */
1960 if (visorchipset_holdchipsetready
1961 && (g_ChipSetMsgHdr.id != CONTROLVM_INVALID)) {
1962 if (check_chipset_events() == 1) {
1963 LOGINF("Sending CHIPSET_READY response");
1964 controlvm_respond(&g_ChipSetMsgHdr, 0);
1965 clear_chipset_events();
1966 memset(&g_ChipSetMsgHdr, 0,
1967 sizeof(struct controlvm_message_header));
1968 }
1969 }
1970
1971 while (visorchannel_signalremove(ControlVm_channel,
1972 CONTROLVM_QUEUE_RESPONSE,
1973 &inmsg)) {
1974 if (inmsg.hdr.payload_max_bytes != 0) {
1975 LOGERR("Payload of size %lu returned @%lu with unexpected message id %d.",
1976 (ulong) inmsg.hdr.payload_max_bytes,
1977 (ulong) inmsg.hdr.payload_vm_offset,
1978 inmsg.hdr.id);
1979 }
1980 }
1981 if (!gotACommand) {
1982 if (ControlVm_Pending_Msg_Valid) {
1983 /* we throttled processing of a prior
1984 * msg, so try to process it again
1985 * rather than reading a new one
1986 */
1987 inmsg = ControlVm_Pending_Msg;
1988 ControlVm_Pending_Msg_Valid = FALSE;
1989 gotACommand = TRUE;
1990 } else
1991 gotACommand = read_controlvm_event(&inmsg);
1992 }
1993
1994 handle_command_failed = FALSE;
1995 while (gotACommand && (!handle_command_failed)) {
1996 Most_recent_message_jiffies = jiffies;
1997 if (handle_command(inmsg,
1998 visorchannel_get_physaddr
1999 (ControlVm_channel)))
2000 gotACommand = read_controlvm_event(&inmsg);
2001 else {
2002 /* this is a scenario where throttling
2003 * is required, but probably NOT an
2004 * error...; we stash the current
2005 * controlvm msg so we will attempt to
2006 * reprocess it on our next loop
2007 */
2008 handle_command_failed = TRUE;
2009 ControlVm_Pending_Msg = inmsg;
2010 ControlVm_Pending_Msg_Valid = TRUE;
2011 }
2012 }
2013
2014 /* parahotplug_worker */
2015 parahotplug_process_list();
2016
2017 Away:
2018
2019 if (time_after(jiffies,
2020 Most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2021 /* it's been longer than MIN_IDLE_SECONDS since we
2022 * processed our last controlvm message; slow down the
2023 * polling
2024 */
2025 if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW) {
2026 LOGINF("switched to slow controlvm polling");
2027 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2028 }
2029 } else {
2030 if (Poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST) {
2031 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2032 LOGINF("switched to fast controlvm polling");
2033 }
2034 }
2035
2036 queue_delayed_work(Periodic_controlvm_workqueue,
2037 &Periodic_controlvm_work, Poll_jiffies);
2038 }
2039
2040 static void
2041 setup_crash_devices_work_queue(struct work_struct *work)
2042 {
2043
2044 struct controlvm_message localCrashCreateBusMsg;
2045 struct controlvm_message localCrashCreateDevMsg;
2046 struct controlvm_message msg;
2047 u32 localSavedCrashMsgOffset;
2048 u16 localSavedCrashMsgCount;
2049
2050 /* make sure visorbus server is registered for controlvm callbacks */
2051 if (visorchipset_serverregwait && !serverregistered)
2052 goto Away;
2053
2054 /* make sure visorclientbus server is registered for controlvm
2055 * callbacks
2056 */
2057 if (visorchipset_clientregwait && !clientregistered)
2058 goto Away;
2059
2060 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2061
2062 /* send init chipset msg */
2063 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2064 msg.cmd.init_chipset.bus_count = 23;
2065 msg.cmd.init_chipset.switch_count = 0;
2066
2067 chipset_init(&msg);
2068
2069 /* get saved message count */
2070 if (visorchannel_read(ControlVm_channel,
2071 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
2072 SavedCrashMsgCount),
2073 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
2074 LOGERR("failed to get Saved Message Count");
2075 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2076 POSTCODE_SEVERITY_ERR);
2077 return;
2078 }
2079
2080 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
2081 LOGERR("Saved Message Count incorrect %d",
2082 localSavedCrashMsgCount);
2083 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2084 localSavedCrashMsgCount,
2085 POSTCODE_SEVERITY_ERR);
2086 return;
2087 }
2088
2089 /* get saved crash message offset */
2090 if (visorchannel_read(ControlVm_channel,
2091 offsetof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL,
2092 SavedCrashMsgOffset),
2093 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
2094 LOGERR("failed to get Saved Message Offset");
2095 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2096 POSTCODE_SEVERITY_ERR);
2097 return;
2098 }
2099
2100 /* read create device message for storage bus offset */
2101 if (visorchannel_read(ControlVm_channel,
2102 localSavedCrashMsgOffset,
2103 &localCrashCreateBusMsg,
2104 sizeof(struct controlvm_message)) < 0) {
2105 LOGERR("CRASH_DEV_RD_BUS_FAIULRE: Failed to read CrashCreateBusMsg!");
2106 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2107 POSTCODE_SEVERITY_ERR);
2108 return;
2109 }
2110
2111 /* read create device message for storage device */
2112 if (visorchannel_read(ControlVm_channel,
2113 localSavedCrashMsgOffset +
2114 sizeof(struct controlvm_message),
2115 &localCrashCreateDevMsg,
2116 sizeof(struct controlvm_message)) < 0) {
2117 LOGERR("CRASH_DEV_RD_DEV_FAIULRE: Failed to read CrashCreateDevMsg!");
2118 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2119 POSTCODE_SEVERITY_ERR);
2120 return;
2121 }
2122
2123 /* reuse IOVM create bus message */
2124 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
2125 bus_create(&localCrashCreateBusMsg);
2126 else {
2127 LOGERR("CrashCreateBusMsg is null, no dump will be taken");
2128 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2129 POSTCODE_SEVERITY_ERR);
2130 return;
2131 }
2132
2133 /* reuse create device message for storage device */
2134 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
2135 my_device_create(&localCrashCreateDevMsg);
2136 else {
2137 LOGERR("CrashCreateDevMsg is null, no dump will be taken");
2138 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2139 POSTCODE_SEVERITY_ERR);
2140 return;
2141 }
2142 LOGINF("Bus and device ready for dumping");
2143 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2144 return;
2145
2146 Away:
2147
2148 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2149
2150 queue_delayed_work(Periodic_controlvm_workqueue,
2151 &Periodic_controlvm_work, Poll_jiffies);
2152 }
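
/* Summary of the crash-kernel path above: the bus-create and device-create
 * messages previously saved in the controlvm channel (at SavedCrashMsgOffset)
 * are replayed through bus_create() and my_device_create(), so the storage
 * bus and device needed to write the dump exist without any new controlvm
 * traffic.
 */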
2153
2154 static void
2155 bus_create_response(ulong busNo, int response)
2156 {
2157 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
2158 }
2159
2160 static void
2161 bus_destroy_response(ulong busNo, int response)
2162 {
2163 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
2164 }
2165
2166 static void
2167 device_create_response(ulong busNo, ulong devNo, int response)
2168 {
2169 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
2170 }
2171
2172 static void
2173 device_destroy_response(ulong busNo, ulong devNo, int response)
2174 {
2175 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
2176 }
2177
2178 void
2179 visorchipset_device_pause_response(ulong busNo, ulong devNo, int response)
2180 {
2181
2182 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2183 busNo, devNo, response,
2184 segment_state_standby);
2185 }
2186 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2187
2188 static void
2189 device_resume_response(ulong busNo, ulong devNo, int response)
2190 {
2191 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2192 busNo, devNo, response,
2193 segment_state_running);
2194 }
2195
2196 BOOL
2197 visorchipset_get_bus_info(ulong busNo, VISORCHIPSET_BUS_INFO *busInfo)
2198 {
2199 void *p = findbus(&BusInfoList, busNo);
2200
2201 if (!p) {
2202 LOGERR("(%lu) failed", busNo);
2203 return FALSE;
2204 }
2205 memcpy(busInfo, p, sizeof(VISORCHIPSET_BUS_INFO));
2206 return TRUE;
2207 }
2208 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2209
2210 BOOL
2211 visorchipset_set_bus_context(ulong busNo, void *context)
2212 {
2213 VISORCHIPSET_BUS_INFO *p = findbus(&BusInfoList, busNo);
2214
2215 if (!p) {
2216 LOGERR("(%lu) failed", busNo);
2217 return FALSE;
2218 }
2219 p->bus_driver_context = context;
2220 return TRUE;
2221 }
2222 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2223
2224 BOOL
2225 visorchipset_get_device_info(ulong busNo, ulong devNo,
2226 VISORCHIPSET_DEVICE_INFO *devInfo)
2227 {
2228 void *p = finddevice(&DevInfoList, busNo, devNo);
2229
2230 if (!p) {
2231 LOGERR("(%lu,%lu) failed", busNo, devNo);
2232 return FALSE;
2233 }
2234 memcpy(devInfo, p, sizeof(VISORCHIPSET_DEVICE_INFO));
2235 return TRUE;
2236 }
2237 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2238
2239 BOOL
2240 visorchipset_set_device_context(ulong busNo, ulong devNo, void *context)
2241 {
2242 VISORCHIPSET_DEVICE_INFO *p = finddevice(&DevInfoList, busNo, devNo);
2243
2244 if (!p) {
2245 LOGERR("(%lu,%lu) failed", busNo, devNo);
2246 return FALSE;
2247 }
2248 p->bus_driver_context = context;
2249 return TRUE;
2250 }
2251 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2252
2253 /* Generic wrapper function for allocating memory from a kmem_cache pool.
2254 */
2255 void *
2256 visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2257 char *fn, int ln)
2258 {
2259 gfp_t gfp;
2260 void *p;
2261
2262 if (ok_to_block)
2263 gfp = GFP_KERNEL;
2264 else
2265 gfp = GFP_ATOMIC;
2266 /* __GFP_NORETRY means "ok to fail", meaning
2267 * kmem_cache_alloc() can return NULL, implying the caller CAN
2268 * cope with failure. If you do NOT specify __GFP_NORETRY,
2269 * Linux will go to extreme measures to get memory for you
2270 * (like, invoke oom killer), which will probably cripple the
2271 * system.
2272 */
2273 gfp |= __GFP_NORETRY;
2274 p = kmem_cache_alloc(pool, gfp);
2275 if (!p) {
2276 LOGERR("kmem_cache_alloc failed early @%s:%d\n", fn, ln);
2277 return NULL;
2278 }
2279 atomic_inc(&Visorchipset_cache_buffers_in_use);
2280 return p;
2281 }
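
/* Typical call site (illustrative only; callers supply their own pool):
 *
 *	p = visorchipset_cache_alloc(Putfile_buffer_list_pool, TRUE,
 *				     __FILE__, __LINE__);
 *	if (!p)
 *		return -ENOMEM;
 *
 * Because of __GFP_NORETRY above, every caller must handle a NULL return.
 */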
2282
2283 /* Generic wrapper function for freeing memory from a kmem_cache pool.
2284 */
2285 void
2286 visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2287 {
2288 if (!p) {
2289 LOGERR("NULL pointer @%s:%d\n", fn, ln);
2290 return;
2291 }
2292 atomic_dec(&Visorchipset_cache_buffers_in_use);
2293 kmem_cache_free(pool, p);
2294 }
2295
2296 static ssize_t chipsetready_store(struct device *dev,
2297 struct device_attribute *attr, const char *buf, size_t count)
2298 {
2299 char msgtype[64];
2300
2301 if (sscanf(buf, "%63s", msgtype) != 1)
2302 return -EINVAL;
2303
2304 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2305 chipset_events[0] = 1;
2306 return count;
2307 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2308 chipset_events[1] = 1;
2309 return count;
2310 }
2311 return -EINVAL;
2312 }
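
/* Writing "CALLHOMEDISK_MOUNTED" or "MODULES_LOADED" to the chipsetready
 * attribute latches chipset_events[0] or chipset_events[1] respectively;
 * when the holdchipsetready option is set, the CHIPSET_READY response is
 * deferred and controlvm_periodic_work() sends it once check_chipset_events()
 * reports that the expected events have arrived.
 */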
2313
2314 /* The parahotplug/devicedisabled interface gets called by our support script
2315 * when an SR-IOV device has been shut down. The ID is passed to the script
2316 * and then passed back when the device has been removed.
2317 */
2318 static ssize_t devicedisabled_store(struct device *dev,
2319 struct device_attribute *attr, const char *buf, size_t count)
2320 {
2321 uint id;
2322
2323 if (kstrtouint(buf, 10, &id) != 0)
2324 return -EINVAL;
2325
2326 parahotplug_request_complete(id, 0);
2327 return count;
2328 }
2329
2330 /* The parahotplug/deviceenabled interface gets called by our support script
2331 * when an SR-IOV device has been recovered. The ID is passed to the script
2332 * and then passed back when the device has been brought back up.
2333 */
2334 static ssize_t deviceenabled_store(struct device *dev,
2335 struct device_attribute *attr, const char *buf, size_t count)
2336 {
2337 uint id;
2338
2339 if (kstrtouint(buf, 10, &id) != 0)
2340 return -EINVAL;
2341
2342 parahotplug_request_complete(id, 1);
2343 return count;
2344 }
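
/* Illustrative flow for the two attributes above (most relevant for the
 * disable path, since enables are acknowledged immediately in
 * parahotplug_process_message()): the uevent from
 * parahotplug_request_kickoff() hands the request id to the support script;
 * once the script has taken the device offline it writes that id back, e.g.
 * (sysfs path assumed from the platform device name):
 *
 *	echo "$ID" > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *
 * which reaches parahotplug_request_complete() and releases the deferred
 * CONTROLVM response.
 */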
2345
2346 static int __init
2347 visorchipset_init(void)
2348 {
2349 int rc = 0, x = 0;
2350 char s[64];
2351 HOSTADDRESS addr;
2352
2353 if (!unisys_spar_platform)
2354 return -ENODEV;
2355
2356 LOGINF("chipset driver version %s loaded", VERSION);
2357 /* process module options */
2358 POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2359
2360 LOGINF("option - testvnic=%d", visorchipset_testvnic);
2361 LOGINF("option - testvnicclient=%d", visorchipset_testvnicclient);
2362 LOGINF("option - testmsg=%d", visorchipset_testmsg);
2363 LOGINF("option - testteardown=%d", visorchipset_testteardown);
2364 LOGINF("option - major=%d", visorchipset_major);
2365 LOGINF("option - serverregwait=%d", visorchipset_serverregwait);
2366 LOGINF("option - clientregwait=%d", visorchipset_clientregwait);
2367 LOGINF("option - holdchipsetready=%d", visorchipset_holdchipsetready);
2368
2369 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2370 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2371 memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2372 memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2373 atomic_set(&LiveDump_info.buffers_in_use, 0);
2374
2375 if (visorchipset_testvnic) {
2376 ERRDRV("testvnic option no longer supported: (status = %d)\n",
2377 x);
2378 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2379 rc = x;
2380 goto Away;
2381 }
2382
2383 addr = controlvm_get_channel_address();
2384 if (addr != 0) {
2385 ControlVm_channel =
2386 visorchannel_create_with_lock
2387 (addr,
2388 sizeof(ULTRA_CONTROLVM_CHANNEL_PROTOCOL),
2389 spar_controlvm_channel_protocol_uuid);
2390 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2391 visorchannel_get_header(ControlVm_channel))) {
2392 LOGINF("Channel %s (ControlVm) discovered",
2393 visorchannel_id(ControlVm_channel, s));
2394 initialize_controlvm_payload();
2395 } else {
2396 LOGERR("controlvm channel is invalid");
2397 visorchannel_destroy(ControlVm_channel);
2398 ControlVm_channel = NULL;
2399 return -ENODEV;
2400 }
2401 } else {
2402 LOGERR("no controlvm channel discovered");
2403 return -ENODEV;
2404 }
2405
2406 MajorDev = MKDEV(visorchipset_major, 0);
2407 rc = visorchipset_file_init(MajorDev, &ControlVm_channel);
2408 if (rc < 0) {
2409 ERRDRV("visorchipset_file_init(MajorDev, &ControlVm_channel): error (status=%d)\n", rc);
2410 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2411 goto Away;
2412 }
2413
2414 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2415
2416 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2417
2418 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
2419
2420 Putfile_buffer_list_pool =
2421 kmem_cache_create(Putfile_buffer_list_pool_name,
2422 sizeof(struct putfile_buffer_entry),
2423 0, SLAB_HWCACHE_ALIGN, NULL);
2424 if (!Putfile_buffer_list_pool) {
2425 ERRDRV("failed to alloc Putfile_buffer_list_pool: (status=-1)\n");
2426 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2427 rc = -1;
2428 goto Away;
2429 }
2430 if (visorchipset_disable_controlvm) {
2431 LOGINF("visorchipset_init:controlvm disabled");
2432 } else {
2433 /* if booting in a crash kernel */
2434 if (visorchipset_crash_kernel)
2435 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2436 setup_crash_devices_work_queue);
2437 else
2438 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2439 controlvm_periodic_work);
2440 Periodic_controlvm_workqueue =
2441 create_singlethread_workqueue("visorchipset_controlvm");
2442
2443 if (Periodic_controlvm_workqueue == NULL) {
2444 ERRDRV("cannot create controlvm workqueue: (status=%d)\n",
2445 -ENOMEM);
2446 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2447 DIAG_SEVERITY_ERR);
2448 rc = -ENOMEM;
2449 goto Away;
2450 }
2451 Most_recent_message_jiffies = jiffies;
2452 Poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2453 rc = queue_delayed_work(Periodic_controlvm_workqueue,
2454 &Periodic_controlvm_work, Poll_jiffies);
2455 if (rc < 0) {
2456 ERRDRV("queue_delayed_work(Periodic_controlvm_workqueue, &Periodic_controlvm_work, Poll_jiffies): error (status=%d)\n", rc);
2457 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2458 DIAG_SEVERITY_ERR);
2459 goto Away;
2460 }
2461
2462 }
2463
2464 Visorchipset_platform_device.dev.devt = MajorDev;
2465 if (platform_device_register(&Visorchipset_platform_device) < 0) {
2466 ERRDRV("platform_device_register(visorchipset) failed: (status=-1)\n");
2467 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2468 rc = -1;
2469 goto Away;
2470 }
2471 LOGINF("visorchipset device created");
2472 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2473 rc = 0;
2474 Away:
2475 if (rc) {
2476 LOGERR("visorchipset_init failed");
2477 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2478 POSTCODE_SEVERITY_ERR);
2479 }
2480 return rc;
2481 }
2482
2483 static void
2484 visorchipset_exit(void)
2485 {
2486 char s[99];
2487
2488 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2489
2490 if (!visorchipset_disable_controlvm) {
2491 /* stop the periodic controlvm polling and release its
2492  * work queue and payload resources */
2493 cancel_delayed_work(&Periodic_controlvm_work);
2494 flush_workqueue(Periodic_controlvm_workqueue);
2495 destroy_workqueue(Periodic_controlvm_workqueue);
2496 Periodic_controlvm_workqueue = NULL;
2497 destroy_controlvm_payload_info(&ControlVm_payload_info);
2498 }
2499 Test_Vnic_channel = NULL;
2500 if (Putfile_buffer_list_pool) {
2501 kmem_cache_destroy(Putfile_buffer_list_pool);
2502 Putfile_buffer_list_pool = NULL;
2503 }
2504
2505 cleanup_controlvm_structures();
2506
2507 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
2508
2509 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
2510
2511 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
2512
2513 LOGINF("Channel %s (ControlVm) disconnected",
2514 visorchannel_id(ControlVm_channel, s));
2515 visorchannel_destroy(ControlVm_channel);
2516
2517 visorchipset_file_cleanup();
2518 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2519 LOGINF("chipset driver unloaded");
2520 }
2521
2522 module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2523 MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2524 int visorchipset_testvnic = 0;
2525
2526 module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2527 MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2528 int visorchipset_testvnicclient = 0;
2529
2530 module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2531 MODULE_PARM_DESC(visorchipset_testmsg,
2532 "1 to manufacture the chipset, bus, and switch messages");
2533 int visorchipset_testmsg = 0;
2534
2535 module_param_named(major, visorchipset_major, int, S_IRUGO);
2536 MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2537 int visorchipset_major = 0;
2538
2539 module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2540 MODULE_PARM_DESC(visorchipset_serverregwait,
2541 "1 to have the module wait for the visor bus to register");
2542 int visorchipset_serverregwait = 0; /* default is off */
2543 module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2544 MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2545 int visorchipset_clientregwait = 1; /* default is on */
2546 module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2547 MODULE_PARM_DESC(visorchipset_testteardown,
2548 "1 to test teardown of the chipset, bus, and switch");
2549 int visorchipset_testteardown = 0; /* default is off */
2550 module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2551 S_IRUGO);
2552 MODULE_PARM_DESC(visorchipset_disable_controlvm,
2553 "1 to disable polling of controlVm channel");
2554 int visorchipset_disable_controlvm = 0; /* default is off */
2555 module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2556 MODULE_PARM_DESC(visorchipset_crash_kernel,
2557 "1 means we are running in crash kernel");
2558 int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2559 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2560 int, S_IRUGO);
2561 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2562 "1 to hold response to CHIPSET_READY");
2563 int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2564 * response immediately */
2565 module_init(visorchipset_init);
2566 module_exit(visorchipset_exit);
2567
2568 MODULE_AUTHOR("Unisys");
2569 MODULE_LICENSE("GPL");
2570 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2571 VERSION);
2572 MODULE_VERSION(VERSION);