staging: unisys: fix CamelCase global Most_recent_message_jiffies
[deliverable/linux.git] drivers/staging/unisys/visorchipset/visorchipset_main.c
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18#include "globals.h"
19#include "visorchipset.h"
20#include "procobjecttree.h"
21#include "visorchannel.h"
22#include "periodic_work.h"
23#include "file.h"
24#include "parser.h"
12e364b9 25#include "uisutils.h"
26#include "controlvmcompletionstatus.h"
27#include "guestlinuxdebug.h"
28
29#include <linux/nls.h>
30#include <linux/netdevice.h>
31#include <linux/platform_device.h>
90addb02 32#include <linux/uuid.h>
33
34#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37#define TEST_VNIC_SWITCHNO 1
38#define TEST_VNIC_BUSNO 9
39
40#define MAX_NAME_SIZE 128
41#define MAX_IP_SIZE 50
42#define MAXOUTSTANDINGCHANNELCOMMAND 256
43#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
45
46/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47* we switch to slow polling mode. As soon as we get a controlvm
48* message, we switch back to fast polling mode.
49*/
50#define MIN_IDLE_SECONDS 10
911e213e 51static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
b53e0e93 52static ulong most_recent_message_jiffies; /* when we got our last
bd5b9b32 53 * controlvm message */
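/* Illustrative sketch only (not part of this driver): how the periodic work
 * later in this file uses the two variables above to pick a polling rate.
 * The helper name below is hypothetical.
 */
#if 0
static void poll_rate_sketch(BOOL got_controlvm_message)
{
	if (got_controlvm_message) {
		/* any message switches us back to fast polling */
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	} else if (time_after(jiffies, most_recent_message_jiffies +
			      MIN_IDLE_SECONDS * HZ)) {
		/* idle for at least MIN_IDLE_SECONDS: drop to slow polling */
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	}
}
#endif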
54static inline char *
55NONULLSTR(char *s)
56{
57 if (s)
58 return s;
e22a4a0f 59 return "";
60}
61
62static int serverregistered;
63static int clientregistered;
64
65#define MAX_CHIPSET_EVENTS 2
c242233e 66static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
67
68static struct delayed_work Periodic_controlvm_work;
69static struct workqueue_struct *Periodic_controlvm_workqueue;
bd5b9b32 70static DEFINE_SEMAPHORE(NotifierLock);
12e364b9 71
72static struct controlvm_message_header g_DiagMsgHdr;
73static struct controlvm_message_header g_ChipSetMsgHdr;
74static struct controlvm_message_header g_DelDumpMsgHdr;
90addb02 75static const uuid_le UltraDiagPoolChannelProtocolGuid =
9eee5d1f 76 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
77/* 0xffffff is an invalid Bus/Device number */
78static ulong g_diagpoolBusNo = 0xffffff;
79static ulong g_diagpoolDevNo = 0xffffff;
2ea5117b 80static struct controlvm_message_packet g_DeviceChangeStatePacket;
81
82/* Only VNIC and VHBA channels are sent to visorclientbus (aka
83 * "visorhackbus")
84 */
85#define FOR_VISORHACKBUS(channel_type_guid) \
86 (((uuid_le_cmp(channel_type_guid,\
87 spar_vnic_channel_protocol_uuid) == 0)\
88 || (uuid_le_cmp(channel_type_guid,\
89 spar_vhba_channel_protocol_uuid) == 0)))
90#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
91
92#define is_diagpool_channel(channel_type_guid) \
90addb02 93 (uuid_le_cmp(channel_type_guid, UltraDiagPoolChannelProtocolGuid) == 0)
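/* Illustrative routing example (hypothetical pseudo-handler, not driver code):
 * given the channel-type GUID carried by a CONTROLVM message, the macros above
 * classify where a new device belongs:
 *   - is_diagpool_channel(guid): remember it via g_diagpoolBusNo/g_diagpoolDevNo
 *   - FOR_VISORHACKBUS(guid):    VNIC/VHBA channel, handled by visorclientbus
 *   - FOR_VISORBUS(guid):        everything else, handled by visorbus
 */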
12e364b9 94
95static LIST_HEAD(BusInfoList);
96static LIST_HEAD(DevInfoList);
97
383df64e 98static struct visorchannel *ControlVm_channel;
12e364b9 99
84b11dfd 100struct controlvm_payload_info {
c242233e 101 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 102 u64 offset; /* offset from beginning of controlvm
12e364b9 103 * channel to beginning of payload * pool */
b3c55b13 104 u32 bytes; /* number of bytes in payload pool */
84b11dfd 105};
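/* Worked example with illustrative numbers: if the channel header reports
 * request_payload_offset == 0x1000 and request_payload_bytes == 0x4000,
 * initialize_controlvm_payload() below fills this struct with offset = 0x1000,
 * bytes = 0x4000, and ptr = the ioremapped kernel virtual address of
 * (channel physical address + 0x1000).
 */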
106
107/* Manages the request payload in the controlvm channel */
84b11dfd 108static struct controlvm_payload_info ControlVm_payload_info;
12e364b9 109
9fd1b95a 110static struct channel_header *Test_Vnic_channel;
12e364b9 111
84b11dfd 112struct livedump_info {
113 struct controlvm_message_header Dumpcapture_header;
114 struct controlvm_message_header Gettextdump_header;
115 struct controlvm_message_header Dumpcomplete_header;
116 BOOL Gettextdump_outstanding;
117 u32 crc32;
118 ulong length;
119 atomic_t buffers_in_use;
120 ulong destination;
84b11dfd 121};
122/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
123 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
124 */
84b11dfd 125static struct livedump_info LiveDump_info;
126
127/* The following globals are used to handle the scenario where we are unable to
128 * offload the payload from a controlvm message due to memory requirements. In
129 * this scenario, we simply stash the controlvm message, then attempt to
130 * process it again the next time controlvm_periodic_work() runs.
131 */
3ab47701 132static struct controlvm_message ControlVm_Pending_Msg;
133static BOOL ControlVm_Pending_Msg_Valid = FALSE;
134
135/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
136 * TRANSMIT_FILE PutFile payloads.
137 */
138static struct kmem_cache *Putfile_buffer_list_pool;
139static const char Putfile_buffer_list_pool_name[] =
140 "controlvm_putfile_buffer_list_pool";
141
 142/* This identifies a data buffer that has been received via a controlvm message
143 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
144 */
145struct putfile_buffer_entry {
146 struct list_head next; /* putfile_buffer_entry list */
317d9614 147 struct parser_context *parser_ctx; /* points to input data buffer */
148};
149
150/* List of struct putfile_request *, via next_putfile_request member.
151 * Each entry in this list identifies an outstanding TRANSMIT_FILE
152 * conversation.
153 */
154static LIST_HEAD(Putfile_request_list);
155
156/* This describes a buffer and its current state of transfer (e.g., how many
157 * bytes have already been supplied as putfile data, and how many bytes are
158 * remaining) for a putfile_request.
159 */
160struct putfile_active_buffer {
161 /* a payload from a controlvm message, containing a file data buffer */
317d9614 162 struct parser_context *parser_ctx;
163 /* points within data area of parser_ctx to next byte of data */
164 u8 *pnext;
165 /* # bytes left from <pnext> to the end of this data buffer */
166 size_t bytes_remaining;
167};
168
169#define PUTFILE_REQUEST_SIG 0x0906101302281211
170/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
171 * conversation. Structs of this type are dynamically linked into
172 * <Putfile_request_list>.
173 */
174struct putfile_request {
175 u64 sig; /* PUTFILE_REQUEST_SIG */
176
177 /* header from original TransmitFile request */
98d7b594 178 struct controlvm_message_header controlvm_header;
179 u64 file_request_number; /* from original TransmitFile request */
180
181 /* link to next struct putfile_request */
182 struct list_head next_putfile_request;
183
184 /* most-recent sequence number supplied via a controlvm message */
185 u64 data_sequence_number;
186
187 /* head of putfile_buffer_entry list, which describes the data to be
188 * supplied as putfile data;
189 * - this list is added to when controlvm messages come in that supply
190 * file data
191 * - this list is removed from via the hotplug program that is actually
192 * consuming these buffers to write as file data */
193 struct list_head input_buffer_list;
194 spinlock_t req_list_lock; /* lock for input_buffer_list */
195
196 /* waiters for input_buffer_list to go non-empty */
197 wait_queue_head_t input_buffer_wq;
198
199 /* data not yet read within current putfile_buffer_entry */
200 struct putfile_active_buffer active_buf;
201
202 /* <0 = failed, 0 = in-progress, >0 = successful; */
203 /* note that this must be set with req_list_lock, and if you set <0, */
204 /* it is your responsibility to also free up all of the other objects */
205 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
206 /* before releasing the lock */
207 int completion_status;
208};
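/* Putting the structures above together, a remote --> local TRANSMIT_FILE
 * conversation proceeds roughly as follows (descriptive sketch only):
 *  1. the initial message creates a putfile_request and links it into
 *     Putfile_request_list;
 *  2. each payload-bearing message wraps its parser_ctx in a
 *     putfile_buffer_entry, appends it to input_buffer_list under
 *     req_list_lock, and wakes input_buffer_wq;
 *  3. the consumer drains active_buf / input_buffer_list and finally records
 *     the outcome in completion_status (<0 failed, 0 in progress, >0 done).
 */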
209
bd5b9b32 210static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
211
212struct parahotplug_request {
213 struct list_head list;
214 int id;
215 unsigned long expiration;
3ab47701 216 struct controlvm_message msg;
217};
218
219static LIST_HEAD(Parahotplug_request_list);
220static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
221static void parahotplug_process_list(void);
222
223/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
224 * CONTROLVM_REPORTEVENT.
225 */
226static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
227static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
228
229static void bus_create_response(ulong busNo, int response);
230static void bus_destroy_response(ulong busNo, int response);
231static void device_create_response(ulong busNo, ulong devNo, int response);
232static void device_destroy_response(ulong busNo, ulong devNo, int response);
233static void device_resume_response(ulong busNo, ulong devNo, int response);
234
929aa8ae 235static struct visorchipset_busdev_responders BusDev_Responders = {
236 .bus_create = bus_create_response,
237 .bus_destroy = bus_destroy_response,
238 .device_create = device_create_response,
239 .device_destroy = device_destroy_response,
927c7927 240 .device_pause = visorchipset_device_pause_response,
241 .device_resume = device_resume_response,
242};
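/* These responders are handed back to whoever registers through
 * visorchipset_register_busdev_server()/_client() below; bus_epilog() and
 * device_epilog() either let the registered notifier call back through one of
 * them, or, when nothing is registered, call the matching *_responder()
 * directly.
 */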
243
244/* info for /dev/visorchipset */
245static dev_t MajorDev = -1; /**< indicates major num for device */
246
247/* prototypes for attributes */
248static ssize_t toolaction_show(struct device *dev,
249 struct device_attribute *attr, char *buf);
250static ssize_t toolaction_store(struct device *dev,
251 struct device_attribute *attr, const char *buf, size_t count);
252static DEVICE_ATTR_RW(toolaction);
253
254static ssize_t boottotool_show(struct device *dev,
255 struct device_attribute *attr, char *buf);
256static ssize_t boottotool_store(struct device *dev,
257 struct device_attribute *attr, const char *buf, size_t count);
258static DEVICE_ATTR_RW(boottotool);
259
260static ssize_t error_show(struct device *dev, struct device_attribute *attr,
261 char *buf);
262static ssize_t error_store(struct device *dev, struct device_attribute *attr,
263 const char *buf, size_t count);
264static DEVICE_ATTR_RW(error);
265
266static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
267 char *buf);
268static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
269 const char *buf, size_t count);
270static DEVICE_ATTR_RW(textid);
271
272static ssize_t remaining_steps_show(struct device *dev,
273 struct device_attribute *attr, char *buf);
274static ssize_t remaining_steps_store(struct device *dev,
275 struct device_attribute *attr, const char *buf, size_t count);
276static DEVICE_ATTR_RW(remaining_steps);
277
278static ssize_t chipsetready_store(struct device *dev,
279 struct device_attribute *attr, const char *buf, size_t count);
280static DEVICE_ATTR_WO(chipsetready);
281
282static ssize_t devicedisabled_store(struct device *dev,
283 struct device_attribute *attr, const char *buf, size_t count);
284static DEVICE_ATTR_WO(devicedisabled);
285
286static ssize_t deviceenabled_store(struct device *dev,
287 struct device_attribute *attr, const char *buf, size_t count);
288static DEVICE_ATTR_WO(deviceenabled);
289
290static struct attribute *visorchipset_install_attrs[] = {
291 &dev_attr_toolaction.attr,
54b31229 292 &dev_attr_boottotool.attr,
293 &dev_attr_error.attr,
294 &dev_attr_textid.attr,
295 &dev_attr_remaining_steps.attr,
296 NULL
297};
298
299static struct attribute_group visorchipset_install_group = {
300 .name = "install",
301 .attrs = visorchipset_install_attrs
302};
303
304static struct attribute *visorchipset_guest_attrs[] = {
305 &dev_attr_chipsetready.attr,
306 NULL
307};
308
309static struct attribute_group visorchipset_guest_group = {
310 .name = "guest",
311 .attrs = visorchipset_guest_attrs
312};
313
314static struct attribute *visorchipset_parahotplug_attrs[] = {
315 &dev_attr_devicedisabled.attr,
316 &dev_attr_deviceenabled.attr,
317 NULL
318};
319
320static struct attribute_group visorchipset_parahotplug_group = {
321 .name = "parahotplug",
322 .attrs = visorchipset_parahotplug_attrs
323};
324
325static const struct attribute_group *visorchipset_dev_groups[] = {
326 &visorchipset_install_group,
18b87ed1 327 &visorchipset_guest_group,
e56fa7cd 328 &visorchipset_parahotplug_group,
329 NULL
330};
331
332/* /sys/devices/platform/visorchipset */
333static struct platform_device Visorchipset_platform_device = {
334 .name = "visorchipset",
335 .id = -1,
19f6634f 336 .dev.groups = visorchipset_dev_groups,
337};
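/* With the groups above attached to this platform device, the resulting sysfs
 * layout (derived from the group and attribute names) is:
 *   /sys/devices/platform/visorchipset/install/{toolaction,boottotool,error,
 *                                               textid,remaining_steps}
 *   /sys/devices/platform/visorchipset/guest/chipsetready
 *   /sys/devices/platform/visorchipset/parahotplug/{devicedisabled,
 *                                                   deviceenabled}
 */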
338
339/* Function prototypes */
340static void controlvm_respond(struct controlvm_message_header *msgHdr,
341 int response);
342static void controlvm_respond_chipset_init(
343 struct controlvm_message_header *msgHdr, int response,
344 enum ultra_chipset_feature features);
345static void controlvm_respond_physdev_changestate(
346 struct controlvm_message_header *msgHdr, int response,
347 struct spar_segment_state state);
12e364b9 348
349static ssize_t toolaction_show(struct device *dev,
350 struct device_attribute *attr,
351 char *buf)
19f6634f 352{
66e24b76 353 u8 toolAction;
354
355 visorchannel_read(ControlVm_channel,
356 offsetof(struct spar_controlvm_channel_protocol,
357 tool_action), &toolAction, sizeof(u8));
358 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
359}
360
361static ssize_t toolaction_store(struct device *dev,
362 struct device_attribute *attr,
363 const char *buf, size_t count)
19f6634f 364{
365 u8 toolAction;
366 int ret;
19f6634f 367
368 if (kstrtou8(buf, 10, &toolAction) != 0)
369 return -EINVAL;
370
371 ret = visorchannel_write(ControlVm_channel,
d19642f6 372 offsetof(struct spar_controlvm_channel_protocol, tool_action),
373 &toolAction, sizeof(u8));
374
375 if (ret)
376 return ret;
e22a4a0f 377 return count;
378}
379
380static ssize_t boottotool_show(struct device *dev,
381 struct device_attribute *attr,
382 char *buf)
54b31229 383{
755e2ecc 384 struct efi_spar_indication efiSparIndication;
385
386 visorchannel_read(ControlVm_channel,
387 offsetof(struct spar_controlvm_channel_protocol,
388 efi_spar_ind), &efiSparIndication,
755e2ecc 389 sizeof(struct efi_spar_indication));
54b31229 390 return scnprintf(buf, PAGE_SIZE, "%u\n",
2450301a 391 efiSparIndication.boot_to_tool);
392}
393
394static ssize_t boottotool_store(struct device *dev,
395 struct device_attribute *attr,
396 const char *buf, size_t count)
54b31229 397{
66e24b76 398 int val, ret;
755e2ecc 399 struct efi_spar_indication efiSparIndication;
54b31229 400
401 if (kstrtoint(buf, 10, &val) != 0)
402 return -EINVAL;
403
2450301a 404 efiSparIndication.boot_to_tool = val;
66e24b76 405 ret = visorchannel_write(ControlVm_channel,
406 offsetof(struct spar_controlvm_channel_protocol,
407 efi_spar_ind),
54b31229 408 &(efiSparIndication),
755e2ecc 409 sizeof(struct efi_spar_indication));
410
411 if (ret)
412 return ret;
e22a4a0f 413 return count;
54b31229 414}
415
416static ssize_t error_show(struct device *dev, struct device_attribute *attr,
417 char *buf)
418{
419 u32 error;
420
421 visorchannel_read(ControlVm_channel, offsetof(
d19642f6 422 struct spar_controlvm_channel_protocol, installation_error),
423 &error, sizeof(u32));
424 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
425}
426
427static ssize_t error_store(struct device *dev, struct device_attribute *attr,
428 const char *buf, size_t count)
429{
430 u32 error;
66e24b76 431 int ret;
422af17c 432
433 if (kstrtou32(buf, 10, &error) != 0)
434 return -EINVAL;
435
436 ret = visorchannel_write(ControlVm_channel,
437 offsetof(struct spar_controlvm_channel_protocol,
438 installation_error),
439 &error, sizeof(u32));
440 if (ret)
441 return ret;
e22a4a0f 442 return count;
443}
444
445static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
446 char *buf)
447{
448 u32 textId;
449
450 visorchannel_read(ControlVm_channel, offsetof(
d19642f6 451 struct spar_controlvm_channel_protocol, installation_text_id),
452 &textId, sizeof(u32));
453 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
454}
455
456static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
457 const char *buf, size_t count)
458{
459 u32 textId;
66e24b76 460 int ret;
422af17c 461
462 if (kstrtou32(buf, 10, &textId) != 0)
463 return -EINVAL;
464
465 ret = visorchannel_write(ControlVm_channel,
466 offsetof(struct spar_controlvm_channel_protocol,
467 installation_text_id),
468 &textId, sizeof(u32));
469 if (ret)
470 return ret;
e22a4a0f 471 return count;
472}
473
474
475static ssize_t remaining_steps_show(struct device *dev,
476 struct device_attribute *attr, char *buf)
477{
478 u16 remainingSteps;
479
480 visorchannel_read(ControlVm_channel,
481 offsetof(struct spar_controlvm_channel_protocol,
482 installation_remaining_steps),
483 &remainingSteps,
484 sizeof(u16));
485 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
486}
487
488static ssize_t remaining_steps_store(struct device *dev,
489 struct device_attribute *attr, const char *buf, size_t count)
490{
491 u16 remainingSteps;
66e24b76 492 int ret;
422af17c 493
494 if (kstrtou16(buf, 10, &remainingSteps) != 0)
495 return -EINVAL;
496
497 ret = visorchannel_write(ControlVm_channel,
498 offsetof(struct spar_controlvm_channel_protocol,
499 installation_remaining_steps),
500 &remainingSteps, sizeof(u16));
501 if (ret)
502 return ret;
e22a4a0f 503 return count;
504}
505
506#if 0
507static void
508testUnicode(void)
509{
510 wchar_t unicodeString[] = { 'a', 'b', 'c', 0 };
511 char s[sizeof(unicodeString) * NLS_MAX_CHARSET_SIZE];
512 wchar_t unicode2[99];
513
514 /* NOTE: Either due to a bug, or feature I don't understand, the
515 * kernel utf8_mbstowcs() and utf_wcstombs() do NOT copy the
516 * trailed NUL byte!! REALLY!!!!! Arrrrgggghhhhh
517 */
518
519 LOGINF("sizeof(wchar_t) = %d", sizeof(wchar_t));
520 LOGINF("utf8_wcstombs=%d",
521 chrs = utf8_wcstombs(s, unicodeString, sizeof(s)));
522 if (chrs >= 0)
523 s[chrs] = '\0'; /* GRRRRRRRR */
524 LOGINF("s='%s'", s);
525 LOGINF("utf8_mbstowcs=%d", chrs = utf8_mbstowcs(unicode2, s, 100));
526 if (chrs >= 0)
527 unicode2[chrs] = 0; /* GRRRRRRRR */
528 if (memcmp(unicodeString, unicode2, sizeof(unicodeString)) == 0)
529 LOGINF("strings match... good");
530 else
531 LOGINF("strings did not match!!");
532}
533#endif
534
535static void
536busInfo_clear(void *v)
537{
33192fa1 538 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
12e364b9 539
540 if (p->proc_object) {
541 visor_proc_DestroyObject(p->proc_object);
542 p->proc_object = NULL;
543 }
544 kfree(p->name);
545 p->name = NULL;
546
547 kfree(p->description);
548 p->description = NULL;
549
550 p->state.created = 0;
33192fa1 551 memset(p, 0, sizeof(struct visorchipset_bus_info));
552}
553
554static void
555devInfo_clear(void *v)
556{
557 struct visorchipset_device_info *p =
558 (struct visorchipset_device_info *)(v);
26eb2c0c 559
12e364b9 560 p->state.created = 0;
246e0cd0 561 memset(p, 0, sizeof(struct visorchipset_device_info));
562}
563
c242233e 564static u8
565check_chipset_events(void)
566{
567 int i;
c242233e 568 u8 send_msg = 1;
569 /* Check events to determine if response should be sent */
570 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
571 send_msg &= chipset_events[i];
572 return send_msg;
573}
574
575static void
576clear_chipset_events(void)
577{
578 int i;
579 /* Clear chipset_events */
580 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
581 chipset_events[i] = 0;
582}
583
584void
585visorchipset_register_busdev_server(
586 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 587 struct visorchipset_busdev_responders *responders,
1e7a59c1 588 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 589{
f2170625 590 down(&NotifierLock);
591 if (notifiers == NULL) {
592 memset(&BusDev_Server_Notifiers, 0,
593 sizeof(BusDev_Server_Notifiers));
594 serverregistered = 0; /* clear flag */
595 } else {
596 BusDev_Server_Notifiers = *notifiers;
597 serverregistered = 1; /* set flag */
598 }
599 if (responders)
600 *responders = BusDev_Responders;
601 if (driver_info)
602 bus_device_info_init(driver_info, "chipset", "visorchipset",
836bee9e 603 VERSION, NULL);
12e364b9 604
f2170625 605 up(&NotifierLock);
606}
607EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
608
609void
610visorchipset_register_busdev_client(
611 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 612 struct visorchipset_busdev_responders *responders,
43fce019 613 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 614{
f2170625 615 down(&NotifierLock);
616 if (notifiers == NULL) {
617 memset(&BusDev_Client_Notifiers, 0,
618 sizeof(BusDev_Client_Notifiers));
619 clientregistered = 0; /* clear flag */
620 } else {
621 BusDev_Client_Notifiers = *notifiers;
622 clientregistered = 1; /* set flag */
623 }
624 if (responders)
625 *responders = BusDev_Responders;
626 if (driver_info)
627 bus_device_info_init(driver_info, "chipset(bolts)",
628 "visorchipset", VERSION, NULL);
f2170625 629 up(&NotifierLock);
630}
631EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
632
633static void
634cleanup_controlvm_structures(void)
635{
33192fa1 636 struct visorchipset_bus_info *bi, *tmp_bi;
246e0cd0 637 struct visorchipset_device_info *di, *tmp_di;
12e364b9 638
e6b1ea77 639 list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
640 busInfo_clear(bi);
641 list_del(&bi->entry);
642 kfree(bi);
643 }
644
e6b1ea77 645 list_for_each_entry_safe(di, tmp_di, &DevInfoList, entry) {
646 devInfo_clear(di);
647 list_del(&di->entry);
648 kfree(di);
649 }
650}
651
652static void
3ab47701 653chipset_init(struct controlvm_message *inmsg)
654{
655 static int chipset_inited;
b9b141e8 656 enum ultra_chipset_feature features = 0;
657 int rc = CONTROLVM_RESP_SUCCESS;
658
659 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
660 if (chipset_inited) {
661 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
662 goto Away;
663 }
664 chipset_inited = 1;
665 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
666
667 /* Set features to indicate we support parahotplug (if Command
668 * also supports it). */
669 features =
2ea5117b 670 inmsg->cmd.init_chipset.
671 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
672
673 /* Set the "reply" bit so Command knows this is a
674 * features-aware driver. */
675 features |= ULTRA_CHIPSET_FEATURE_REPLY;
676
677Away:
678 if (rc < 0)
679 cleanup_controlvm_structures();
98d7b594 680 if (inmsg->hdr.flags.response_expected)
681 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
682}
683
684static void
3ab47701 685controlvm_init_response(struct controlvm_message *msg,
98d7b594 686 struct controlvm_message_header *msgHdr, int response)
12e364b9 687{
3ab47701 688 memset(msg, 0, sizeof(struct controlvm_message));
689 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
690 msg->hdr.payload_bytes = 0;
691 msg->hdr.payload_vm_offset = 0;
692 msg->hdr.payload_max_bytes = 0;
12e364b9 693 if (response < 0) {
98d7b594
BR
694 msg->hdr.flags.failed = 1;
695 msg->hdr.completion_status = (u32) (-response);
696 }
697}
698
699static void
98d7b594 700controlvm_respond(struct controlvm_message_header *msgHdr, int response)
12e364b9 701{
3ab47701 702 struct controlvm_message outmsg;
26eb2c0c 703
704 controlvm_init_response(&outmsg, msgHdr, response);
705 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
706 * back the deviceChangeState structure in the packet. */
98d7b594 707 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE
2ea5117b 708 && g_DeviceChangeStatePacket.device_change_state.bus_no ==
12e364b9 709 g_diagpoolBusNo
2ea5117b 710 && g_DeviceChangeStatePacket.device_change_state.dev_no ==
711 g_diagpoolDevNo)
712 outmsg.cmd = g_DeviceChangeStatePacket;
2098dbd1 713 if (outmsg.hdr.flags.test_message == 1)
12e364b9 714 return;
2098dbd1 715
716 if (!visorchannel_signalinsert(ControlVm_channel,
717 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
718 return;
719 }
720}
721
722static void
723controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
724 int response,
b9b141e8 725 enum ultra_chipset_feature features)
12e364b9 726{
3ab47701 727 struct controlvm_message outmsg;
26eb2c0c 728
12e364b9 729 controlvm_init_response(&outmsg, msgHdr, response);
2ea5117b 730 outmsg.cmd.init_chipset.features = features;
731 if (!visorchannel_signalinsert(ControlVm_channel,
732 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
733 return;
734 }
735}
736
737static void controlvm_respond_physdev_changestate(
738 struct controlvm_message_header *msgHdr, int response,
739 struct spar_segment_state state)
12e364b9 740{
3ab47701 741 struct controlvm_message outmsg;
26eb2c0c 742
12e364b9 743 controlvm_init_response(&outmsg, msgHdr, response);
744 outmsg.cmd.device_change_state.state = state;
745 outmsg.cmd.device_change_state.flags.phys_device = 1;
746 if (!visorchannel_signalinsert(ControlVm_channel,
747 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
748 return;
749 }
750}
751
752void
753visorchipset_save_message(struct controlvm_message *msg,
754 enum crash_obj_type type)
12e364b9 755{
b3c55b13 756 u32 localSavedCrashMsgOffset;
b06bdf7d 757 u16 localSavedCrashMsgCount;
758
759 /* get saved message count */
760 if (visorchannel_read(ControlVm_channel,
761 offsetof(struct spar_controlvm_channel_protocol,
762 saved_crash_message_count),
b06bdf7d 763 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
764 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
765 POSTCODE_SEVERITY_ERR);
766 return;
767 }
768
769 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
770 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
771 localSavedCrashMsgCount,
772 POSTCODE_SEVERITY_ERR);
773 return;
774 }
775
776 /* get saved crash message offset */
777 if (visorchannel_read(ControlVm_channel,
778 offsetof(struct spar_controlvm_channel_protocol,
779 saved_crash_message_offset),
b3c55b13 780 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
781 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
782 POSTCODE_SEVERITY_ERR);
783 return;
784 }
785
2c683cde 786 if (type == CRASH_BUS) {
787 if (visorchannel_write(ControlVm_channel,
788 localSavedCrashMsgOffset,
789 msg,
790 sizeof(struct controlvm_message)) < 0) {
791 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
792 POSTCODE_SEVERITY_ERR);
793 return;
794 }
795 } else {
796 if (visorchannel_write(ControlVm_channel,
797 localSavedCrashMsgOffset +
798 sizeof(struct controlvm_message), msg,
799 sizeof(struct controlvm_message)) < 0) {
800 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
801 POSTCODE_SEVERITY_ERR);
802 return;
803 }
804 }
805}
806EXPORT_SYMBOL_GPL(visorchipset_save_message);
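/* Layout note for visorchipset_save_message() above: the channel reserves room
 * for CONTROLVM_CRASHMSG_MAX saved messages starting at
 * saved_crash_message_offset; the CRASH_BUS message is written at that offset
 * and the device message immediately after it, at
 * offset + sizeof(struct controlvm_message).
 */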
807
808static void
53bebb13 809bus_responder(enum controlvm_id cmdId, ulong busNo, int response)
12e364b9 810{
33192fa1 811 struct visorchipset_bus_info *p = NULL;
812 BOOL need_clear = FALSE;
813
814 p = findbus(&BusInfoList, busNo);
0aca7844 815 if (!p)
12e364b9 816 return;
0aca7844 817
818 if (response < 0) {
819 if ((cmdId == CONTROLVM_BUS_CREATE) &&
820 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
821 /* undo the row we just created... */
822 delbusdevices(&DevInfoList, busNo);
823 } else {
824 if (cmdId == CONTROLVM_BUS_CREATE)
825 p->state.created = 1;
826 if (cmdId == CONTROLVM_BUS_DESTROY)
827 need_clear = TRUE;
828 }
829
0aca7844 830 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 831 return; /* no controlvm response needed */
0aca7844 832 if (p->pending_msg_hdr.id != (u32) cmdId)
12e364b9 833 return;
834 controlvm_respond(&p->pending_msg_hdr, response);
835 p->pending_msg_hdr.id = CONTROLVM_INVALID;
836 if (need_clear) {
837 busInfo_clear(p);
838 delbusdevices(&DevInfoList, busNo);
839 }
840}
841
842static void
53bebb13 843device_changestate_responder(enum controlvm_id cmdId,
12e364b9 844 ulong busNo, ulong devNo, int response,
3f833b54 845 struct spar_segment_state responseState)
12e364b9 846{
246e0cd0 847 struct visorchipset_device_info *p = NULL;
3ab47701 848 struct controlvm_message outmsg;
12e364b9 849
12e364b9 850 p = finddevice(&DevInfoList, busNo, devNo);
0aca7844 851 if (!p)
12e364b9 852 return;
0aca7844 853 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 854 return; /* no controlvm response needed */
0aca7844 855 if (p->pending_msg_hdr.id != cmdId)
12e364b9 856 return;
12e364b9 857
246e0cd0 858 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
12e364b9 859
860 outmsg.cmd.device_change_state.bus_no = busNo;
861 outmsg.cmd.device_change_state.dev_no = devNo;
862 outmsg.cmd.device_change_state.state = responseState;
863
864 if (!visorchannel_signalinsert(ControlVm_channel,
0aca7844 865 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 866 return;
12e364b9 867
246e0cd0 868 p->pending_msg_hdr.id = CONTROLVM_INVALID;
869}
870
871static void
872device_responder(enum controlvm_id cmdId, ulong busNo, ulong devNo,
873 int response)
12e364b9 874{
246e0cd0 875 struct visorchipset_device_info *p = NULL;
876 BOOL need_clear = FALSE;
877
878 p = finddevice(&DevInfoList, busNo, devNo);
0aca7844 879 if (!p)
12e364b9 880 return;
881 if (response >= 0) {
882 if (cmdId == CONTROLVM_DEVICE_CREATE)
883 p->state.created = 1;
884 if (cmdId == CONTROLVM_DEVICE_DESTROY)
885 need_clear = TRUE;
886 }
887
0aca7844 888 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 889 return; /* no controlvm response needed */
890
891 if (p->pending_msg_hdr.id != (u32) cmdId)
12e364b9 892 return;
0aca7844 893
894 controlvm_respond(&p->pending_msg_hdr, response);
895 p->pending_msg_hdr.id = CONTROLVM_INVALID;
896 if (need_clear)
897 devInfo_clear(p);
898}
899
900static void
b3c55b13 901bus_epilog(u32 busNo,
98d7b594 902 u32 cmd, struct controlvm_message_header *msgHdr,
903 int response, BOOL needResponse)
904{
905 BOOL notified = FALSE;
906
33192fa1 907 struct visorchipset_bus_info *pBusInfo = findbus(&BusInfoList, busNo);
12e364b9 908
0aca7844 909 if (!pBusInfo)
12e364b9 910 return;
0aca7844 911
12e364b9 912 if (needResponse) {
33192fa1 913 memcpy(&pBusInfo->pending_msg_hdr, msgHdr,
98d7b594 914 sizeof(struct controlvm_message_header));
12e364b9 915 } else
33192fa1 916 pBusInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 917
f2170625 918 down(&NotifierLock);
919 if (response == CONTROLVM_RESP_SUCCESS) {
920 switch (cmd) {
921 case CONTROLVM_BUS_CREATE:
922 /* We can't tell from the bus_create
923 * information which of our 2 bus flavors the
924 * devices on this bus will ultimately end up.
925 * FORTUNATELY, it turns out it is harmless to
926 * send the bus_create to both of them. We can
927 * narrow things down a little bit, though,
928 * because we know: - BusDev_Server can handle
929 * either server or client devices
930 * - BusDev_Client can handle ONLY client
931 * devices */
932 if (BusDev_Server_Notifiers.bus_create) {
933 (*BusDev_Server_Notifiers.bus_create) (busNo);
934 notified = TRUE;
935 }
936 if ((!pBusInfo->flags.server) /*client */ &&
937 BusDev_Client_Notifiers.bus_create) {
938 (*BusDev_Client_Notifiers.bus_create) (busNo);
939 notified = TRUE;
940 }
941 break;
942 case CONTROLVM_BUS_DESTROY:
943 if (BusDev_Server_Notifiers.bus_destroy) {
944 (*BusDev_Server_Notifiers.bus_destroy) (busNo);
945 notified = TRUE;
946 }
947 if ((!pBusInfo->flags.server) /*client */ &&
948 BusDev_Client_Notifiers.bus_destroy) {
949 (*BusDev_Client_Notifiers.bus_destroy) (busNo);
950 notified = TRUE;
951 }
952 break;
953 }
954 }
955 if (notified)
956 /* The callback function just called above is responsible
929aa8ae 957 * for calling the appropriate visorchipset_busdev_responders
958 * function, which will call bus_responder()
959 */
960 ;
961 else
962 bus_responder(cmd, busNo, response);
f2170625 963 up(&NotifierLock);
964}
965
966static void
3f833b54 967device_epilog(u32 busNo, u32 devNo, struct spar_segment_state state, u32 cmd,
98d7b594 968 struct controlvm_message_header *msgHdr, int response,
969 BOOL needResponse, BOOL for_visorbus)
970{
fe90d892 971 struct visorchipset_busdev_notifiers *notifiers = NULL;
972 BOOL notified = FALSE;
973
246e0cd0 974 struct visorchipset_device_info *pDevInfo =
975 finddevice(&DevInfoList, busNo, devNo);
976 char *envp[] = {
977 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
978 NULL
979 };
980
0aca7844 981 if (!pDevInfo)
12e364b9 982 return;
0aca7844 983
984 if (for_visorbus)
985 notifiers = &BusDev_Server_Notifiers;
986 else
987 notifiers = &BusDev_Client_Notifiers;
988 if (needResponse) {
246e0cd0 989 memcpy(&pDevInfo->pending_msg_hdr, msgHdr,
98d7b594 990 sizeof(struct controlvm_message_header));
12e364b9 991 } else
246e0cd0 992 pDevInfo->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 993
f2170625 994 down(&NotifierLock);
995 if (response >= 0) {
996 switch (cmd) {
997 case CONTROLVM_DEVICE_CREATE:
998 if (notifiers->device_create) {
999 (*notifiers->device_create) (busNo, devNo);
1000 notified = TRUE;
1001 }
1002 break;
1003 case CONTROLVM_DEVICE_CHANGESTATE:
1004 /* ServerReady / ServerRunning / SegmentStateRunning */
1005 if (state.alive == segment_state_running.alive &&
1006 state.operating ==
1007 segment_state_running.operating) {
1008 if (notifiers->device_resume) {
1009 (*notifiers->device_resume) (busNo,
1010 devNo);
1011 notified = TRUE;
1012 }
1013 }
1014 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 1015 else if (state.alive == segment_state_standby.alive &&
3f833b54 1016 state.operating ==
bd0d2dcc 1017 segment_state_standby.operating) {
1018 /* technically this is standby case
1019 * where server is lost
1020 */
1021 if (notifiers->device_pause) {
1022 (*notifiers->device_pause) (busNo,
1023 devNo);
1024 notified = TRUE;
1025 }
bd0d2dcc 1026 } else if (state.alive == segment_state_paused.alive &&
3f833b54 1027 state.operating ==
bd0d2dcc 1028 segment_state_paused.operating) {
1029 /* this is lite pause where channel is
1030 * still valid just 'pause' of it
1031 */
1032 if (busNo == g_diagpoolBusNo
1033 && devNo == g_diagpoolDevNo) {
1034 /* this will trigger the
1035 * diag_shutdown.sh script in
1036 * the visorchipset hotplug */
1037 kobject_uevent_env
1038 (&Visorchipset_platform_device.dev.
1039 kobj, KOBJ_ONLINE, envp);
1040 }
1041 }
1042 break;
1043 case CONTROLVM_DEVICE_DESTROY:
1044 if (notifiers->device_destroy) {
1045 (*notifiers->device_destroy) (busNo, devNo);
1046 notified = TRUE;
1047 }
1048 break;
1049 }
1050 }
1051 if (notified)
1052 /* The callback function just called above is responsible
929aa8ae 1053 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1054 * function, which will call device_responder()
1055 */
1056 ;
1057 else
1058 device_responder(cmd, busNo, devNo, response);
f2170625 1059 up(&NotifierLock);
1060}
1061
1062static void
3ab47701 1063bus_create(struct controlvm_message *inmsg)
12e364b9 1064{
1065 struct controlvm_message_packet *cmd = &inmsg->cmd;
1066 ulong busNo = cmd->create_bus.bus_no;
12e364b9 1067 int rc = CONTROLVM_RESP_SUCCESS;
33192fa1 1068 struct visorchipset_bus_info *pBusInfo = NULL;
1069
1070
1071 pBusInfo = findbus(&BusInfoList, busNo);
1072 if (pBusInfo && (pBusInfo->state.created == 1)) {
1073 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1074 POSTCODE_SEVERITY_ERR);
1075 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1076 goto Away;
12e364b9 1077 }
33192fa1 1078 pBusInfo = kzalloc(sizeof(struct visorchipset_bus_info), GFP_KERNEL);
12e364b9 1079 if (pBusInfo == NULL) {
1080 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, busNo,
1081 POSTCODE_SEVERITY_ERR);
1082 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1083 goto Away;
12e364b9
KC
1084 }
1085
12e364b9 1086 INIT_LIST_HEAD(&pBusInfo->entry);
1087 pBusInfo->bus_no = busNo;
1088 pBusInfo->dev_no = cmd->create_bus.dev_count;
1089
1090 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1091
98d7b594 1092 if (inmsg->hdr.flags.test_message == 1)
33192fa1 1093 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1094 else
33192fa1 1095 pBusInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
12e364b9 1096
98d7b594 1097 pBusInfo->flags.server = inmsg->hdr.flags.server;
1098 pBusInfo->chan_info.channel_addr = cmd->create_bus.channel_addr;
1099 pBusInfo->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1100 pBusInfo->chan_info.channel_type_uuid =
9b1caee7 1101 cmd->create_bus.bus_data_type_uuid;
33192fa1 1102 pBusInfo->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1103
1104 list_add(&pBusInfo->entry, &BusInfoList);
1105
1106 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1107
1108Away:
1109 bus_epilog(busNo, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1110 rc, inmsg->hdr.flags.response_expected == 1);
1111}
1112
1113static void
3ab47701 1114bus_destroy(struct controlvm_message *inmsg)
12e364b9 1115{
1116 struct controlvm_message_packet *cmd = &inmsg->cmd;
1117 ulong busNo = cmd->destroy_bus.bus_no;
33192fa1 1118 struct visorchipset_bus_info *pBusInfo;
1119 int rc = CONTROLVM_RESP_SUCCESS;
1120
1121 pBusInfo = findbus(&BusInfoList, busNo);
1122 if (!pBusInfo) {
1123 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1124 goto Away;
1125 }
1126 if (pBusInfo->state.created == 0) {
1127 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1128 goto Away;
1129 }
1130
1131Away:
1132 bus_epilog(busNo, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1133 rc, inmsg->hdr.flags.response_expected == 1);
1134}
1135
1136static void
1137bus_configure(struct controlvm_message *inmsg,
1138 struct parser_context *parser_ctx)
12e364b9 1139{
1140 struct controlvm_message_packet *cmd = &inmsg->cmd;
1141 ulong busNo = cmd->configure_bus.bus_no;
33192fa1 1142 struct visorchipset_bus_info *pBusInfo = NULL;
1143 int rc = CONTROLVM_RESP_SUCCESS;
1144 char s[99];
1145
2ea5117b 1146 busNo = cmd->configure_bus.bus_no;
1147 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1148
1149 pBusInfo = findbus(&BusInfoList, busNo);
1150 if (!pBusInfo) {
1151 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1152 POSTCODE_SEVERITY_ERR);
1153 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1154 goto Away;
1155 }
1156 if (pBusInfo->state.created == 0) {
1157 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1158 POSTCODE_SEVERITY_ERR);
1159 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1160 goto Away;
1161 }
1162 /* TBD - add this check to other commands also... */
33192fa1 1163 if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
1164 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1165 POSTCODE_SEVERITY_ERR);
1166 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1167 goto Away;
1168 }
1169
1170 pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1171 pBusInfo->partition_uuid = parser_id_get(parser_ctx);
1172 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1173 pBusInfo->name = parser_string_get(parser_ctx);
1174
33192fa1 1175 visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
1176 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1177Away:
1178 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1179 rc, inmsg->hdr.flags.response_expected == 1);
1180}
1181
1182static void
3ab47701 1183my_device_create(struct controlvm_message *inmsg)
12e364b9 1184{
2ea5117b 1185 struct controlvm_message_packet *cmd = &inmsg->cmd;
1186 ulong busNo = cmd->create_device.bus_no;
1187 ulong devNo = cmd->create_device.dev_no;
246e0cd0 1188 struct visorchipset_device_info *pDevInfo = NULL;
33192fa1 1189 struct visorchipset_bus_info *pBusInfo = NULL;
1190 int rc = CONTROLVM_RESP_SUCCESS;
1191
1192 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1193 if (pDevInfo && (pDevInfo->state.created == 1)) {
1194 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1195 POSTCODE_SEVERITY_ERR);
1196 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1197 goto Away;
1198 }
1199 pBusInfo = findbus(&BusInfoList, busNo);
1200 if (!pBusInfo) {
1201 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1202 POSTCODE_SEVERITY_ERR);
1203 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1204 goto Away;
1205 }
1206 if (pBusInfo->state.created == 0) {
1207 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1208 POSTCODE_SEVERITY_ERR);
1209 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1210 goto Away;
12e364b9 1211 }
246e0cd0 1212 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
12e364b9 1213 if (pDevInfo == NULL) {
1214 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1215 POSTCODE_SEVERITY_ERR);
1216 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1217 goto Away;
12e364b9 1218 }
97a84f12 1219
12e364b9 1220 INIT_LIST_HEAD(&pDevInfo->entry);
1221 pDevInfo->bus_no = busNo;
1222 pDevInfo->dev_no = devNo;
1223 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1224 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1225 POSTCODE_SEVERITY_INFO);
1226
98d7b594 1227 if (inmsg->hdr.flags.test_message == 1)
246e0cd0 1228 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1229 else
1230 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1231 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1232 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1233 pDevInfo->chan_info.channel_type_uuid =
9b1caee7 1234 cmd->create_device.data_type_uuid;
246e0cd0 1235 pDevInfo->chan_info.intr = cmd->create_device.intr;
1236 list_add(&pDevInfo->entry, &DevInfoList);
1237 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1238 POSTCODE_SEVERITY_INFO);
1239Away:
1240 /* get the bus and devNo for DiagPool channel */
1241 if (pDevInfo &&
1242 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1243 g_diagpoolBusNo = busNo;
1244 g_diagpoolDevNo = devNo;
12e364b9 1245 }
bd0d2dcc 1246 device_epilog(busNo, devNo, segment_state_running,
12e364b9 1247 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
98d7b594 1248 inmsg->hdr.flags.response_expected == 1,
246e0cd0 1249 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
1250}
1251
1252static void
3ab47701 1253my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1254{
1255 struct controlvm_message_packet *cmd = &inmsg->cmd;
1256 ulong busNo = cmd->device_change_state.bus_no;
1257 ulong devNo = cmd->device_change_state.dev_no;
1258 struct spar_segment_state state = cmd->device_change_state.state;
246e0cd0 1259 struct visorchipset_device_info *pDevInfo = NULL;
1260 int rc = CONTROLVM_RESP_SUCCESS;
1261
1262 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1263 if (!pDevInfo) {
1264 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1265 POSTCODE_SEVERITY_ERR);
1266 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1267 goto Away;
1268 }
1269 if (pDevInfo->state.created == 0) {
1270 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1271 POSTCODE_SEVERITY_ERR);
22ad57ba 1272 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1273 }
1274Away:
1275 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1276 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1277 &inmsg->hdr, rc,
98d7b594 1278 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1279 FOR_VISORBUS(
246e0cd0 1280 pDevInfo->chan_info.channel_type_uuid));
1281}
1282
1283static void
3ab47701 1284my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1285{
1286 struct controlvm_message_packet *cmd = &inmsg->cmd;
1287 ulong busNo = cmd->destroy_device.bus_no;
1288 ulong devNo = cmd->destroy_device.dev_no;
246e0cd0 1289 struct visorchipset_device_info *pDevInfo = NULL;
1290 int rc = CONTROLVM_RESP_SUCCESS;
1291
1292 pDevInfo = finddevice(&DevInfoList, busNo, devNo);
1293 if (!pDevInfo) {
1294 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1295 goto Away;
1296 }
1297 if (pDevInfo->state.created == 0) {
22ad57ba 1298 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1299 }
1300
1301Away:
1302 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
bd0d2dcc 1303 device_epilog(busNo, devNo, segment_state_running,
12e364b9 1304 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
98d7b594 1305 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1306 FOR_VISORBUS(
246e0cd0 1307 pDevInfo->chan_info.channel_type_uuid));
1308}
1309
1310/* When provided with the physical address of the controlvm channel
1311 * (phys_addr), the offset to the payload area we need to manage
1312 * (offset), and the size of this payload area (bytes), fills in the
84b11dfd 1313 * controlvm_payload_info struct. Returns TRUE for success or FALSE
1314 * for failure.
1315 */
1316static int
5fc0229a 1317initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
84b11dfd 1318 struct controlvm_payload_info *info)
12e364b9 1319{
c242233e 1320 u8 __iomem *payload = NULL;
1321 int rc = CONTROLVM_RESP_SUCCESS;
1322
1323 if (info == NULL) {
1324 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1325 goto Away;
12e364b9 1326 }
84b11dfd 1327 memset(info, 0, sizeof(struct controlvm_payload_info));
12e364b9 1328 if ((offset == 0) || (bytes == 0)) {
1329 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1330 goto Away;
1331 }
1332 payload = ioremap_cache(phys_addr + offset, bytes);
1333 if (payload == NULL) {
1334 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1335 goto Away;
1336 }
1337
1338 info->offset = offset;
1339 info->bytes = bytes;
1340 info->ptr = payload;
1341
1342Away:
1343 if (rc < 0) {
1344 if (payload != NULL) {
1345 iounmap(payload);
1346 payload = NULL;
1347 }
1348 }
1349 return rc;
1350}
1351
1352static void
84b11dfd 1353destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1354{
1355 if (info->ptr != NULL) {
1356 iounmap(info->ptr);
1357 info->ptr = NULL;
1358 }
84b11dfd 1359 memset(info, 0, sizeof(struct controlvm_payload_info));
1360}
1361
1362static void
1363initialize_controlvm_payload(void)
1364{
1365 HOSTADDRESS phys_addr = visorchannel_get_physaddr(ControlVm_channel);
5fc0229a 1366 u64 payloadOffset = 0;
b3c55b13 1367 u32 payloadBytes = 0;
26eb2c0c 1368
12e364b9 1369 if (visorchannel_read(ControlVm_channel,
1370 offsetof(struct spar_controlvm_channel_protocol,
1371 request_payload_offset),
12e364b9 1372 &payloadOffset, sizeof(payloadOffset)) < 0) {
1373 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1374 POSTCODE_SEVERITY_ERR);
1375 return;
1376 }
1377 if (visorchannel_read(ControlVm_channel,
1378 offsetof(struct spar_controlvm_channel_protocol,
1379 request_payload_bytes),
12e364b9 1380 &payloadBytes, sizeof(payloadBytes)) < 0) {
1381 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1382 POSTCODE_SEVERITY_ERR);
1383 return;
1384 }
1385 initialize_controlvm_payload_info(phys_addr,
1386 payloadOffset, payloadBytes,
1387 &ControlVm_payload_info);
1388}
1389
1390/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1391 * Returns CONTROLVM_RESP_xxx code.
1392 */
1393int
1394visorchipset_chipset_ready(void)
1395{
1396 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1397 return CONTROLVM_RESP_SUCCESS;
1398}
1399EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1400
1401int
1402visorchipset_chipset_selftest(void)
1403{
1404 char env_selftest[20];
1405 char *envp[] = { env_selftest, NULL };
26eb2c0c 1406
1407 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1408 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1409 envp);
1410 return CONTROLVM_RESP_SUCCESS;
1411}
1412EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1413
1414/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1415 * Returns CONTROLVM_RESP_xxx code.
1416 */
1417int
1418visorchipset_chipset_notready(void)
1419{
1420 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1421 return CONTROLVM_RESP_SUCCESS;
1422}
1423EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1424
1425static void
98d7b594 1426chipset_ready(struct controlvm_message_header *msgHdr)
1427{
1428 int rc = visorchipset_chipset_ready();
26eb2c0c 1429
1430 if (rc != CONTROLVM_RESP_SUCCESS)
1431 rc = -rc;
98d7b594 1432 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
12e364b9 1433 controlvm_respond(msgHdr, rc);
98d7b594 1434 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1435 /* Send CHIPSET_READY response when all modules have been loaded
1436 * and disks mounted for the partition
1437 */
1438 g_ChipSetMsgHdr = *msgHdr;
1439 }
1440}
1441
1442static void
98d7b594 1443chipset_selftest(struct controlvm_message_header *msgHdr)
1444{
1445 int rc = visorchipset_chipset_selftest();
26eb2c0c 1446
1447 if (rc != CONTROLVM_RESP_SUCCESS)
1448 rc = -rc;
98d7b594 1449 if (msgHdr->flags.response_expected)
1450 controlvm_respond(msgHdr, rc);
1451}
1452
1453static void
98d7b594 1454chipset_notready(struct controlvm_message_header *msgHdr)
1455{
1456 int rc = visorchipset_chipset_notready();
26eb2c0c 1457
1458 if (rc != CONTROLVM_RESP_SUCCESS)
1459 rc = -rc;
98d7b594 1460 if (msgHdr->flags.response_expected)
1461 controlvm_respond(msgHdr, rc);
1462}
1463
1464/* This is your "one-stop" shop for grabbing the next message from the
1465 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1466 */
1467static BOOL
3ab47701 1468read_controlvm_event(struct controlvm_message *msg)
1469{
1470 if (visorchannel_signalremove(ControlVm_channel,
1471 CONTROLVM_QUEUE_EVENT, msg)) {
1472 /* got a message */
0aca7844 1473 if (msg->hdr.flags.test_message == 1)
12e364b9 1474 return FALSE;
e22a4a0f 1475 return TRUE;
1476 }
1477 return FALSE;
1478}
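/* A typical caller (the periodic work function later in this file) loops
 * along the lines of: while (read_controlvm_event(&msg)) handle_command(...);
 * note that a test message is dequeued here but reported as FALSE, so it is
 * silently dropped rather than processed.
 */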
1479
1480/*
1481 * The general parahotplug flow works as follows. The visorchipset
1482 * driver receives a DEVICE_CHANGESTATE message from Command
1483 * specifying a physical device to enable or disable. The CONTROLVM
1484 * message handler calls parahotplug_process_message, which then adds
1485 * the message to a global list and kicks off a udev event which
1486 * causes a user level script to enable or disable the specified
1487 * device. The udev script then writes to
1488 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1489 * to get called, at which point the appropriate CONTROLVM message is
1490 * retrieved from the list and responded to.
1491 */
1492
1493#define PARAHOTPLUG_TIMEOUT_MS 2000
1494
1495/*
1496 * Generate unique int to match an outstanding CONTROLVM message with a
1497 * udev script /proc response
1498 */
1499static int
1500parahotplug_next_id(void)
1501{
1502 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1503
1504 return atomic_inc_return(&id);
1505}
1506
1507/*
1508 * Returns the time (in jiffies) when a CONTROLVM message on the list
1509 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1510 */
1511static unsigned long
1512parahotplug_next_expiration(void)
1513{
2cc1a1b3 1514 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1515}
1516
1517/*
1518 * Create a parahotplug_request, which is basically a wrapper for a
1519 * CONTROLVM_MESSAGE that we can stick on a list
1520 */
1521static struct parahotplug_request *
3ab47701 1522parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1523{
ea0dcfcf
QL
1524 struct parahotplug_request *req;
1525
1526 req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
12e364b9
KC
1527 if (req == NULL)
1528 return NULL;
1529
1530 req->id = parahotplug_next_id();
1531 req->expiration = parahotplug_next_expiration();
1532 req->msg = *msg;
1533
1534 return req;
1535}
1536
1537/*
1538 * Free a parahotplug_request.
1539 */
1540static void
1541parahotplug_request_destroy(struct parahotplug_request *req)
1542{
1543 kfree(req);
1544}
1545
1546/*
1547 * Cause uevent to run the user level script to do the disable/enable
1548 * specified in (the CONTROLVM message in) the specified
1549 * parahotplug_request
1550 */
1551static void
1552parahotplug_request_kickoff(struct parahotplug_request *req)
1553{
2ea5117b 1554 struct controlvm_message_packet *cmd = &req->msg.cmd;
1555 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1556 env_func[40];
1557 char *envp[] = {
1558 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1559 };
1560
1561 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1562 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1563 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1564 cmd->device_change_state.state.active);
12e364b9 1565 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1566 cmd->device_change_state.bus_no);
12e364b9 1567 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1568 cmd->device_change_state.dev_no >> 3);
12e364b9 1569 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1570 cmd->device_change_state.dev_no & 0x7);
12e364b9 1571
1572 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1573 envp);
1574}
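/* Worked example of the encoding above (illustrative values): a request with
 * device_change_state.dev_no == 26 (0x1a) yields SPAR_PARAHOTPLUG_DEVICE=3
 * (26 >> 3) and SPAR_PARAHOTPLUG_FUNCTION=2 (26 & 0x7), i.e. dev_no packs a
 * device/function pair in its low bits.
 */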
1575
1576/*
1577 * Remove any request from the list that's been on there too long and
1578 * respond with an error.
1579 */
1580static void
1581parahotplug_process_list(void)
1582{
1583 struct list_head *pos = NULL;
1584 struct list_head *tmp = NULL;
1585
1586 spin_lock(&Parahotplug_request_list_lock);
1587
1588 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1589 struct parahotplug_request *req =
1590 list_entry(pos, struct parahotplug_request, list);
1591 if (time_after_eq(jiffies, req->expiration)) {
1592 list_del(pos);
98d7b594 1593 if (req->msg.hdr.flags.response_expected)
1594 controlvm_respond_physdev_changestate(
1595 &req->msg.hdr,
1596 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
2ea5117b 1597 req->msg.cmd.device_change_state.state);
1598 parahotplug_request_destroy(req);
1599 }
1600 }
1601
1602 spin_unlock(&Parahotplug_request_list_lock);
1603}
1604
1605/*
 1606 * Called from the parahotplug sysfs store handlers, which means the user
 1607 * script has finished the enable/disable. Find the matching identifier,
 1608 * and respond to the CONTROLVM message with success.
1609 */
1610static int
b06bdf7d 1611parahotplug_request_complete(int id, u16 active)
12e364b9
KC
1612{
1613 struct list_head *pos = NULL;
1614 struct list_head *tmp = NULL;
1615
1616 spin_lock(&Parahotplug_request_list_lock);
1617
1618 /* Look for a request matching "id". */
1619 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1620 struct parahotplug_request *req =
1621 list_entry(pos, struct parahotplug_request, list);
1622 if (req->id == id) {
1623 /* Found a match. Remove it from the list and
1624 * respond.
1625 */
1626 list_del(pos);
1627 spin_unlock(&Parahotplug_request_list_lock);
2ea5117b 1628 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1629 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1630 controlvm_respond_physdev_changestate(
1631 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1632 req->msg.cmd.device_change_state.state);
12e364b9
KC
1633 parahotplug_request_destroy(req);
1634 return 0;
1635 }
1636 }
1637
1638 spin_unlock(&Parahotplug_request_list_lock);
1639 return -1;
1640}
1641
1642/*
1643 * Enables or disables a PCI device by kicking off a udev script
1644 */
bd5b9b32 1645static void
3ab47701 1646parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1647{
1648 struct parahotplug_request *req;
1649
1650 req = parahotplug_request_create(inmsg);
1651
0aca7844 1652 if (req == NULL)
12e364b9 1653 return;
12e364b9 1654
2ea5117b 1655 if (inmsg->cmd.device_change_state.state.active) {
12e364b9
KC
1656 /* For enable messages, just respond with success
1657 * right away. This is a bit of a hack, but there are
1658 * issues with the early enable messages we get (with
1659 * either the udev script not detecting that the device
1660 * is up, or not getting called at all). Fortunately
1661 * the messages that get lost don't matter anyway, as
1662 * devices are automatically enabled at
1663 * initialization.
1664 */
1665 parahotplug_request_kickoff(req);
1666 controlvm_respond_physdev_changestate(&inmsg->hdr,
2ea5117b
BR
1667 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1668 device_change_state.state);
12e364b9
KC
1669 parahotplug_request_destroy(req);
1670 } else {
1671 /* For disable messages, add the request to the
1672 * request list before kicking off the udev script. It
1673 * won't get responded to until the script has
1674 * indicated it's done.
1675 */
1676 spin_lock(&Parahotplug_request_list_lock);
1677 list_add_tail(&(req->list), &Parahotplug_request_list);
1678 spin_unlock(&Parahotplug_request_list_lock);
1679
1680 parahotplug_request_kickoff(req);
1681 }
1682}
1683
12e364b9
KC
1684/* Process a controlvm message.
1685 * Return result:
1686 * FALSE - this function will return FALSE only in the case where the
1687 * controlvm message was NOT processed, but processing must be
1688 * retried before reading the next controlvm message; a
1689 * scenario where this can occur is when we need to throttle
1690 * the allocation of memory in which to copy out controlvm
1691 * payload data
1692 * TRUE - processing of the controlvm message completed,
1693 * either successfully or with an error.
1694 */
1695static BOOL
3ab47701 1696handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
12e364b9 1697{
2ea5117b 1698 struct controlvm_message_packet *cmd = &inmsg.cmd;
5fc0229a 1699 u64 parametersAddr = 0;
b3c55b13 1700 u32 parametersBytes = 0;
317d9614 1701 struct parser_context *parser_ctx = NULL;
12e364b9 1702 BOOL isLocalAddr = FALSE;
3ab47701 1703 struct controlvm_message ackmsg;
12e364b9
KC
1704
1705 /* create parsing context if necessary */
98d7b594 1706 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1707 if (channel_addr == 0)
12e364b9 1708 return TRUE;
98d7b594
BR
1709 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1710 parametersBytes = inmsg.hdr.payload_bytes;
12e364b9
KC
1711
1712 /* Parameter and channel addresses within test messages actually lie
1713 * within our OS-controlled memory. We need to know that, because it
1714 * makes a difference in how we compute the virtual address.
1715 */
1716 if (parametersAddr != 0 && parametersBytes != 0) {
1717 BOOL retry = FALSE;
26eb2c0c 1718
12e364b9 1719 parser_ctx =
b2d97e4b 1720 parser_init_byte_stream(parametersAddr, parametersBytes,
12e364b9 1721 isLocalAddr, &retry);
1b08872e
BR
1722 if (!parser_ctx && retry)
1723 return FALSE;
12e364b9
KC
1724 }
1725
1726 if (!isLocalAddr) {
1727 controlvm_init_response(&ackmsg, &inmsg.hdr,
1728 CONTROLVM_RESP_SUCCESS);
1b08872e
BR
1729 if (ControlVm_channel)
1730 visorchannel_signalinsert(ControlVm_channel,
1731 CONTROLVM_QUEUE_ACK,
1732 &ackmsg);
12e364b9 1733 }
98d7b594 1734 switch (inmsg.hdr.id) {
12e364b9 1735 case CONTROLVM_CHIPSET_INIT:
12e364b9
KC
1736 chipset_init(&inmsg);
1737 break;
1738 case CONTROLVM_BUS_CREATE:
12e364b9
KC
1739 bus_create(&inmsg);
1740 break;
1741 case CONTROLVM_BUS_DESTROY:
12e364b9
KC
1742 bus_destroy(&inmsg);
1743 break;
1744 case CONTROLVM_BUS_CONFIGURE:
12e364b9
KC
1745 bus_configure(&inmsg, parser_ctx);
1746 break;
1747 case CONTROLVM_DEVICE_CREATE:
12e364b9
KC
1748 my_device_create(&inmsg);
1749 break;
1750 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1751 if (cmd->device_change_state.flags.phys_device) {
12e364b9
KC
1752 parahotplug_process_message(&inmsg);
1753 } else {
12e364b9
KC
 1754 /* save the hdr and cmd structures for later use
 1755 * when sending back the response to the command */
1756 my_device_changestate(&inmsg);
1757 g_DiagMsgHdr = inmsg.hdr;
1758 g_DeviceChangeStatePacket = inmsg.cmd;
1759 break;
1760 }
1761 break;
1762 case CONTROLVM_DEVICE_DESTROY:
12e364b9
KC
1763 my_device_destroy(&inmsg);
1764 break;
1765 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1766 /* no op for now, just send a response that we passed */
98d7b594 1767 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1768 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1769 break;
1770 case CONTROLVM_CHIPSET_READY:
12e364b9
KC
1771 chipset_ready(&inmsg.hdr);
1772 break;
1773 case CONTROLVM_CHIPSET_SELFTEST:
12e364b9
KC
1774 chipset_selftest(&inmsg.hdr);
1775 break;
1776 case CONTROLVM_CHIPSET_STOP:
12e364b9
KC
1777 chipset_notready(&inmsg.hdr);
1778 break;
1779 default:
98d7b594 1780 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1781 controlvm_respond(&inmsg.hdr,
1782 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1783 break;
1784 }
1785
1786 if (parser_ctx != NULL) {
1787 parser_done(parser_ctx);
1788 parser_ctx = NULL;
1789 }
1790 return TRUE;
1791}
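/*
 * Illustrative sketch (not part of the driver): the caller contract for
 * handle_command() described in the comment above.  On FALSE the message
 * must be kept and retried on a later poll rather than reading a new one;
 * the helper and its parameter names are hypothetical.
 */
static BOOL
process_one_message_example(struct controlvm_message *msg, HOSTADDRESS chan,
			    struct controlvm_message *pending,
			    BOOL *pending_valid)
{
	if (handle_command(*msg, chan)) {
		*pending_valid = FALSE;	/* fully processed (ok or error) */
		return TRUE;
	}
	*pending = *msg;		/* throttled: stash the message... */
	*pending_valid = TRUE;		/* ...and retry it on the next loop */
	return FALSE;
}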
1792
d746cb55 1793static HOSTADDRESS controlvm_get_channel_address(void)
524b0b63 1794{
5fc0229a 1795 u64 addr = 0;
b3c55b13 1796 u32 size = 0;
524b0b63 1797
0aca7844 1798 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1799 return 0;
0aca7844 1800
524b0b63
BR
1801 return addr;
1802}
1803
12e364b9
KC
1804static void
1805controlvm_periodic_work(struct work_struct *work)
1806{
3ab47701 1807 struct controlvm_message inmsg;
12e364b9
KC
1808 BOOL gotACommand = FALSE;
1809 BOOL handle_command_failed = FALSE;
5fc0229a 1810 static u64 Poll_Count;
12e364b9
KC
1811
1812 /* make sure visorbus server is registered for controlvm callbacks */
1813 if (visorchipset_serverregwait && !serverregistered)
097f4c19 1814 goto Away;
12e364b9
KC
 1815 /* make sure visorclientbus server is registered for controlvm
1816 * callbacks
1817 */
1818 if (visorchipset_clientregwait && !clientregistered)
097f4c19 1819 goto Away;
12e364b9 1820
12e364b9 1821 Poll_Count++;
8a1182eb 1822 if (Poll_Count < 250)
097f4c19 1823 goto Away; /* fewer than 250 polls so far; skip processing */
12e364b9
KC
1826
1827 /* Check events to determine if response to CHIPSET_READY
1828 * should be sent
1829 */
1830 if (visorchipset_holdchipsetready
98d7b594 1831 && (g_ChipSetMsgHdr.id != CONTROLVM_INVALID)) {
12e364b9 1832 if (check_chipset_events() == 1) {
12e364b9
KC
1833 controlvm_respond(&g_ChipSetMsgHdr, 0);
1834 clear_chipset_events();
1835 memset(&g_ChipSetMsgHdr, 0,
98d7b594 1836 sizeof(struct controlvm_message_header));
12e364b9
KC
1837 }
1838 }
1839
8a1182eb
BR
1840 while (visorchannel_signalremove(ControlVm_channel,
1841 CONTROLVM_QUEUE_RESPONSE,
1842 &inmsg)) {
12e364b9 1843 }
8a1182eb
BR
1844 if (!gotACommand) {
1845 if (ControlVm_Pending_Msg_Valid) {
1846 /* we throttled processing of a prior
1847 * msg, so try to process it again
1848 * rather than reading a new one
1849 */
1850 inmsg = ControlVm_Pending_Msg;
1851 ControlVm_Pending_Msg_Valid = FALSE;
1852 gotACommand = TRUE;
1853 } else
1854 gotACommand = read_controlvm_event(&inmsg);
1855 }
12e364b9
KC
1856
1857 handle_command_failed = FALSE;
1858 while (gotACommand && (!handle_command_failed)) {
b53e0e93 1859 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1860 if (handle_command(inmsg,
1861 visorchannel_get_physaddr
1862 (ControlVm_channel)))
1863 gotACommand = read_controlvm_event(&inmsg);
1864 else {
1865 /* this is a scenario where throttling
1866 * is required, but probably NOT an
 1867 * error; we stash the current
1868 * controlvm msg so we will attempt to
1869 * reprocess it on our next loop
1870 */
1871 handle_command_failed = TRUE;
1872 ControlVm_Pending_Msg = inmsg;
1873 ControlVm_Pending_Msg_Valid = TRUE;
12e364b9
KC
1874 }
1875 }
1876
1877 /* parahotplug_worker */
1878 parahotplug_process_list();
1879
12e364b9
KC
1880Away:
1881
1882 if (time_after(jiffies,
b53e0e93 1883 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1884 /* it's been longer than MIN_IDLE_SECONDS since we
1885 * processed our last controlvm message; slow down the
1886 * polling
1887 */
911e213e
BR
1888 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1889 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1890 } else {
911e213e
BR
1891 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1892 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1893 }
1894
4b4b535e 1895 queue_delayed_work(Periodic_controlvm_workqueue,
911e213e 1896 &Periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1897}
1898
1899static void
1900setup_crash_devices_work_queue(struct work_struct *work)
1901{
1902
3ab47701
BR
1903 struct controlvm_message localCrashCreateBusMsg;
1904 struct controlvm_message localCrashCreateDevMsg;
1905 struct controlvm_message msg;
b3c55b13 1906 u32 localSavedCrashMsgOffset;
b06bdf7d 1907 u16 localSavedCrashMsgCount;
12e364b9
KC
1908
1909 /* make sure visorbus server is registered for controlvm callbacks */
1910 if (visorchipset_serverregwait && !serverregistered)
097f4c19 1911 goto Away;
12e364b9
KC
1912
 1913 /* make sure visorclientbus server is registered for controlvm
1914 * callbacks
1915 */
1916 if (visorchipset_clientregwait && !clientregistered)
097f4c19 1917 goto Away;
12e364b9
KC
1918
1919 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1920
1921 /* send init chipset msg */
98d7b594 1922 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1923 msg.cmd.init_chipset.bus_count = 23;
1924 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1925
1926 chipset_init(&msg);
1927
12e364b9
KC
1928 /* get saved message count */
1929 if (visorchannel_read(ControlVm_channel,
d19642f6
BR
1930 offsetof(struct spar_controlvm_channel_protocol,
1931 saved_crash_message_count),
b06bdf7d 1932 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
12e364b9
KC
1933 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1934 POSTCODE_SEVERITY_ERR);
1935 return;
1936 }
1937
1938 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
12e364b9
KC
1939 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1940 localSavedCrashMsgCount,
1941 POSTCODE_SEVERITY_ERR);
1942 return;
1943 }
1944
1945 /* get saved crash message offset */
1946 if (visorchannel_read(ControlVm_channel,
d19642f6
BR
1947 offsetof(struct spar_controlvm_channel_protocol,
1948 saved_crash_message_offset),
b3c55b13 1949 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
12e364b9
KC
1950 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1951 POSTCODE_SEVERITY_ERR);
1952 return;
1953 }
1954
1955 /* read create device message for storage bus offset */
1956 if (visorchannel_read(ControlVm_channel,
1957 localSavedCrashMsgOffset,
1958 &localCrashCreateBusMsg,
3ab47701 1959 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1960 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1961 POSTCODE_SEVERITY_ERR);
1962 return;
1963 }
1964
1965 /* read create device message for storage device */
1966 if (visorchannel_read(ControlVm_channel,
1967 localSavedCrashMsgOffset +
3ab47701 1968 sizeof(struct controlvm_message),
12e364b9 1969 &localCrashCreateDevMsg,
3ab47701 1970 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1971 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1972 POSTCODE_SEVERITY_ERR);
1973 return;
1974 }
1975
1976 /* reuse IOVM create bus message */
2ea5117b 1977 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0)
12e364b9
KC
1978 bus_create(&localCrashCreateBusMsg);
1979 else {
12e364b9
KC
1980 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1981 POSTCODE_SEVERITY_ERR);
1982 return;
1983 }
1984
1985 /* reuse create device message for storage device */
f91b9262 1986 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0)
12e364b9
KC
1987 my_device_create(&localCrashCreateDevMsg);
1988 else {
12e364b9
KC
1989 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1990 POSTCODE_SEVERITY_ERR);
1991 return;
1992 }
12e364b9
KC
1993 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1994 return;
1995
1996Away:
1997
911e213e 1998 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1999
4b4b535e 2000 queue_delayed_work(Periodic_controlvm_workqueue,
911e213e 2001 &Periodic_controlvm_work, poll_jiffies);
12e364b9
KC
2002}
2003
2004static void
2005bus_create_response(ulong busNo, int response)
2006{
2007 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
2008}
2009
2010static void
2011bus_destroy_response(ulong busNo, int response)
2012{
2013 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
2014}
2015
2016static void
2017device_create_response(ulong busNo, ulong devNo, int response)
2018{
2019 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
2020}
2021
2022static void
2023device_destroy_response(ulong busNo, ulong devNo, int response)
2024{
2025 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
2026}
2027
2028void
8420f417 2029visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
12e364b9
KC
2030{
2031
2032 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 2033 bus_no, dev_no, response,
bd0d2dcc 2034 segment_state_standby);
12e364b9 2035}
927c7927 2036EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
12e364b9
KC
2037
2038static void
2039device_resume_response(ulong busNo, ulong devNo, int response)
2040{
2041 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2042 busNo, devNo, response,
bd0d2dcc 2043 segment_state_running);
12e364b9
KC
2044}
2045
2046BOOL
77db7127 2047visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2048{
77db7127 2049 void *p = findbus(&BusInfoList, bus_no);
26eb2c0c 2050
0aca7844 2051 if (!p)
12e364b9 2052 return FALSE;
77db7127 2053 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
12e364b9
KC
2054 return TRUE;
2055}
2056EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2057
2058BOOL
58dd8f2d 2059visorchipset_set_bus_context(ulong bus_no, void *context)
12e364b9 2060{
58dd8f2d 2061 struct visorchipset_bus_info *p = findbus(&BusInfoList, bus_no);
26eb2c0c 2062
0aca7844 2063 if (!p)
12e364b9 2064 return FALSE;
12e364b9
KC
2065 p->bus_driver_context = context;
2066 return TRUE;
2067}
2068EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2069
2070BOOL
b486df19
BR
2071visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2072 struct visorchipset_device_info *dev_info)
12e364b9 2073{
b486df19 2074 void *p = finddevice(&DevInfoList, bus_no, dev_no);
26eb2c0c 2075
0aca7844 2076 if (!p)
12e364b9 2077 return FALSE;
b486df19 2078 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
12e364b9
KC
2079 return TRUE;
2080}
2081EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2082
2083BOOL
cf0bd0b5 2084visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
12e364b9 2085{
246e0cd0 2086 struct visorchipset_device_info *p =
cf0bd0b5 2087 finddevice(&DevInfoList, bus_no, dev_no);
26eb2c0c 2088
0aca7844 2089 if (!p)
12e364b9 2090 return FALSE;
12e364b9
KC
2091 p->bus_driver_context = context;
2092 return TRUE;
2093}
2094EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
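/*
 * Illustrative sketch (not part of the driver): how a visorbus client might
 * use the exported accessors above -- look up the chipset's record for a
 * device, then stash driver-private state via the context pointer.  The
 * function and the state type are hypothetical.
 */
struct example_client_state {
	int probed;
};

static int example_client_attach(ulong bus_no, ulong dev_no,
				 struct example_client_state *state)
{
	struct visorchipset_device_info dev_info;

	if (!visorchipset_get_device_info(bus_no, dev_no, &dev_info))
		return -ENODEV;	/* chipset is not tracking this device */
	if (!visorchipset_set_device_context(bus_no, dev_no, state))
		return -ENODEV;	/* record vanished between the two calls */
	state->probed = 1;
	return 0;
}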
2095
2096/* Generic wrapper function for allocating memory from a kmem_cache pool.
2097 */
2098void *
2099visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2100 char *fn, int ln)
2101{
2102 gfp_t gfp;
2103 void *p;
2104
2105 if (ok_to_block)
2106 gfp = GFP_KERNEL;
2107 else
2108 gfp = GFP_ATOMIC;
2109 /* __GFP_NORETRY means "ok to fail", meaning
2110 * kmem_cache_alloc() can return NULL, implying the caller CAN
2111 * cope with failure. If you do NOT specify __GFP_NORETRY,
2112 * Linux will go to extreme measures to get memory for you
2113 * (like, invoke oom killer), which will probably cripple the
2114 * system.
2115 */
2116 gfp |= __GFP_NORETRY;
2117 p = kmem_cache_alloc(pool, gfp);
0aca7844 2118 if (!p)
12e364b9 2119 return NULL;
0aca7844 2120
12e364b9
KC
2121 atomic_inc(&Visorchipset_cache_buffers_in_use);
2122 return p;
2123}
2124
2125/* Generic wrapper function for freeing memory from a kmem_cache pool.
2126 */
2127void
2128visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2129{
0aca7844 2130 if (!p)
12e364b9 2131 return;
0aca7844 2132
12e364b9
KC
2133 atomic_dec(&Visorchipset_cache_buffers_in_use);
2134 kmem_cache_free(pool, p);
2135}
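/*
 * Illustrative sketch (not part of the driver): because the wrappers above
 * pass __GFP_NORETRY, callers must treat a NULL return as a normal,
 * recoverable outcome rather than an error to escalate.  The helper name is
 * hypothetical; struct putfile_buffer_entry is the type this file already
 * pools.
 */
static int example_use_cache_pool(struct kmem_cache *pool)
{
	struct putfile_buffer_entry *entry;

	entry = visorchipset_cache_alloc(pool, FALSE /* don't block */,
					 (char *)__FILE__, __LINE__);
	if (!entry)
		return -ENOMEM;	/* expected under memory pressure */

	/* ... fill in and use the entry here ... */

	visorchipset_cache_free(pool, entry, (char *)__FILE__, __LINE__);
	return 0;
}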
2136
18b87ed1
BR
2137static ssize_t chipsetready_store(struct device *dev,
2138 struct device_attribute *attr, const char *buf, size_t count)
12e364b9 2139{
18b87ed1 2140 char msgtype[64];
12e364b9 2141
66e24b76
BR
2142 if (sscanf(buf, "%63s", msgtype) != 1)
2143 return -EINVAL;
2144
2145 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2146 chipset_events[0] = 1;
2147 return count;
2148 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2149 chipset_events[1] = 1;
2150 return count;
e22a4a0f
BR
2151 }
2152 return -EINVAL;
12e364b9
KC
2153}
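/*
 * Illustrative sketch (not part of the driver): the two flags set by
 * chipsetready_store() above are assumed to gate the deferred CHIPSET_READY
 * response that controlvm_periodic_work() sends once its
 * check_chipset_events() test reports success.  The helper below only
 * models that "all events seen" test and is hypothetical.
 */
static int example_all_chipset_events_seen(void)
{
	int i;

	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		if (!chipset_events[i])
			return 0;	/* still waiting on an event */
	return 1;	/* CALLHOMEDISK_MOUNTED and MODULES_LOADED both seen */
}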
2154
e56fa7cd
BR
2155/* The parahotplug/devicedisabled interface gets called by our support script
2156 * when an SR-IOV device has been shut down. The ID is passed to the script
2157 * and then passed back when the device has been removed.
2158 */
2159static ssize_t devicedisabled_store(struct device *dev,
2160 struct device_attribute *attr, const char *buf, size_t count)
2161{
2162 uint id;
2163
2164 if (kstrtouint(buf, 10, &id) != 0)
2165 return -EINVAL;
2166
2167 parahotplug_request_complete(id, 0);
2168 return count;
2169}
2170
2171/* The parahotplug/deviceenabled interface gets called by our support script
2172 * when an SR-IOV device has been recovered. The ID is passed to the script
2173 * and then passed back when the device has been brought back up.
2174 */
2175static ssize_t deviceenabled_store(struct device *dev,
2176 struct device_attribute *attr, const char *buf, size_t count)
2177{
2178 uint id;
2179
2180 if (kstrtouint(buf, 10, &id) != 0)
2181 return -EINVAL;
2182
2183 parahotplug_request_complete(id, 1);
2184 return count;
2185}
2186
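/*
 * Illustrative userspace sketch (not part of this kernel file): the script
 * side of the id round trip.  parahotplug_request_kickoff() exports
 * SPAR_PARAHOTPLUG_ID in the uevent environment; when the script has
 * finished its work it writes that id back through the deviceenabled (or
 * devicedisabled) attribute, which lands in parahotplug_request_complete().
 * The sysfs path below is an assumption for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *id = getenv("SPAR_PARAHOTPLUG_ID");
	FILE *f;

	if (!id)
		return 1;
	/* hypothetical attribute path */
	f = fopen("/sys/devices/platform/visorchipset/parahotplug/deviceenabled",
		  "w");
	if (!f)
		return 1;
	fprintf(f, "%s\n", id);	/* completes the matching pending request */
	fclose(f);
	return 0;
}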
12e364b9
KC
2187static int __init
2188visorchipset_init(void)
2189{
2190 int rc = 0, x = 0;
8a1182eb 2191 HOSTADDRESS addr;
12e364b9 2192
fcd0157e
KC
2193 if (!unisys_spar_platform)
2194 return -ENODEV;
2195
12e364b9
KC
2196 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2197 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
2198 memset(&ControlVm_payload_info, 0, sizeof(ControlVm_payload_info));
2199 memset(&LiveDump_info, 0, sizeof(LiveDump_info));
2200 atomic_set(&LiveDump_info.buffers_in_use, 0);
2201
9f8d0e8b 2202 if (visorchipset_testvnic) {
9f8d0e8b
KC
2203 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2204 rc = x;
2205 goto Away;
2206 }
12e364b9 2207
8a1182eb
BR
2208 addr = controlvm_get_channel_address();
2209 if (addr != 0) {
2210 ControlVm_channel =
2211 visorchannel_create_with_lock
2212 (addr,
d19642f6 2213 sizeof(struct spar_controlvm_channel_protocol),
5fbaa4b3 2214 spar_controlvm_channel_protocol_uuid);
93a84565
BR
2215 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2216 visorchannel_get_header(ControlVm_channel))) {
8a1182eb
BR
2217 initialize_controlvm_payload();
2218 } else {
8a1182eb
BR
2219 visorchannel_destroy(ControlVm_channel);
2220 ControlVm_channel = NULL;
2221 return -ENODEV;
2222 }
2223 } else {
8a1182eb
BR
2224 return -ENODEV;
2225 }
2226
12e364b9 2227 MajorDev = MKDEV(visorchipset_major, 0);
9f8d0e8b 2228 rc = visorchipset_file_init(MajorDev, &ControlVm_channel);
4cb005a9 2229 if (rc < 0) {
4cb005a9
KC
2230 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2231 goto Away;
2232 }
9f8d0e8b 2233
98d7b594 2234 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2235
98d7b594 2236 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2237
98d7b594 2238 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2239
12e364b9
KC
2240 Putfile_buffer_list_pool =
2241 kmem_cache_create(Putfile_buffer_list_pool_name,
2242 sizeof(struct putfile_buffer_entry),
2243 0, SLAB_HWCACHE_ALIGN, NULL);
2244 if (!Putfile_buffer_list_pool) {
4cb005a9
KC
2245 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2246 rc = -1;
2247 goto Away;
12e364b9 2248 }
2098dbd1 2249 if (!visorchipset_disable_controlvm) {
12e364b9
KC
2250 /* if booting in a crash kernel */
2251 if (visorchipset_crash_kernel)
2252 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2253 setup_crash_devices_work_queue);
2254 else
2255 INIT_DELAYED_WORK(&Periodic_controlvm_work,
2256 controlvm_periodic_work);
2257 Periodic_controlvm_workqueue =
2258 create_singlethread_workqueue("visorchipset_controlvm");
2259
4cb005a9 2260 if (Periodic_controlvm_workqueue == NULL) {
4cb005a9
KC
2261 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2262 DIAG_SEVERITY_ERR);
2263 rc = -ENOMEM;
2264 goto Away;
2265 }
b53e0e93 2266 most_recent_message_jiffies = jiffies;
911e213e 2267 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
9f8d0e8b 2268 rc = queue_delayed_work(Periodic_controlvm_workqueue,
911e213e 2269 &Periodic_controlvm_work, poll_jiffies);
4cb005a9 2270 if (rc < 0) {
4cb005a9
KC
2271 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2272 DIAG_SEVERITY_ERR);
2273 goto Away;
2274 }
9f8d0e8b 2275
12e364b9
KC
2276 }
2277
2278 Visorchipset_platform_device.dev.devt = MajorDev;
4cb005a9 2279 if (platform_device_register(&Visorchipset_platform_device) < 0) {
4cb005a9
KC
2280 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2281 rc = -1;
2282 goto Away;
2283 }
12e364b9 2284 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
22ad57ba 2285 rc = 0;
12e364b9 2286Away:
12e364b9 2287 if (rc) {
12e364b9
KC
2288 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2289 POSTCODE_SEVERITY_ERR);
2290 }
2291 return rc;
2292}
2293
2294static void
2295visorchipset_exit(void)
2296{
12e364b9
KC
2297 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2298
 2299 if (!visorchipset_disable_controlvm) {
2302 cancel_delayed_work(&Periodic_controlvm_work);
2303 flush_workqueue(Periodic_controlvm_workqueue);
2304 destroy_workqueue(Periodic_controlvm_workqueue);
2305 Periodic_controlvm_workqueue = NULL;
2306 destroy_controlvm_payload_info(&ControlVm_payload_info);
2307 }
2308 Test_Vnic_channel = NULL;
2309 if (Putfile_buffer_list_pool) {
2310 kmem_cache_destroy(Putfile_buffer_list_pool);
2311 Putfile_buffer_list_pool = NULL;
2312 }
1783319f 2313
12e364b9
KC
2314 cleanup_controlvm_structures();
2315
98d7b594 2316 memset(&g_DiagMsgHdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2317
98d7b594 2318 memset(&g_ChipSetMsgHdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2319
98d7b594 2320 memset(&g_DelDumpMsgHdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2321
8a1182eb
BR
2322 visorchannel_destroy(ControlVm_channel);
2323
12e364b9
KC
2324 visorchipset_file_cleanup();
2325 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
12e364b9
KC
2326}
2327
2328module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2329MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2330int visorchipset_testvnic = 0;
2331
2332module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2333MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2334int visorchipset_testvnicclient = 0;
2335
2336module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2337MODULE_PARM_DESC(visorchipset_testmsg,
2338 "1 to manufacture the chipset, bus, and switch messages");
2339int visorchipset_testmsg = 0;
2340
2341module_param_named(major, visorchipset_major, int, S_IRUGO);
2342MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2343int visorchipset_major = 0;
2344
2345module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2346MODULE_PARM_DESC(visorchipset_serverregwait,
2347 "1 to have the module wait for the visor bus to register");
2348int visorchipset_serverregwait = 0; /* default is off */
2349module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2350MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2351int visorchipset_clientregwait = 1; /* default is on */
2352module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2353MODULE_PARM_DESC(visorchipset_testteardown,
2354 "1 to test teardown of the chipset, bus, and switch");
2355int visorchipset_testteardown = 0; /* default is off */
2356module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2357 S_IRUGO);
2358MODULE_PARM_DESC(visorchipset_disable_controlvm,
2359 "1 to disable polling of controlVm channel");
2360int visorchipset_disable_controlvm = 0; /* default is off */
2361module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2362MODULE_PARM_DESC(visorchipset_crash_kernel,
2363 "1 means we are running in crash kernel");
2364int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2365module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2366 int, S_IRUGO);
2367MODULE_PARM_DESC(visorchipset_holdchipsetready,
2368 "1 to hold response to CHIPSET_READY");
2369int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2370 * response immediately */
2371module_init(visorchipset_init);
2372module_exit(visorchipset_exit);
2373
2374MODULE_AUTHOR("Unisys");
2375MODULE_LICENSE("GPL");
2376MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2377 VERSION);
2378MODULE_VERSION(VERSION);