staging: unisys: refactor bus_configure()
drivers/staging/unisys/visorchipset/visorchipset_main.c
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18#include "globals.h"
19#include "visorchipset.h"
20#include "procobjecttree.h"
21#include "visorchannel.h"
22#include "periodic_work.h"
23#include "file.h"
24#include "parser.h"
12e364b9 25#include "uisutils.h"
26#include "controlvmcompletionstatus.h"
27#include "guestlinuxdebug.h"
28
29#include <linux/nls.h>
30#include <linux/netdevice.h>
31#include <linux/platform_device.h>
90addb02 32#include <linux/uuid.h>
33
34#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37#define TEST_VNIC_SWITCHNO 1
38#define TEST_VNIC_BUSNO 9
39
40#define MAX_NAME_SIZE 128
41#define MAX_IP_SIZE 50
42#define MAXOUTSTANDINGCHANNELCOMMAND 256
43#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
45
46/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47 * we switch to slow polling mode. As soon as we get a controlvm
48 * message, we switch back to fast polling mode.
49 */
50#define MIN_IDLE_SECONDS 10
911e213e 51static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
b53e0e93 52static ulong most_recent_message_jiffies; /* when we got our last
bd5b9b32 53 * controlvm message */
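/*
 * Illustrative sketch (not part of this driver): one way the idle-based
 * polling switch described above could be expressed in terms of the
 * globals declared here. The helper name is hypothetical.
 */
static inline unsigned long example_choose_poll_jiffies(void)
{
	/* idle for at least MIN_IDLE_SECONDS -> poll slowly */
	if (time_after(jiffies,
		       most_recent_message_jiffies + MIN_IDLE_SECONDS * HZ))
		return POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	/* a controlvm message was seen recently -> poll quickly */
	return POLLJIFFIES_CONTROLVMCHANNEL_FAST;
}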
54static inline char *
55NONULLSTR(char *s)
56{
57 if (s)
58 return s;
e22a4a0f 59 return "";
60}
61
62static int serverregistered;
63static int clientregistered;
64
65#define MAX_CHIPSET_EVENTS 2
c242233e 66static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 67
68static struct delayed_work periodic_controlvm_work;
69static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 70static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 71
72static struct controlvm_message_header g_diag_msg_hdr;
73static struct controlvm_message_header g_chipset_msg_hdr;
74static struct controlvm_message_header g_del_dump_msg_hdr;
59827f00 75static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 76 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 77/* 0xffffff is an invalid Bus/Device number */
78static ulong g_diagpool_bus_no = 0xffffff;
79static ulong g_diagpool_dev_no = 0xffffff;
4f44b72d 80static struct controlvm_message_packet g_devicechangestate_packet;
81
82/* Only VNIC and VHBA channels are sent to visorclientbus (aka
83 * "visorhackbus")
84 */
85#define FOR_VISORHACKBUS(channel_type_guid) \
9eee5d1f 86 (((uuid_le_cmp(channel_type_guid,\
87 spar_vnic_channel_protocol_uuid) == 0) ||\
88 (uuid_le_cmp(channel_type_guid,\
9eee5d1f 89 spar_vhba_channel_protocol_uuid) == 0)))
90#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
91
92#define is_diagpool_channel(channel_type_guid) \
93 (uuid_le_cmp(channel_type_guid,\
94 spar_diag_pool_channel_protocol_uuid) == 0)
12e364b9 95
96static LIST_HEAD(bus_info_list);
97static LIST_HEAD(dev_info_list);
12e364b9 98
c3d9a224 99static struct visorchannel *controlvm_channel;
12e364b9 100
101/* Manages the request payload in the controlvm channel */
102static struct controlvm_payload_info {
c242233e 103 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 104 u64 offset; /* offset from beginning of controlvm
12e364b9 105 * channel to beginning of payload * pool */
b3c55b13 106 u32 bytes; /* number of bytes in payload pool */
84982fbf 107} controlvm_payload_info;
12e364b9 108
109/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
110 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
111 */
112static struct livedump_info {
113 struct controlvm_message_header dumpcapture_header;
114 struct controlvm_message_header gettextdump_header;
115 struct controlvm_message_header dumpcomplete_header;
116 BOOL gettextdump_outstanding;
117 u32 crc32;
118 ulong length;
119 atomic_t buffers_in_use;
120 ulong destination;
ea33b4ee 121} livedump_info;
122
123/* The following globals are used to handle the scenario where we are unable to
124 * offload the payload from a controlvm message due to memory requirements. In
125 * this scenario, we simply stash the controlvm message, then attempt to
126 * process it again the next time controlvm_periodic_work() runs.
127 */
3ab47701 128static struct controlvm_message ControlVm_Pending_Msg;
129static BOOL ControlVm_Pending_Msg_Valid = FALSE;
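/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * stash-and-retry pattern described in the comment above. A message that
 * cannot be processed yet is parked in ControlVm_Pending_Msg and the valid
 * flag is set, so the next controlvm_periodic_work() pass can try it again
 * before reading anything new from the channel.
 */
static BOOL example_stash_pending_msg(struct controlvm_message *msg)
{
	if (ControlVm_Pending_Msg_Valid)
		return FALSE;	/* a message is already parked */
	ControlVm_Pending_Msg = *msg;
	ControlVm_Pending_Msg_Valid = TRUE;
	return TRUE;
}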
130
131/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
132 * TRANSMIT_FILE PutFile payloads.
133 */
134static struct kmem_cache *Putfile_buffer_list_pool;
135static const char Putfile_buffer_list_pool_name[] =
136 "controlvm_putfile_buffer_list_pool";
137
138/* This identifies a data buffer that has been received via a controlvm message
139 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
140 */
141struct putfile_buffer_entry {
142 struct list_head next; /* putfile_buffer_entry list */
317d9614 143 struct parser_context *parser_ctx; /* points to input data buffer */
144};
145
146/* List of struct putfile_request *, via next_putfile_request member.
147 * Each entry in this list identifies an outstanding TRANSMIT_FILE
148 * conversation.
149 */
150static LIST_HEAD(Putfile_request_list);
151
152/* This describes a buffer and its current state of transfer (e.g., how many
153 * bytes have already been supplied as putfile data, and how many bytes are
154 * remaining) for a putfile_request.
155 */
156struct putfile_active_buffer {
157 /* a payload from a controlvm message, containing a file data buffer */
317d9614 158 struct parser_context *parser_ctx;
159 /* points within data area of parser_ctx to next byte of data */
160 u8 *pnext;
161 /* # bytes left from <pnext> to the end of this data buffer */
162 size_t bytes_remaining;
163};
164
165#define PUTFILE_REQUEST_SIG 0x0906101302281211
166/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
167 * conversation. Structs of this type are dynamically linked into
168 * <Putfile_request_list>.
169 */
170struct putfile_request {
171 u64 sig; /* PUTFILE_REQUEST_SIG */
172
173 /* header from original TransmitFile request */
98d7b594 174 struct controlvm_message_header controlvm_header;
175 u64 file_request_number; /* from original TransmitFile request */
176
177 /* link to next struct putfile_request */
178 struct list_head next_putfile_request;
179
180 /* most-recent sequence number supplied via a controlvm message */
181 u64 data_sequence_number;
182
183 /* head of putfile_buffer_entry list, which describes the data to be
184 * supplied as putfile data;
185 * - this list is added to when controlvm messages come in that supply
186 * file data
187 * - this list is removed from via the hotplug program that is actually
188 * consuming these buffers to write as file data */
189 struct list_head input_buffer_list;
190 spinlock_t req_list_lock; /* lock for input_buffer_list */
191
192 /* waiters for input_buffer_list to go non-empty */
193 wait_queue_head_t input_buffer_wq;
194
195 /* data not yet read within current putfile_buffer_entry */
196 struct putfile_active_buffer active_buf;
197
198 /* <0 = failed, 0 = in-progress, >0 = successful; */
199 /* note that this must be set with req_list_lock, and if you set <0, */
200 /* it is your responsibility to also free up all of the other objects */
201 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
202 /* before releasing the lock */
203 int completion_status;
204};
205
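/*
 * Illustrative sketch (hypothetical helper, not in the driver): how an
 * incoming TRANSMIT_FILE payload would be queued on a putfile_request,
 * following the locking and wakeup rules documented in the struct above.
 * Allocation from Putfile_buffer_list_pool and error handling are omitted.
 */
static void example_queue_putfile_buffer(struct putfile_request *req,
					 struct putfile_buffer_entry *entry)
{
	spin_lock(&req->req_list_lock);
	list_add_tail(&entry->next, &req->input_buffer_list);
	spin_unlock(&req->req_list_lock);

	/* wake anyone waiting for input_buffer_list to go non-empty */
	wake_up(&req->input_buffer_wq);
}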
bd5b9b32 206static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
207
208struct parahotplug_request {
209 struct list_head list;
210 int id;
211 unsigned long expiration;
3ab47701 212 struct controlvm_message msg;
213};
214
215static LIST_HEAD(Parahotplug_request_list);
216static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
217static void parahotplug_process_list(void);
218
219/* Notifier callback sets below are filled in when the server and client
220 * bus/device driver flavors register with visorchipset.
221 */
222static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
223static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
224
225static void bus_create_response(ulong busNo, int response);
226static void bus_destroy_response(ulong busNo, int response);
227static void device_create_response(ulong busNo, ulong devNo, int response);
228static void device_destroy_response(ulong busNo, ulong devNo, int response);
229static void device_resume_response(ulong busNo, ulong devNo, int response);
230
929aa8ae 231static struct visorchipset_busdev_responders BusDev_Responders = {
232 .bus_create = bus_create_response,
233 .bus_destroy = bus_destroy_response,
234 .device_create = device_create_response,
235 .device_destroy = device_destroy_response,
927c7927 236 .device_pause = visorchipset_device_pause_response,
237 .device_resume = device_resume_response,
238};
239
240/* info for /dev/visorchipset */
241static dev_t MajorDev = -1; /**< indicates major num for device */
242
243/* prototypes for attributes */
244static ssize_t toolaction_show(struct device *dev,
245 struct device_attribute *attr, char *buf);
246static ssize_t toolaction_store(struct device *dev,
247 struct device_attribute *attr, const char *buf, size_t count);
248static DEVICE_ATTR_RW(toolaction);
249
250static ssize_t boottotool_show(struct device *dev,
251 struct device_attribute *attr, char *buf);
252static ssize_t boottotool_store(struct device *dev,
253 struct device_attribute *attr, const char *buf, size_t count);
254static DEVICE_ATTR_RW(boottotool);
255
256static ssize_t error_show(struct device *dev, struct device_attribute *attr,
257 char *buf);
258static ssize_t error_store(struct device *dev, struct device_attribute *attr,
259 const char *buf, size_t count);
260static DEVICE_ATTR_RW(error);
261
262static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
263 char *buf);
264static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
265 const char *buf, size_t count);
266static DEVICE_ATTR_RW(textid);
267
268static ssize_t remaining_steps_show(struct device *dev,
269 struct device_attribute *attr, char *buf);
270static ssize_t remaining_steps_store(struct device *dev,
271 struct device_attribute *attr, const char *buf, size_t count);
272static DEVICE_ATTR_RW(remaining_steps);
273
274static ssize_t chipsetready_store(struct device *dev,
275 struct device_attribute *attr, const char *buf, size_t count);
276static DEVICE_ATTR_WO(chipsetready);
277
278static ssize_t devicedisabled_store(struct device *dev,
279 struct device_attribute *attr, const char *buf, size_t count);
280static DEVICE_ATTR_WO(devicedisabled);
281
282static ssize_t deviceenabled_store(struct device *dev,
283 struct device_attribute *attr, const char *buf, size_t count);
284static DEVICE_ATTR_WO(deviceenabled);
285
286static struct attribute *visorchipset_install_attrs[] = {
287 &dev_attr_toolaction.attr,
54b31229 288 &dev_attr_boottotool.attr,
289 &dev_attr_error.attr,
290 &dev_attr_textid.attr,
291 &dev_attr_remaining_steps.attr,
292 NULL
293};
294
295static struct attribute_group visorchipset_install_group = {
296 .name = "install",
297 .attrs = visorchipset_install_attrs
298};
299
300static struct attribute *visorchipset_guest_attrs[] = {
301 &dev_attr_chipsetready.attr,
302 NULL
303};
304
305static struct attribute_group visorchipset_guest_group = {
306 .name = "guest",
307 .attrs = visorchipset_guest_attrs
308};
309
310static struct attribute *visorchipset_parahotplug_attrs[] = {
311 &dev_attr_devicedisabled.attr,
312 &dev_attr_deviceenabled.attr,
313 NULL
314};
315
316static struct attribute_group visorchipset_parahotplug_group = {
317 .name = "parahotplug",
318 .attrs = visorchipset_parahotplug_attrs
319};
320
321static const struct attribute_group *visorchipset_dev_groups[] = {
322 &visorchipset_install_group,
18b87ed1 323 &visorchipset_guest_group,
e56fa7cd 324 &visorchipset_parahotplug_group,
325 NULL
326};
327
328/* /sys/devices/platform/visorchipset */
329static struct platform_device Visorchipset_platform_device = {
330 .name = "visorchipset",
331 .id = -1,
19f6634f 332 .dev.groups = visorchipset_dev_groups,
333};
334
335/* Function prototypes */
336static void controlvm_respond(struct controlvm_message_header *msgHdr,
337 int response);
338static void controlvm_respond_chipset_init(
339 struct controlvm_message_header *msgHdr, int response,
340 enum ultra_chipset_feature features);
341static void controlvm_respond_physdev_changestate(
342 struct controlvm_message_header *msgHdr, int response,
343 struct spar_segment_state state);
12e364b9 344
345static ssize_t toolaction_show(struct device *dev,
346 struct device_attribute *attr,
347 char *buf)
19f6634f 348{
66e24b76 349 u8 toolAction;
19f6634f 350
c3d9a224 351 visorchannel_read(controlvm_channel,
352 offsetof(struct spar_controlvm_channel_protocol,
353 tool_action), &toolAction, sizeof(u8));
354 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
355}
356
357static ssize_t toolaction_store(struct device *dev,
358 struct device_attribute *attr,
359 const char *buf, size_t count)
19f6634f 360{
361 u8 toolAction;
362 int ret;
19f6634f 363
364 if (kstrtou8(buf, 10, &toolAction) != 0)
365 return -EINVAL;
366
c3d9a224 367 ret = visorchannel_write(controlvm_channel,
d19642f6 368 offsetof(struct spar_controlvm_channel_protocol, tool_action),
369 &toolAction, sizeof(u8));
370
371 if (ret)
372 return ret;
e22a4a0f 373 return count;
374}
375
376static ssize_t boottotool_show(struct device *dev,
377 struct device_attribute *attr,
378 char *buf)
54b31229 379{
755e2ecc 380 struct efi_spar_indication efiSparIndication;
54b31229 381
c3d9a224 382 visorchannel_read(controlvm_channel,
383 offsetof(struct spar_controlvm_channel_protocol,
384 efi_spar_ind), &efiSparIndication,
755e2ecc 385 sizeof(struct efi_spar_indication));
54b31229 386 return scnprintf(buf, PAGE_SIZE, "%u\n",
2450301a 387 efiSparIndication.boot_to_tool);
388}
389
390static ssize_t boottotool_store(struct device *dev,
391 struct device_attribute *attr,
392 const char *buf, size_t count)
54b31229 393{
66e24b76 394 int val, ret;
755e2ecc 395 struct efi_spar_indication efiSparIndication;
54b31229 396
397 if (kstrtoint(buf, 10, &val) != 0)
398 return -EINVAL;
399
2450301a 400 efiSparIndication.boot_to_tool = val;
c3d9a224 401 ret = visorchannel_write(controlvm_channel,
402 offsetof(struct spar_controlvm_channel_protocol,
403 efi_spar_ind),
54b31229 404 &(efiSparIndication),
755e2ecc 405 sizeof(struct efi_spar_indication));
406
407 if (ret)
408 return ret;
e22a4a0f 409 return count;
54b31229 410}
411
412static ssize_t error_show(struct device *dev, struct device_attribute *attr,
413 char *buf)
414{
415 u32 error;
416
c3d9a224 417 visorchannel_read(controlvm_channel, offsetof(
d19642f6 418 struct spar_controlvm_channel_protocol, installation_error),
419 &error, sizeof(u32));
420 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
421}
422
423static ssize_t error_store(struct device *dev, struct device_attribute *attr,
424 const char *buf, size_t count)
425{
426 u32 error;
66e24b76 427 int ret;
422af17c 428
429 if (kstrtou32(buf, 10, &error) != 0)
430 return -EINVAL;
431
c3d9a224 432 ret = visorchannel_write(controlvm_channel,
433 offsetof(struct spar_controlvm_channel_protocol,
434 installation_error),
435 &error, sizeof(u32));
436 if (ret)
437 return ret;
e22a4a0f 438 return count;
439}
440
441static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
442 char *buf)
443{
444 u32 textId;
445
c3d9a224 446 visorchannel_read(controlvm_channel, offsetof(
d19642f6 447 struct spar_controlvm_channel_protocol, installation_text_id),
448 &textId, sizeof(u32));
449 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
450}
451
452static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
453 const char *buf, size_t count)
454{
455 u32 textId;
66e24b76 456 int ret;
422af17c 457
458 if (kstrtou32(buf, 10, &textId) != 0)
459 return -EINVAL;
460
c3d9a224 461 ret = visorchannel_write(controlvm_channel,
462 offsetof(struct spar_controlvm_channel_protocol,
463 installation_text_id),
464 &textId, sizeof(u32));
465 if (ret)
466 return ret;
e22a4a0f 467 return count;
468}
469
470static ssize_t remaining_steps_show(struct device *dev,
471 struct device_attribute *attr, char *buf)
472{
473 u16 remainingSteps;
474
c3d9a224 475 visorchannel_read(controlvm_channel,
476 offsetof(struct spar_controlvm_channel_protocol,
477 installation_remaining_steps),
478 &remainingSteps,
479 sizeof(u16));
480 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
481}
482
483static ssize_t remaining_steps_store(struct device *dev,
484 struct device_attribute *attr, const char *buf, size_t count)
485{
486 u16 remainingSteps;
66e24b76 487 int ret;
422af17c 488
489 if (kstrtou16(buf, 10, &remainingSteps) != 0)
490 return -EINVAL;
491
c3d9a224 492 ret = visorchannel_write(controlvm_channel,
493 offsetof(struct spar_controlvm_channel_protocol,
494 installation_remaining_steps),
495 &remainingSteps, sizeof(u16));
496 if (ret)
497 return ret;
e22a4a0f 498 return count;
499}
500
12e364b9 501static void
9b989a98 502bus_info_clear(void *v)
12e364b9 503{
33192fa1 504 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
12e364b9 505
506 kfree(p->name);
507 p->name = NULL;
508
509 kfree(p->description);
510 p->description = NULL;
511
512 p->state.created = 0;
33192fa1 513 memset(p, 0, sizeof(struct visorchipset_bus_info));
514}
515
516static void
9b989a98 517dev_info_clear(void *v)
12e364b9 518{
519 struct visorchipset_device_info *p =
520 (struct visorchipset_device_info *)(v);
26eb2c0c 521
12e364b9 522 p->state.created = 0;
246e0cd0 523 memset(p, 0, sizeof(struct visorchipset_device_info));
524}
525
c242233e 526static u8
527check_chipset_events(void)
528{
529 int i;
c242233e 530 u8 send_msg = 1;
531 /* Check events to determine if response should be sent */
532 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
533 send_msg &= chipset_events[i];
534 return send_msg;
535}
536
537static void
538clear_chipset_events(void)
539{
540 int i;
541 /* Clear chipset_events */
542 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
543 chipset_events[i] = 0;
544}
545
546void
547visorchipset_register_busdev_server(
548 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 549 struct visorchipset_busdev_responders *responders,
1e7a59c1 550 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 551{
8f1947ac 552 down(&notifier_lock);
38f736e9 553 if (!notifiers) {
554 memset(&BusDev_Server_Notifiers, 0,
555 sizeof(BusDev_Server_Notifiers));
556 serverregistered = 0; /* clear flag */
557 } else {
558 BusDev_Server_Notifiers = *notifiers;
559 serverregistered = 1; /* set flag */
560 }
561 if (responders)
562 *responders = BusDev_Responders;
563 if (driver_info)
564 bus_device_info_init(driver_info, "chipset", "visorchipset",
836bee9e 565 VERSION, NULL);
12e364b9 566
8f1947ac 567 up(&notifier_lock);
568}
569EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
570
571void
572visorchipset_register_busdev_client(
573 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 574 struct visorchipset_busdev_responders *responders,
43fce019 575 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 576{
8f1947ac 577 down(&notifier_lock);
38f736e9 578 if (!notifiers) {
579 memset(&BusDev_Client_Notifiers, 0,
580 sizeof(BusDev_Client_Notifiers));
581 clientregistered = 0; /* clear flag */
582 } else {
583 BusDev_Client_Notifiers = *notifiers;
584 clientregistered = 1; /* set flag */
585 }
586 if (responders)
587 *responders = BusDev_Responders;
588 if (driver_info)
589 bus_device_info_init(driver_info, "chipset(bolts)",
590 "visorchipset", VERSION, NULL);
8f1947ac 591 up(&notifier_lock);
592}
593EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
594
595static void
596cleanup_controlvm_structures(void)
597{
33192fa1 598 struct visorchipset_bus_info *bi, *tmp_bi;
246e0cd0 599 struct visorchipset_device_info *di, *tmp_di;
12e364b9 600
1390b88c 601 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
9b989a98 602 bus_info_clear(bi);
603 list_del(&bi->entry);
604 kfree(bi);
605 }
606
1390b88c 607 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
9b989a98 608 dev_info_clear(di);
609 list_del(&di->entry);
610 kfree(di);
611 }
612}
613
614static void
3ab47701 615chipset_init(struct controlvm_message *inmsg)
616{
617 static int chipset_inited;
b9b141e8 618 enum ultra_chipset_feature features = 0;
619 int rc = CONTROLVM_RESP_SUCCESS;
620
621 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
622 if (chipset_inited) {
22ad57ba 623 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 624 goto cleanup;
625 }
626 chipset_inited = 1;
627 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
628
629 /* Set features to indicate we support parahotplug (if Command
630 * also supports it). */
631 features =
2ea5117b 632 inmsg->cmd.init_chipset.
633 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
634
635 /* Set the "reply" bit so Command knows this is a
636 * features-aware driver. */
637 features |= ULTRA_CHIPSET_FEATURE_REPLY;
638
e3199b2e 639cleanup:
640 if (rc < 0)
641 cleanup_controlvm_structures();
98d7b594 642 if (inmsg->hdr.flags.response_expected)
643 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
644}
645
646static void
3ab47701 647controlvm_init_response(struct controlvm_message *msg,
98d7b594 648 struct controlvm_message_header *msgHdr, int response)
12e364b9 649{
3ab47701 650 memset(msg, 0, sizeof(struct controlvm_message));
651 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
652 msg->hdr.payload_bytes = 0;
653 msg->hdr.payload_vm_offset = 0;
654 msg->hdr.payload_max_bytes = 0;
12e364b9 655 if (response < 0) {
656 msg->hdr.flags.failed = 1;
657 msg->hdr.completion_status = (u32) (-response);
658 }
659}
660
661static void
98d7b594 662controlvm_respond(struct controlvm_message_header *msgHdr, int response)
12e364b9 663{
3ab47701 664 struct controlvm_message outmsg;
26eb2c0c 665
666 controlvm_init_response(&outmsg, msgHdr, response);
667 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
668 * back the deviceChangeState structure in the packet. */
669 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
670 g_devicechangestate_packet.device_change_state.bus_no ==
671 g_diagpool_bus_no &&
672 g_devicechangestate_packet.device_change_state.dev_no ==
83d48905 673 g_diagpool_dev_no)
4f44b72d 674 outmsg.cmd = g_devicechangestate_packet;
2098dbd1 675 if (outmsg.hdr.flags.test_message == 1)
12e364b9 676 return;
2098dbd1 677
c3d9a224 678 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 679 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
680 return;
681 }
682}
683
684static void
685controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
686 int response,
b9b141e8 687 enum ultra_chipset_feature features)
12e364b9 688{
3ab47701 689 struct controlvm_message outmsg;
26eb2c0c 690
12e364b9 691 controlvm_init_response(&outmsg, msgHdr, response);
2ea5117b 692 outmsg.cmd.init_chipset.features = features;
c3d9a224 693 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 694 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
695 return;
696 }
697}
698
699static void controlvm_respond_physdev_changestate(
700 struct controlvm_message_header *msgHdr, int response,
701 struct spar_segment_state state)
12e364b9 702{
3ab47701 703 struct controlvm_message outmsg;
26eb2c0c 704
12e364b9 705 controlvm_init_response(&outmsg, msgHdr, response);
706 outmsg.cmd.device_change_state.state = state;
707 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 708 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 709 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
710 return;
711 }
712}
713
714void
715visorchipset_save_message(struct controlvm_message *msg,
716 enum crash_obj_type type)
12e364b9 717{
718 u32 crash_msg_offset;
719 u16 crash_msg_count;
720
721 /* get saved message count */
c3d9a224 722 if (visorchannel_read(controlvm_channel,
723 offsetof(struct spar_controlvm_channel_protocol,
724 saved_crash_message_count),
4577225d 725 &crash_msg_count, sizeof(u16)) < 0) {
726 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
727 POSTCODE_SEVERITY_ERR);
728 return;
729 }
730
4577225d 731 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 732 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
4577225d 733 crash_msg_count,
734 POSTCODE_SEVERITY_ERR);
735 return;
736 }
737
738 /* get saved crash message offset */
c3d9a224 739 if (visorchannel_read(controlvm_channel,
740 offsetof(struct spar_controlvm_channel_protocol,
741 saved_crash_message_offset),
4577225d 742 &crash_msg_offset, sizeof(u32)) < 0) {
743 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
744 POSTCODE_SEVERITY_ERR);
745 return;
746 }
747
2c683cde 748 if (type == CRASH_BUS) {
c3d9a224 749 if (visorchannel_write(controlvm_channel,
4577225d 750 crash_msg_offset,
751 msg,
752 sizeof(struct controlvm_message)) < 0) {
753 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
754 POSTCODE_SEVERITY_ERR);
755 return;
756 }
757 } else {
c3d9a224 758 if (visorchannel_write(controlvm_channel,
4577225d 759 crash_msg_offset +
760 sizeof(struct controlvm_message), msg,
761 sizeof(struct controlvm_message)) < 0) {
762 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
763 POSTCODE_SEVERITY_ERR);
764 return;
765 }
766 }
767}
768EXPORT_SYMBOL_GPL(visorchipset_save_message);
769
770static void
fbb31f48 771bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
12e364b9 772{
33192fa1 773 struct visorchipset_bus_info *p = NULL;
774 BOOL need_clear = FALSE;
775
fbb31f48 776 p = findbus(&bus_info_list, bus_no);
0aca7844 777 if (!p)
12e364b9 778 return;
0aca7844 779
12e364b9 780 if (response < 0) {
fbb31f48 781 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
782 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
783 /* undo the row we just created... */
fbb31f48 784 delbusdevices(&dev_info_list, bus_no);
12e364b9 785 } else {
fbb31f48 786 if (cmd_id == CONTROLVM_BUS_CREATE)
12e364b9 787 p->state.created = 1;
fbb31f48 788 if (cmd_id == CONTROLVM_BUS_DESTROY)
789 need_clear = TRUE;
790 }
791
0aca7844 792 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 793 return; /* no controlvm response needed */
6b59b31d 794 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 795 return;
796 controlvm_respond(&p->pending_msg_hdr, response);
797 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 798 if (need_clear) {
9b989a98 799 bus_info_clear(p);
fbb31f48 800 delbusdevices(&dev_info_list, bus_no);
801 }
802}
803
804static void
805device_changestate_responder(enum controlvm_id cmd_id,
806 ulong bus_no, ulong dev_no, int response,
807 struct spar_segment_state response_state)
12e364b9 808{
246e0cd0 809 struct visorchipset_device_info *p = NULL;
3ab47701 810 struct controlvm_message outmsg;
12e364b9 811
fbb31f48 812 p = finddevice(&dev_info_list, bus_no, dev_no);
0aca7844 813 if (!p)
12e364b9 814 return;
0aca7844 815 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 816 return; /* no controlvm response needed */
fbb31f48 817 if (p->pending_msg_hdr.id != cmd_id)
12e364b9 818 return;
12e364b9 819
246e0cd0 820 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
12e364b9 821
822 outmsg.cmd.device_change_state.bus_no = bus_no;
823 outmsg.cmd.device_change_state.dev_no = dev_no;
824 outmsg.cmd.device_change_state.state = response_state;
12e364b9 825
c3d9a224 826 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 827 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 828 return;
12e364b9 829
246e0cd0 830 p->pending_msg_hdr.id = CONTROLVM_INVALID;
831}
832
833static void
fbb31f48 834device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
53bebb13 835 int response)
12e364b9 836{
246e0cd0 837 struct visorchipset_device_info *p = NULL;
838 BOOL need_clear = FALSE;
839
fbb31f48 840 p = finddevice(&dev_info_list, bus_no, dev_no);
0aca7844 841 if (!p)
12e364b9 842 return;
12e364b9 843 if (response >= 0) {
fbb31f48 844 if (cmd_id == CONTROLVM_DEVICE_CREATE)
12e364b9 845 p->state.created = 1;
fbb31f48 846 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
847 need_clear = TRUE;
848 }
849
0aca7844 850 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 851 return; /* no controlvm response needed */
0aca7844 852
6b59b31d 853 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 854 return;
0aca7844 855
856 controlvm_respond(&p->pending_msg_hdr, response);
857 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 858 if (need_clear)
9b989a98 859 dev_info_clear(p);
860}
861
862static void
863bus_epilog(u32 bus_no,
864 u32 cmd, struct controlvm_message_header *msg_hdr,
865 int response, BOOL need_response)
866{
867 BOOL notified = FALSE;
868
869 struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
870 bus_no);
12e364b9 871
2836c6a8 872 if (!bus_info)
12e364b9 873 return;
0aca7844 874
875 if (need_response) {
876 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
98d7b594 877 sizeof(struct controlvm_message_header));
75c1f8b7 878 } else {
2836c6a8 879 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 880 }
12e364b9 881
8f1947ac 882 down(&notifier_lock);
883 if (response == CONTROLVM_RESP_SUCCESS) {
884 switch (cmd) {
885 case CONTROLVM_BUS_CREATE:
886 /* We can't tell from the bus_create
887 * information which of our 2 bus flavors the
888 * devices on this bus will ultimately end up.
889 * FORTUNATELY, it turns out it is harmless to
890 * send the bus_create to both of them. We can
891 * narrow things down a little bit, though,
892 * because we know: - BusDev_Server can handle
893 * either server or client devices
894 * - BusDev_Client can handle ONLY client
895 * devices */
896 if (BusDev_Server_Notifiers.bus_create) {
2836c6a8 897 (*BusDev_Server_Notifiers.bus_create) (bus_no);
898 notified = TRUE;
899 }
2836c6a8 900 if ((!bus_info->flags.server) /*client */ &&
12e364b9 901 BusDev_Client_Notifiers.bus_create) {
2836c6a8 902 (*BusDev_Client_Notifiers.bus_create) (bus_no);
903 notified = TRUE;
904 }
905 break;
906 case CONTROLVM_BUS_DESTROY:
907 if (BusDev_Server_Notifiers.bus_destroy) {
2836c6a8 908 (*BusDev_Server_Notifiers.bus_destroy) (bus_no);
909 notified = TRUE;
910 }
2836c6a8 911 if ((!bus_info->flags.server) /*client */ &&
12e364b9 912 BusDev_Client_Notifiers.bus_destroy) {
2836c6a8 913 (*BusDev_Client_Notifiers.bus_destroy) (bus_no);
914 notified = TRUE;
915 }
916 break;
917 }
918 }
919 if (notified)
920 /* The callback function just called above is responsible
929aa8ae 921 * for calling the appropriate visorchipset_busdev_responders
922 * function, which will call bus_responder()
923 */
924 ;
925 else
2836c6a8 926 bus_responder(cmd, bus_no, response);
8f1947ac 927 up(&notifier_lock);
928}
929
930static void
931device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
932 struct controlvm_message_header *msg_hdr, int response,
933 BOOL need_response, BOOL for_visorbus)
12e364b9 934{
fe90d892 935 struct visorchipset_busdev_notifiers *notifiers = NULL;
936 BOOL notified = FALSE;
937
938 struct visorchipset_device_info *dev_info =
939 finddevice(&dev_info_list, bus_no, dev_no);
940 char *envp[] = {
941 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
942 NULL
943 };
944
2836c6a8 945 if (!dev_info)
12e364b9 946 return;
0aca7844 947
948 if (for_visorbus)
949 notifiers = &BusDev_Server_Notifiers;
950 else
951 notifiers = &BusDev_Client_Notifiers;
952 if (need_response) {
953 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
98d7b594 954 sizeof(struct controlvm_message_header));
75c1f8b7 955 } else {
2836c6a8 956 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 957 }
12e364b9 958
8f1947ac 959 down(&notifier_lock);
960 if (response >= 0) {
961 switch (cmd) {
962 case CONTROLVM_DEVICE_CREATE:
963 if (notifiers->device_create) {
2836c6a8 964 (*notifiers->device_create) (bus_no, dev_no);
965 notified = TRUE;
966 }
967 break;
968 case CONTROLVM_DEVICE_CHANGESTATE:
969 /* ServerReady / ServerRunning / SegmentStateRunning */
970 if (state.alive == segment_state_running.alive &&
971 state.operating ==
972 segment_state_running.operating) {
12e364b9 973 if (notifiers->device_resume) {
974 (*notifiers->device_resume) (bus_no,
975 dev_no);
976 notified = TRUE;
977 }
978 }
979 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 980 else if (state.alive == segment_state_standby.alive &&
3f833b54 981 state.operating ==
bd0d2dcc 982 segment_state_standby.operating) {
983 /* technically this is standby case
984 * where server is lost
985 */
986 if (notifiers->device_pause) {
987 (*notifiers->device_pause) (bus_no,
988 dev_no);
989 notified = TRUE;
990 }
bd0d2dcc 991 } else if (state.alive == segment_state_paused.alive &&
3f833b54 992 state.operating ==
bd0d2dcc 993 segment_state_paused.operating) {
994 /* this is lite pause where channel is
995 * still valid just 'pause' of it
996 */
997 if (bus_no == g_diagpool_bus_no &&
998 dev_no == g_diagpool_dev_no) {
999 /* this will trigger the
1000 * diag_shutdown.sh script in
1001 * the visorchipset hotplug */
1002 kobject_uevent_env
1003 (&Visorchipset_platform_device.dev.
1004 kobj, KOBJ_ONLINE, envp);
1005 }
1006 }
1007 break;
1008 case CONTROLVM_DEVICE_DESTROY:
1009 if (notifiers->device_destroy) {
2836c6a8 1010 (*notifiers->device_destroy) (bus_no, dev_no);
1011 notified = TRUE;
1012 }
1013 break;
1014 }
1015 }
1016 if (notified)
1017 /* The callback function just called above is responsible
929aa8ae 1018 * for calling the appropriate visorchipset_busdev_responders
1019 * function, which will call device_responder()
1020 */
1021 ;
1022 else
2836c6a8 1023 device_responder(cmd, bus_no, dev_no, response);
8f1947ac 1024 up(&notifier_lock);
1025}
1026
1027static void
3ab47701 1028bus_create(struct controlvm_message *inmsg)
12e364b9 1029{
2ea5117b 1030 struct controlvm_message_packet *cmd = &inmsg->cmd;
6c5fed35 1031 ulong bus_no = cmd->create_bus.bus_no;
12e364b9 1032 int rc = CONTROLVM_RESP_SUCCESS;
6c5fed35 1033 struct visorchipset_bus_info *bus_info = NULL;
12e364b9 1034
1035 bus_info = findbus(&bus_info_list, bus_no);
1036 if (bus_info && (bus_info->state.created == 1)) {
1037 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1038 POSTCODE_SEVERITY_ERR);
22ad57ba 1039 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
6c5fed35 1040 goto cleanup;
12e364b9 1041 }
1042 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1043 if (!bus_info) {
1044 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1045 POSTCODE_SEVERITY_ERR);
22ad57ba 1046 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
6c5fed35 1047 goto cleanup;
1048 }
1049
1050 INIT_LIST_HEAD(&bus_info->entry);
1051 bus_info->bus_no = bus_no;
1052 bus_info->dev_no = cmd->create_bus.dev_count;
12e364b9 1053
6c5fed35 1054 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1055
98d7b594 1056 if (inmsg->hdr.flags.test_message == 1)
6c5fed35 1057 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1058 else
6c5fed35 1059 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
12e364b9 1060
1061 bus_info->flags.server = inmsg->hdr.flags.server;
1062 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1063 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1064 bus_info->chan_info.channel_type_uuid =
9b1caee7 1065 cmd->create_bus.bus_data_type_uuid;
6c5fed35 1066 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
12e364b9 1067
6c5fed35 1068 list_add(&bus_info->entry, &bus_info_list);
12e364b9 1069
6c5fed35 1070 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1071
1072cleanup:
1073 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1074 rc, inmsg->hdr.flags.response_expected == 1);
1075}
1076
1077static void
3ab47701 1078bus_destroy(struct controlvm_message *inmsg)
12e364b9 1079{
2ea5117b 1080 struct controlvm_message_packet *cmd = &inmsg->cmd;
1081 ulong bus_no = cmd->destroy_bus.bus_no;
1082 struct visorchipset_bus_info *bus_info;
1083 int rc = CONTROLVM_RESP_SUCCESS;
1084
1085 bus_info = findbus(&bus_info_list, bus_no);
1086 if (!bus_info)
22ad57ba 1087 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1088 else if (bus_info->state.created == 0)
22ad57ba 1089 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1090
dff54cd6 1091 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1092 rc, inmsg->hdr.flags.response_expected == 1);
1093}
1094
1095static void
1096bus_configure(struct controlvm_message *inmsg,
1097 struct parser_context *parser_ctx)
12e364b9 1098{
2ea5117b 1099 struct controlvm_message_packet *cmd = &inmsg->cmd;
1100 ulong bus_no = cmd->configure_bus.bus_no;
1101 struct visorchipset_bus_info *bus_info = NULL;
1102 int rc = CONTROLVM_RESP_SUCCESS;
1103 char s[99];
1104
1105 bus_no = cmd->configure_bus.bus_no;
1106 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1107 POSTCODE_SEVERITY_INFO);
12e364b9 1108
1109 bus_info = findbus(&bus_info_list, bus_no);
1110 if (!bus_info) {
1111 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1112 POSTCODE_SEVERITY_ERR);
22ad57ba 1113 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1114 } else if (bus_info->state.created == 0) {
1115 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1116 POSTCODE_SEVERITY_ERR);
22ad57ba 1117 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1118 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1119 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1120 POSTCODE_SEVERITY_ERR);
22ad57ba 1121 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1122 } else {
1123 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1124 bus_info->partition_uuid = parser_id_get(parser_ctx);
1125 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1126 bus_info->name = parser_string_get(parser_ctx);
1127
1128 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1129 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1130 POSTCODE_SEVERITY_INFO);
12e364b9 1131 }
654bada0 1132 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1133 rc, inmsg->hdr.flags.response_expected == 1);
1134}
1135
1136static void
3ab47701 1137my_device_create(struct controlvm_message *inmsg)
12e364b9 1138{
2ea5117b 1139 struct controlvm_message_packet *cmd = &inmsg->cmd;
1140 ulong busNo = cmd->create_device.bus_no;
1141 ulong devNo = cmd->create_device.dev_no;
246e0cd0 1142 struct visorchipset_device_info *pDevInfo = NULL;
33192fa1 1143 struct visorchipset_bus_info *pBusInfo = NULL;
1144 int rc = CONTROLVM_RESP_SUCCESS;
1145
1390b88c 1146 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
12e364b9 1147 if (pDevInfo && (pDevInfo->state.created == 1)) {
1148 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1149 POSTCODE_SEVERITY_ERR);
1150 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1151 goto Away;
12e364b9 1152 }
1390b88c 1153 pBusInfo = findbus(&bus_info_list, busNo);
12e364b9 1154 if (!pBusInfo) {
1155 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1156 POSTCODE_SEVERITY_ERR);
1157 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1158 goto Away;
1159 }
1160 if (pBusInfo->state.created == 0) {
1161 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1162 POSTCODE_SEVERITY_ERR);
1163 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1164 goto Away;
12e364b9 1165 }
246e0cd0 1166 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
38f736e9 1167 if (!pDevInfo) {
1168 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1169 POSTCODE_SEVERITY_ERR);
1170 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1171 goto Away;
12e364b9 1172 }
97a84f12 1173
12e364b9 1174 INIT_LIST_HEAD(&pDevInfo->entry);
1175 pDevInfo->bus_no = busNo;
1176 pDevInfo->dev_no = devNo;
1177 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1178 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1179 POSTCODE_SEVERITY_INFO);
1180
98d7b594 1181 if (inmsg->hdr.flags.test_message == 1)
246e0cd0 1182 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1183 else
1184 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1185 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1186 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1187 pDevInfo->chan_info.channel_type_uuid =
9b1caee7 1188 cmd->create_device.data_type_uuid;
246e0cd0 1189 pDevInfo->chan_info.intr = cmd->create_device.intr;
1390b88c 1190 list_add(&pDevInfo->entry, &dev_info_list);
1191 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1192 POSTCODE_SEVERITY_INFO);
1193Away:
1194 /* get the bus and devNo for DiagPool channel */
1195 if (pDevInfo &&
1196 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
1197 g_diagpool_bus_no = busNo;
1198 g_diagpool_dev_no = devNo;
12e364b9 1199 }
bd0d2dcc 1200 device_epilog(busNo, devNo, segment_state_running,
12e364b9 1201 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
98d7b594 1202 inmsg->hdr.flags.response_expected == 1,
246e0cd0 1203 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
1204}
1205
1206static void
3ab47701 1207my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1208{
1209 struct controlvm_message_packet *cmd = &inmsg->cmd;
1210 ulong busNo = cmd->device_change_state.bus_no;
1211 ulong devNo = cmd->device_change_state.dev_no;
1212 struct spar_segment_state state = cmd->device_change_state.state;
246e0cd0 1213 struct visorchipset_device_info *pDevInfo = NULL;
1214 int rc = CONTROLVM_RESP_SUCCESS;
1215
1390b88c 1216 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
12e364b9 1217 if (!pDevInfo) {
1218 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1219 POSTCODE_SEVERITY_ERR);
1220 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1221 goto Away;
1222 }
1223 if (pDevInfo->state.created == 0) {
1224 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1225 POSTCODE_SEVERITY_ERR);
22ad57ba 1226 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1227 }
1228Away:
1229 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1230 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1231 &inmsg->hdr, rc,
98d7b594 1232 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1233 FOR_VISORBUS(
246e0cd0 1234 pDevInfo->chan_info.channel_type_uuid));
1235}
1236
1237static void
3ab47701 1238my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1239{
1240 struct controlvm_message_packet *cmd = &inmsg->cmd;
1241 ulong busNo = cmd->destroy_device.bus_no;
1242 ulong devNo = cmd->destroy_device.dev_no;
246e0cd0 1243 struct visorchipset_device_info *pDevInfo = NULL;
1244 int rc = CONTROLVM_RESP_SUCCESS;
1245
1390b88c 1246 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
12e364b9 1247 if (!pDevInfo) {
1248 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1249 goto Away;
12e364b9 1250 }
75c1f8b7 1251 if (pDevInfo->state.created == 0)
22ad57ba 1252 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1253
1254Away:
1255 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
bd0d2dcc 1256 device_epilog(busNo, devNo, segment_state_running,
12e364b9 1257 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
98d7b594 1258 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1259 FOR_VISORBUS(
246e0cd0 1260 pDevInfo->chan_info.channel_type_uuid));
1261}
1262
1263/* When provided with the physical address of the controlvm channel
1264 * (phys_addr), the offset to the payload area we need to manage
1265 * (offset), and the size of this payload area (bytes), fills in the
84b11dfd 1266 * controlvm_payload_info struct. Returns CONTROLVM_RESP_SUCCESS on
1267 * success, or a negative CONTROLVM_RESP_ERROR code on failure.
1268 */
1269static int
5fc0229a 1270initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
84b11dfd 1271 struct controlvm_payload_info *info)
12e364b9 1272{
c242233e 1273 u8 __iomem *payload = NULL;
1274 int rc = CONTROLVM_RESP_SUCCESS;
1275
38f736e9 1276 if (!info) {
1277 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1278 goto Away;
12e364b9 1279 }
84b11dfd 1280 memset(info, 0, sizeof(struct controlvm_payload_info));
12e364b9 1281 if ((offset == 0) || (bytes == 0)) {
1282 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1283 goto Away;
1284 }
1285 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1286 if (!payload) {
1287 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1288 goto Away;
1289 }
1290
1291 info->offset = offset;
1292 info->bytes = bytes;
1293 info->ptr = payload;
1294
1295Away:
1296 if (rc < 0) {
1297 if (payload != NULL) {
1298 iounmap(payload);
1299 payload = NULL;
1300 }
1301 }
1302 return rc;
1303}
1304
1305static void
84b11dfd 1306destroy_controlvm_payload_info(struct controlvm_payload_info *info)
1307{
1308 if (info->ptr != NULL) {
1309 iounmap(info->ptr);
1310 info->ptr = NULL;
1311 }
84b11dfd 1312 memset(info, 0, sizeof(struct controlvm_payload_info));
1313}
1314
1315static void
1316initialize_controlvm_payload(void)
1317{
c3d9a224 1318 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
5fc0229a 1319 u64 payloadOffset = 0;
b3c55b13 1320 u32 payloadBytes = 0;
26eb2c0c 1321
c3d9a224 1322 if (visorchannel_read(controlvm_channel,
1323 offsetof(struct spar_controlvm_channel_protocol,
1324 request_payload_offset),
12e364b9 1325 &payloadOffset, sizeof(payloadOffset)) < 0) {
1326 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1327 POSTCODE_SEVERITY_ERR);
1328 return;
1329 }
c3d9a224 1330 if (visorchannel_read(controlvm_channel,
1331 offsetof(struct spar_controlvm_channel_protocol,
1332 request_payload_bytes),
12e364b9 1333 &payloadBytes, sizeof(payloadBytes)) < 0) {
1334 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1335 POSTCODE_SEVERITY_ERR);
1336 return;
1337 }
1338 initialize_controlvm_payload_info(phys_addr,
1339 payloadOffset, payloadBytes,
84982fbf 1340 &controlvm_payload_info);
1341}
1342
1343/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1344 * Returns CONTROLVM_RESP_xxx code.
1345 */
1346int
1347visorchipset_chipset_ready(void)
1348{
1349 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1350 return CONTROLVM_RESP_SUCCESS;
1351}
1352EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1353
1354int
1355visorchipset_chipset_selftest(void)
1356{
1357 char env_selftest[20];
1358 char *envp[] = { env_selftest, NULL };
26eb2c0c 1359
1360 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1361 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1362 envp);
1363 return CONTROLVM_RESP_SUCCESS;
1364}
1365EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1366
1367/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1368 * Returns CONTROLVM_RESP_xxx code.
1369 */
1370int
1371visorchipset_chipset_notready(void)
1372{
1373 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1374 return CONTROLVM_RESP_SUCCESS;
1375}
1376EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1377
1378static void
98d7b594 1379chipset_ready(struct controlvm_message_header *msgHdr)
1380{
1381 int rc = visorchipset_chipset_ready();
26eb2c0c 1382
1383 if (rc != CONTROLVM_RESP_SUCCESS)
1384 rc = -rc;
98d7b594 1385 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
12e364b9 1386 controlvm_respond(msgHdr, rc);
98d7b594 1387 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
1388 /* Send CHIPSET_READY response when all modules have been loaded
1389 * and disks mounted for the partition
1390 */
da021f02 1391 g_chipset_msg_hdr = *msgHdr;
1392 }
1393}
1394
1395static void
98d7b594 1396chipset_selftest(struct controlvm_message_header *msgHdr)
1397{
1398 int rc = visorchipset_chipset_selftest();
26eb2c0c 1399
1400 if (rc != CONTROLVM_RESP_SUCCESS)
1401 rc = -rc;
98d7b594 1402 if (msgHdr->flags.response_expected)
1403 controlvm_respond(msgHdr, rc);
1404}
1405
1406static void
98d7b594 1407chipset_notready(struct controlvm_message_header *msgHdr)
1408{
1409 int rc = visorchipset_chipset_notready();
26eb2c0c 1410
1411 if (rc != CONTROLVM_RESP_SUCCESS)
1412 rc = -rc;
98d7b594 1413 if (msgHdr->flags.response_expected)
1414 controlvm_respond(msgHdr, rc);
1415}
1416
1417/* This is your "one-stop" shop for grabbing the next message from the
1418 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1419 */
1420static BOOL
3ab47701 1421read_controlvm_event(struct controlvm_message *msg)
12e364b9 1422{
c3d9a224 1423 if (visorchannel_signalremove(controlvm_channel,
1424 CONTROLVM_QUEUE_EVENT, msg)) {
1425 /* got a message */
0aca7844 1426 if (msg->hdr.flags.test_message == 1)
12e364b9 1427 return FALSE;
e22a4a0f 1428 return TRUE;
1429 }
1430 return FALSE;
1431}
1432
1433/*
1434 * The general parahotplug flow works as follows. The visorchipset
1435 * driver receives a DEVICE_CHANGESTATE message from Command
1436 * specifying a physical device to enable or disable. The CONTROLVM
1437 * message handler calls parahotplug_process_message, which then adds
1438 * the message to a global list and kicks off a udev event which
1439 * causes a user level script to enable or disable the specified
1440 * device. The udev script then writes to
1441 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1442 * to get called, at which point the appropriate CONTROLVM message is
1443 * retrieved from the list and responded to.
1444 */
1445
1446#define PARAHOTPLUG_TIMEOUT_MS 2000
1447
1448/*
1449 * Generate unique int to match an outstanding CONTROLVM message with a
1450 * udev script /proc response
1451 */
1452static int
1453parahotplug_next_id(void)
1454{
1455 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1456
1457 return atomic_inc_return(&id);
1458}
1459
1460/*
1461 * Returns the time (in jiffies) when a CONTROLVM message on the list
1462 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1463 */
1464static unsigned long
1465parahotplug_next_expiration(void)
1466{
2cc1a1b3 1467 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1468}
1469
1470/*
1471 * Create a parahotplug_request, which is basically a wrapper for a
1472 * CONTROLVM_MESSAGE that we can stick on a list
1473 */
1474static struct parahotplug_request *
3ab47701 1475parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1476{
1477 struct parahotplug_request *req;
1478
1479 req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
38f736e9 1480 if (!req)
1481 return NULL;
1482
1483 req->id = parahotplug_next_id();
1484 req->expiration = parahotplug_next_expiration();
1485 req->msg = *msg;
1486
1487 return req;
1488}
1489
1490/*
1491 * Free a parahotplug_request.
1492 */
1493static void
1494parahotplug_request_destroy(struct parahotplug_request *req)
1495{
1496 kfree(req);
1497}
1498
1499/*
1500 * Cause uevent to run the user level script to do the disable/enable
1501 * specified in (the CONTROLVM message in) the specified
1502 * parahotplug_request
1503 */
1504static void
1505parahotplug_request_kickoff(struct parahotplug_request *req)
1506{
2ea5117b 1507 struct controlvm_message_packet *cmd = &req->msg.cmd;
1508 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1509 env_func[40];
1510 char *envp[] = {
1511 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1512 };
1513
1514 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1515 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1516 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1517 cmd->device_change_state.state.active);
12e364b9 1518 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1519 cmd->device_change_state.bus_no);
12e364b9 1520 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1521 cmd->device_change_state.dev_no >> 3);
12e364b9 1522 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1523 cmd->device_change_state.dev_no & 0x7);
12e364b9 1524
1525 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1526 envp);
1527}
1528
1529/*
1530 * Remove any request from the list that's been on there too long and
1531 * respond with an error.
1532 */
1533static void
1534parahotplug_process_list(void)
1535{
1536 struct list_head *pos = NULL;
1537 struct list_head *tmp = NULL;
1538
1539 spin_lock(&Parahotplug_request_list_lock);
1540
1541 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1542 struct parahotplug_request *req =
1543 list_entry(pos, struct parahotplug_request, list);
1544 if (time_after_eq(jiffies, req->expiration)) {
1545 list_del(pos);
98d7b594 1546 if (req->msg.hdr.flags.response_expected)
1547 controlvm_respond_physdev_changestate(
1548 &req->msg.hdr,
1549 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
2ea5117b 1550 req->msg.cmd.device_change_state.state);
1551 parahotplug_request_destroy(req);
1552 }
1553 }
1554
1555 spin_unlock(&Parahotplug_request_list_lock);
1556}
1557
1558/*
1559 * Called from the /proc handler, which means the user script has
1560 * finished the enable/disable. Find the matching identifier, and
1561 * respond to the CONTROLVM message with success.
1562 */
1563static int
b06bdf7d 1564parahotplug_request_complete(int id, u16 active)
1565{
1566 struct list_head *pos = NULL;
1567 struct list_head *tmp = NULL;
1568
1569 spin_lock(&Parahotplug_request_list_lock);
1570
1571 /* Look for a request matching "id". */
1572 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1573 struct parahotplug_request *req =
1574 list_entry(pos, struct parahotplug_request, list);
1575 if (req->id == id) {
1576 /* Found a match. Remove it from the list and
1577 * respond.
1578 */
1579 list_del(pos);
1580 spin_unlock(&Parahotplug_request_list_lock);
2ea5117b 1581 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1582 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1583 controlvm_respond_physdev_changestate(
1584 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1585 req->msg.cmd.device_change_state.state);
12e364b9
KC
1586 parahotplug_request_destroy(req);
1587 return 0;
1588 }
1589 }
1590
1591 spin_unlock(&Parahotplug_request_list_lock);
1592 return -1;
1593}
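/* Sketch of the expected call path (see devicedisabled_store() and
 * deviceenabled_store() further below): when the support script finishes,
 * it writes the ID from SPAR_PARAHOTPLUG_ID back to sysfs, which lands here
 * as, for example,
 *
 *	parahotplug_request_complete(42, 0);	- a completed disable
 *	parahotplug_request_complete(42, 1);	- a completed enable
 *
 * A return of -1 means no request with that ID was still queued; most likely
 * it already timed out in parahotplug_process_list().
 */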
1594
1595/*
1596 * Enables or disables a PCI device by kicking off a udev script
1597 */
bd5b9b32 1598static void
3ab47701 1599parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1600{
1601 struct parahotplug_request *req;
1602
1603 req = parahotplug_request_create(inmsg);
1604
38f736e9 1605 if (!req)
12e364b9 1606 return;
12e364b9 1607
2ea5117b 1608 if (inmsg->cmd.device_change_state.state.active) {
12e364b9
KC
1609 /* For enable messages, just respond with success
1610 * right away. This is a bit of a hack, but there are
1611 * issues with the early enable messages we get (with
1612 * either the udev script not detecting that the device
1613 * is up, or not getting called at all). Fortunately
1614 * the messages that get lost don't matter anyway, as
1615 * devices are automatically enabled at
1616 * initialization.
1617 */
1618 parahotplug_request_kickoff(req);
1619 controlvm_respond_physdev_changestate(&inmsg->hdr,
2ea5117b
BR
1620 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1621 device_change_state.state);
12e364b9
KC
1622 parahotplug_request_destroy(req);
1623 } else {
1624 /* For disable messages, add the request to the
1625 * request list before kicking off the udev script. It
1626 * won't get responded to until the script has
1627 * indicated it's done.
1628 */
1629 spin_lock(&Parahotplug_request_list_lock);
1630 list_add_tail(&(req->list), &Parahotplug_request_list);
1631 spin_unlock(&Parahotplug_request_list_lock);
1632
1633 parahotplug_request_kickoff(req);
1634 }
1635}
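/* Summary of the asymmetry above:
 *   enable  (state.active == 1): kick off the uevent, respond SUCCESS
 *           immediately, and free the request right away.
 *   disable (state.active == 0): queue the request first, kick off the
 *           uevent, and respond only when the script reports completion
 *           through parahotplug_request_complete() (or the request times
 *           out in parahotplug_process_list()).
 */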
1636
12e364b9
KC
1637/* Process a controlvm message.
1638 * Return result:
1639 * FALSE - this function will return FALSE only in the case where the
1640 * controlvm message was NOT processed, but processing must be
1641 * retried before reading the next controlvm message; a
1642 * scenario where this can occur is when we need to throttle
1643 * the allocation of memory in which to copy out controlvm
1644 * payload data
1645 * TRUE - processing of the controlvm message completed,
1646 * either successfully or with an error.
1647 */
1648static BOOL
3ab47701 1649handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
12e364b9 1650{
2ea5117b 1651 struct controlvm_message_packet *cmd = &inmsg.cmd;
5fc0229a 1652 u64 parametersAddr = 0;
b3c55b13 1653 u32 parametersBytes = 0;
317d9614 1654 struct parser_context *parser_ctx = NULL;
12e364b9 1655 BOOL isLocalAddr = FALSE;
3ab47701 1656 struct controlvm_message ackmsg;
12e364b9
KC
1657
1658 /* create parsing context if necessary */
98d7b594 1659 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1660 if (channel_addr == 0)
12e364b9 1661 return TRUE;
98d7b594
BR
1662 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1663 parametersBytes = inmsg.hdr.payload_bytes;
12e364b9
KC
1664
1665 /* Parameter and channel addresses within test messages actually lie
1666 * within our OS-controlled memory. We need to know that, because it
1667 * makes a difference in how we compute the virtual address.
1668 */
1669 if (parametersAddr != 0 && parametersBytes != 0) {
1670 BOOL retry = FALSE;
26eb2c0c 1671
12e364b9 1672 parser_ctx =
b2d97e4b 1673 parser_init_byte_stream(parametersAddr, parametersBytes,
12e364b9 1674 isLocalAddr, &retry);
1b08872e
BR
1675 if (!parser_ctx && retry)
1676 return FALSE;
12e364b9
KC
1677 }
1678
1679 if (!isLocalAddr) {
1680 controlvm_init_response(&ackmsg, &inmsg.hdr,
1681 CONTROLVM_RESP_SUCCESS);
c3d9a224
BR
1682 if (controlvm_channel)
1683 visorchannel_signalinsert(controlvm_channel,
1b08872e
BR
1684 CONTROLVM_QUEUE_ACK,
1685 &ackmsg);
12e364b9 1686 }
98d7b594 1687 switch (inmsg.hdr.id) {
12e364b9 1688 case CONTROLVM_CHIPSET_INIT:
12e364b9
KC
1689 chipset_init(&inmsg);
1690 break;
1691 case CONTROLVM_BUS_CREATE:
12e364b9
KC
1692 bus_create(&inmsg);
1693 break;
1694 case CONTROLVM_BUS_DESTROY:
12e364b9
KC
1695 bus_destroy(&inmsg);
1696 break;
1697 case CONTROLVM_BUS_CONFIGURE:
12e364b9
KC
1698 bus_configure(&inmsg, parser_ctx);
1699 break;
1700 case CONTROLVM_DEVICE_CREATE:
12e364b9
KC
1701 my_device_create(&inmsg);
1702 break;
1703 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1704 if (cmd->device_change_state.flags.phys_device) {
12e364b9
KC
1705 parahotplug_process_message(&inmsg);
1706 } else {
12e364b9
KC
1707 /* save the hdr and cmd structures for later use */
1708 /* when sending back the response to Command */
1709 my_device_changestate(&inmsg);
da021f02 1710 g_diag_msg_hdr = inmsg.hdr;
4f44b72d 1711 g_devicechangestate_packet = inmsg.cmd;
12e364b9
KC
1712 break;
1713 }
1714 break;
1715 case CONTROLVM_DEVICE_DESTROY:
12e364b9
KC
1716 my_device_destroy(&inmsg);
1717 break;
1718 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1719 /* no op for now, just send a response that we passed */
98d7b594 1720 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1721 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1722 break;
1723 case CONTROLVM_CHIPSET_READY:
12e364b9
KC
1724 chipset_ready(&inmsg.hdr);
1725 break;
1726 case CONTROLVM_CHIPSET_SELFTEST:
12e364b9
KC
1727 chipset_selftest(&inmsg.hdr);
1728 break;
1729 case CONTROLVM_CHIPSET_STOP:
12e364b9
KC
1730 chipset_notready(&inmsg.hdr);
1731 break;
1732 default:
98d7b594 1733 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1734 controlvm_respond(&inmsg.hdr,
1735 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1736 break;
1737 }
1738
38f736e9 1739 if (parser_ctx) {
12e364b9
KC
1740 parser_done(parser_ctx);
1741 parser_ctx = NULL;
1742 }
1743 return TRUE;
1744}
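/* Illustrative use of the return value; this mirrors what
 * controlvm_periodic_work() below actually does:
 *
 *	if (!handle_command(inmsg, channel_addr)) {
 *		ControlVm_Pending_Msg = inmsg;		- retry on next poll
 *		ControlVm_Pending_Msg_Valid = TRUE;
 *	}
 */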
1745
d746cb55 1746static HOSTADDRESS controlvm_get_channel_address(void)
524b0b63 1747{
5fc0229a 1748 u64 addr = 0;
b3c55b13 1749 u32 size = 0;
524b0b63 1750
0aca7844 1751 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1752 return 0;
0aca7844 1753
524b0b63
BR
1754 return addr;
1755}
1756
12e364b9
KC
1757static void
1758controlvm_periodic_work(struct work_struct *work)
1759{
3ab47701 1760 struct controlvm_message inmsg;
12e364b9
KC
1761 BOOL gotACommand = FALSE;
1762 BOOL handle_command_failed = FALSE;
5fc0229a 1763 static u64 Poll_Count;
12e364b9
KC
1764
1765 /* make sure visorbus server is registered for controlvm callbacks */
1766 if (visorchipset_serverregwait && !serverregistered)
097f4c19 1767 goto Away;
12e364b9
KC
1768 /* make sure visorclientbus server is registered for controlvm
1769 * callbacks
1770 */
1771 if (visorchipset_clientregwait && !clientregistered)
097f4c19 1772 goto Away;
12e364b9 1773
12e364b9 1774 Poll_Count++;
8a1182eb 1775 if (Poll_Count < 250)
097f4c19 1776 goto Away; /* do not start processing until the 250th poll */
12e364b9
KC
1779
1780 /* Check events to determine if response to CHIPSET_READY
1781 * should be sent
1782 */
0639ba67
BR
1783 if (visorchipset_holdchipsetready &&
1784 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1785 if (check_chipset_events() == 1) {
da021f02 1786 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1787 clear_chipset_events();
da021f02 1788 memset(&g_chipset_msg_hdr, 0,
98d7b594 1789 sizeof(struct controlvm_message_header));
12e364b9
KC
1790 }
1791 }
1792
c3d9a224 1793 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1794 CONTROLVM_QUEUE_RESPONSE,
c3d9a224
BR
1795 &inmsg))
1796 ;
8a1182eb
BR
1797 if (!gotACommand) {
1798 if (ControlVm_Pending_Msg_Valid) {
1799 /* we throttled processing of a prior
1800 * msg, so try to process it again
1801 * rather than reading a new one
1802 */
1803 inmsg = ControlVm_Pending_Msg;
1804 ControlVm_Pending_Msg_Valid = FALSE;
1805 gotACommand = TRUE;
75c1f8b7 1806 } else {
8a1182eb 1807 gotACommand = read_controlvm_event(&inmsg);
75c1f8b7 1808 }
8a1182eb 1809 }
12e364b9
KC
1810
1811 handle_command_failed = FALSE;
1812 while (gotACommand && (!handle_command_failed)) {
b53e0e93 1813 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1814 if (handle_command(inmsg,
1815 visorchannel_get_physaddr
c3d9a224 1816 (controlvm_channel)))
8a1182eb
BR
1817 gotACommand = read_controlvm_event(&inmsg);
1818 else {
1819 /* this is a scenario where throttling
1820 * is required, but probably NOT an
1821 * error...; we stash the current
1822 * controlvm msg so we will attempt to
1823 * reprocess it on our next loop
1824 */
1825 handle_command_failed = TRUE;
1826 ControlVm_Pending_Msg = inmsg;
1827 ControlVm_Pending_Msg_Valid = TRUE;
12e364b9
KC
1828 }
1829 }
1830
1831 /* parahotplug_worker */
1832 parahotplug_process_list();
1833
12e364b9
KC
1834Away:
1835
1836 if (time_after(jiffies,
b53e0e93 1837 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1838 /* it's been longer than MIN_IDLE_SECONDS since we
1839 * processed our last controlvm message; slow down the
1840 * polling
1841 */
911e213e
BR
1842 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1843 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1844 } else {
911e213e
BR
1845 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1846 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1847 }
1848
9232d2d6
BR
1849 queue_delayed_work(periodic_controlvm_workqueue,
1850 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1851}
1852
1853static void
1854setup_crash_devices_work_queue(struct work_struct *work)
1855{
3ab47701
BR
1856 struct controlvm_message localCrashCreateBusMsg;
1857 struct controlvm_message localCrashCreateDevMsg;
1858 struct controlvm_message msg;
b3c55b13 1859 u32 localSavedCrashMsgOffset;
b06bdf7d 1860 u16 localSavedCrashMsgCount;
12e364b9
KC
1861
1862 /* make sure visorbus server is registered for controlvm callbacks */
1863 if (visorchipset_serverregwait && !serverregistered)
097f4c19 1864 goto Away;
12e364b9
KC
1865
1866 /* make sure visorclientbus server is registered for controlvm
1867 * callbacks
1868 */
1869 if (visorchipset_clientregwait && !clientregistered)
097f4c19 1870 goto Away;
12e364b9
KC
1871
1872 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1873
1874 /* send init chipset msg */
98d7b594 1875 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1876 msg.cmd.init_chipset.bus_count = 23;
1877 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1878
1879 chipset_init(&msg);
1880
12e364b9 1881 /* get saved message count */
c3d9a224 1882 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1883 offsetof(struct spar_controlvm_channel_protocol,
1884 saved_crash_message_count),
b06bdf7d 1885 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
12e364b9
KC
1886 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1887 POSTCODE_SEVERITY_ERR);
1888 return;
1889 }
1890
1891 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
12e364b9
KC
1892 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1893 localSavedCrashMsgCount,
1894 POSTCODE_SEVERITY_ERR);
1895 return;
1896 }
1897
1898 /* get saved crash message offset */
c3d9a224 1899 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1900 offsetof(struct spar_controlvm_channel_protocol,
1901 saved_crash_message_offset),
b3c55b13 1902 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
12e364b9
KC
1903 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1904 POSTCODE_SEVERITY_ERR);
1905 return;
1906 }
1907
1908 /* read create device message for storage bus offset */
c3d9a224 1909 if (visorchannel_read(controlvm_channel,
12e364b9
KC
1910 localSavedCrashMsgOffset,
1911 &localCrashCreateBusMsg,
3ab47701 1912 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1913 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1914 POSTCODE_SEVERITY_ERR);
1915 return;
1916 }
1917
1918 /* read create device message for storage device */
c3d9a224 1919 if (visorchannel_read(controlvm_channel,
12e364b9 1920 localSavedCrashMsgOffset +
3ab47701 1921 sizeof(struct controlvm_message),
12e364b9 1922 &localCrashCreateDevMsg,
3ab47701 1923 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1924 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1925 POSTCODE_SEVERITY_ERR);
1926 return;
1927 }
1928
1929 /* reuse IOVM create bus message */
75c1f8b7 1930 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0) {
12e364b9 1931 bus_create(&localCrashCreateBusMsg);
75c1f8b7 1932 } else {
12e364b9
KC
1933 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1934 POSTCODE_SEVERITY_ERR);
1935 return;
1936 }
1937
1938 /* reuse create device message for storage device */
75c1f8b7 1939 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0) {
12e364b9 1940 my_device_create(&localCrashCreateDevMsg);
75c1f8b7 1941 } else {
12e364b9
KC
1942 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1943 POSTCODE_SEVERITY_ERR);
1944 return;
1945 }
12e364b9
KC
1946 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1947 return;
1948
1949Away:
1950
911e213e 1951 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1952
9232d2d6
BR
1953 queue_delayed_work(periodic_controlvm_workqueue,
1954 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1955}
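/* For reference, the layout read above from the controlvm channel is:
 *
 *	saved_crash_message_offset + 0
 *		struct controlvm_message	- create-bus message for the
 *						  storage bus
 *	saved_crash_message_offset + sizeof(struct controlvm_message)
 *		struct controlvm_message	- create-device message for
 *						  the storage device
 *
 * and saved_crash_message_count is expected to equal CONTROLVM_CRASHMSG_MAX
 * (checked above) before either message is reused.
 */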
1956
1957static void
1958bus_create_response(ulong busNo, int response)
1959{
1960 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
1961}
1962
1963static void
1964bus_destroy_response(ulong busNo, int response)
1965{
1966 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
1967}
1968
1969static void
1970device_create_response(ulong busNo, ulong devNo, int response)
1971{
1972 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
1973}
1974
1975static void
1976device_destroy_response(ulong busNo, ulong devNo, int response)
1977{
1978 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
1979}
1980
1981void
8420f417 1982visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
12e364b9 1983{
12e364b9 1984 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 1985 bus_no, dev_no, response,
bd0d2dcc 1986 segment_state_standby);
12e364b9 1987}
927c7927 1988EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
12e364b9
KC
1989
1990static void
1991device_resume_response(ulong busNo, ulong devNo, int response)
1992{
1993 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1994 busNo, devNo, response,
bd0d2dcc 1995 segment_state_running);
12e364b9
KC
1996}
1997
1998BOOL
77db7127 1999visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2000{
1390b88c 2001 void *p = findbus(&bus_info_list, bus_no);
26eb2c0c 2002
0aca7844 2003 if (!p)
12e364b9 2004 return FALSE;
77db7127 2005 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
12e364b9
KC
2006 return TRUE;
2007}
2008EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2009
2010BOOL
58dd8f2d 2011visorchipset_set_bus_context(ulong bus_no, void *context)
12e364b9 2012{
1390b88c 2013 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
26eb2c0c 2014
0aca7844 2015 if (!p)
12e364b9 2016 return FALSE;
12e364b9
KC
2017 p->bus_driver_context = context;
2018 return TRUE;
2019}
2020EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2021
2022BOOL
b486df19
BR
2023visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2024 struct visorchipset_device_info *dev_info)
12e364b9 2025{
1390b88c 2026 void *p = finddevice(&dev_info_list, bus_no, dev_no);
26eb2c0c 2027
0aca7844 2028 if (!p)
12e364b9 2029 return FALSE;
b486df19 2030 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
12e364b9
KC
2031 return TRUE;
2032}
2033EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2034
2035BOOL
cf0bd0b5 2036visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
12e364b9 2037{
246e0cd0 2038 struct visorchipset_device_info *p =
1390b88c 2039 finddevice(&dev_info_list, bus_no, dev_no);
26eb2c0c 2040
0aca7844 2041 if (!p)
12e364b9 2042 return FALSE;
12e364b9
KC
2043 p->bus_driver_context = context;
2044 return TRUE;
2045}
2046EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
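/* Hedged usage sketch for the accessors above, as a bus driver such as
 * visorbus might call them (the variable names here are illustrative only):
 *
 *	struct visorchipset_bus_info bus_info;
 *
 *	if (!visorchipset_get_bus_info(bus_no, &bus_info))
 *		return;			- no such bus is being tracked
 *	visorchipset_set_bus_context(bus_no, my_driver_private);
 *
 * The *_set_*_context() helpers simply stash a driver-private pointer in the
 * bus_driver_context field of the cached info structure; they do not notify
 * anyone.
 */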
2047
2048/* Generic wrapper function for allocating memory from a kmem_cache pool.
2049 */
2050void *
2051visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2052 char *fn, int ln)
2053{
2054 gfp_t gfp;
2055 void *p;
2056
2057 if (ok_to_block)
2058 gfp = GFP_KERNEL;
2059 else
2060 gfp = GFP_ATOMIC;
2061 /* __GFP_NORETRY means "ok to fail", meaning
2062 * kmem_cache_alloc() can return NULL, implying the caller CAN
2063 * cope with failure. If you do NOT specify __GFP_NORETRY,
2064 * Linux will go to extreme measures to get memory for you
2065 * (like, invoke oom killer), which will probably cripple the
2066 * system.
2067 */
2068 gfp |= __GFP_NORETRY;
2069 p = kmem_cache_alloc(pool, gfp);
0aca7844 2070 if (!p)
12e364b9 2071 return NULL;
0aca7844 2072
12e364b9
KC
2073 atomic_inc(&Visorchipset_cache_buffers_in_use);
2074 return p;
2075}
2076
2077/* Generic wrapper function for freeing memory from a kmem_cache pool.
2078 */
2079void
2080visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2081{
0aca7844 2082 if (!p)
12e364b9 2083 return;
0aca7844 2084
12e364b9
KC
2085 atomic_dec(&Visorchipset_cache_buffers_in_use);
2086 kmem_cache_free(pool, p);
2087}
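/* Hedged usage sketch (whether any caller actually looks like this is not
 * shown here; Putfile_buffer_list_pool and its entry type do exist in this
 * driver):
 *
 *	struct putfile_buffer_entry *entry;
 *
 *	entry = visorchipset_cache_alloc(Putfile_buffer_list_pool,
 *					 FALSE, __FILE__, __LINE__);
 *	if (!entry)
 *		return;		- may fail because of __GFP_NORETRY
 *	...
 *	visorchipset_cache_free(Putfile_buffer_list_pool, entry,
 *				__FILE__, __LINE__);
 *
 * ok_to_block selects GFP_KERNEL vs. GFP_ATOMIC, so pass FALSE when calling
 * from atomic context.
 */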
2088
18b87ed1
BR
2089static ssize_t chipsetready_store(struct device *dev,
2090 struct device_attribute *attr, const char *buf, size_t count)
12e364b9 2091{
18b87ed1 2092 char msgtype[64];
12e364b9 2093
66e24b76
BR
2094 if (sscanf(buf, "%63s", msgtype) != 1)
2095 return -EINVAL;
2096
2097 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2098 chipset_events[0] = 1;
2099 return count;
2100 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2101 chipset_events[1] = 1;
2102 return count;
e22a4a0f
BR
2103 }
2104 return -EINVAL;
12e364b9
KC
2105}
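/* This store pairs with the holdchipsetready module parameter defined at the
 * bottom of this file: when holdchipsetready=1, the CHIPSET_READY response is
 * held back until user space has reported both events, e.g.
 *
 *	echo MODULES_LOADED        > .../visorchipset/chipsetready
 *	echo CALLHOMEDISK_MOUNTED  > .../visorchipset/chipsetready
 *
 * (the sysfs path is abbreviated because the attribute registration is not
 * shown in this part of the file).
 */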
2106
e56fa7cd
BR
2107/* The parahotplug/devicedisabled interface gets called by our support script
2108 * when an SR-IOV device has been shut down. The ID is passed to the script
2109 * and then passed back when the device has been removed.
2110 */
2111static ssize_t devicedisabled_store(struct device *dev,
2112 struct device_attribute *attr, const char *buf, size_t count)
2113{
2114 uint id;
2115
2116 if (kstrtouint(buf, 10, &id) != 0)
2117 return -EINVAL;
2118
2119 parahotplug_request_complete(id, 0);
2120 return count;
2121}
2122
2123/* The parahotplug/deviceenabled interface gets called by our support script
2124 * when an SR-IOV device has been recovered. The ID is passed to the script
2125 * and then passed back when the device has been brought back up.
2126 */
2127static ssize_t deviceenabled_store(struct device *dev,
2128 struct device_attribute *attr, const char *buf, size_t count)
2129{
2130 uint id;
2131
2132 if (kstrtouint(buf, 10, &id) != 0)
2133 return -EINVAL;
2134
2135 parahotplug_request_complete(id, 1);
2136 return count;
2137}
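/* Round trip for the parahotplug interface, as the support script might drive
 * it (the sysfs path is an assumption based on Visorchipset_platform_device
 * and is abbreviated here):
 *
 *	udev delivers SPAR_PARAHOTPLUG_ID=$ID with SPAR_PARAHOTPLUG_STATE=0;
 *	the script takes the SR-IOV device down, then reports completion:
 *
 *		echo $ID > .../visorchipset/parahotplug/devicedisabled
 *
 * which reaches parahotplug_request_complete($ID, 0) above and releases the
 * CONTROLVM response that was queued for the disable request.
 */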
2138
12e364b9
KC
2139static int __init
2140visorchipset_init(void)
2141{
2142 int rc = 0, x = 0;
8a1182eb 2143 HOSTADDRESS addr;
12e364b9 2144
fcd0157e
KC
2145 if (!unisys_spar_platform)
2146 return -ENODEV;
2147
12e364b9
KC
2148 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2149 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
84982fbf 2150 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
ea33b4ee
BR
2151 memset(&livedump_info, 0, sizeof(livedump_info));
2152 atomic_set(&livedump_info.buffers_in_use, 0);
12e364b9 2153
9f8d0e8b 2154 if (visorchipset_testvnic) {
9f8d0e8b
KC
2155 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2156 rc = x;
2157 goto Away;
2158 }
12e364b9 2159
8a1182eb
BR
2160 addr = controlvm_get_channel_address();
2161 if (addr != 0) {
c3d9a224 2162 controlvm_channel =
8a1182eb
BR
2163 visorchannel_create_with_lock
2164 (addr,
d19642f6 2165 sizeof(struct spar_controlvm_channel_protocol),
5fbaa4b3 2166 spar_controlvm_channel_protocol_uuid);
93a84565 2167 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
c3d9a224 2168 visorchannel_get_header(controlvm_channel))) {
8a1182eb
BR
2169 initialize_controlvm_payload();
2170 } else {
c3d9a224
BR
2171 visorchannel_destroy(controlvm_channel);
2172 controlvm_channel = NULL;
8a1182eb
BR
2173 return -ENODEV;
2174 }
2175 } else {
8a1182eb
BR
2176 return -ENODEV;
2177 }
2178
12e364b9 2179 MajorDev = MKDEV(visorchipset_major, 0);
c3d9a224 2180 rc = visorchipset_file_init(MajorDev, &controlvm_channel);
4cb005a9 2181 if (rc < 0) {
4cb005a9
KC
2182 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2183 goto Away;
2184 }
9f8d0e8b 2185
da021f02 2186 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2187
da021f02 2188 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2189
da021f02 2190 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2191
12e364b9
KC
2192 Putfile_buffer_list_pool =
2193 kmem_cache_create(Putfile_buffer_list_pool_name,
2194 sizeof(struct putfile_buffer_entry),
2195 0, SLAB_HWCACHE_ALIGN, NULL);
2196 if (!Putfile_buffer_list_pool) {
4cb005a9
KC
2197 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2198 rc = -ENOMEM;
2199 goto Away;
12e364b9 2200 }
2098dbd1 2201 if (!visorchipset_disable_controlvm) {
12e364b9
KC
2202 /* if booting in a crash kernel */
2203 if (visorchipset_crash_kernel)
9232d2d6 2204 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9
KC
2205 setup_crash_devices_work_queue);
2206 else
9232d2d6 2207 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9 2208 controlvm_periodic_work);
9232d2d6 2209 periodic_controlvm_workqueue =
12e364b9
KC
2210 create_singlethread_workqueue("visorchipset_controlvm");
2211
38f736e9 2212 if (!periodic_controlvm_workqueue) {
4cb005a9
KC
2213 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2214 DIAG_SEVERITY_ERR);
2215 rc = -ENOMEM;
2216 goto Away;
2217 }
b53e0e93 2218 most_recent_message_jiffies = jiffies;
911e213e 2219 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
9232d2d6
BR
2220 rc = queue_delayed_work(periodic_controlvm_workqueue,
2221 &periodic_controlvm_work, poll_jiffies);
4cb005a9 2222 if (rc < 0) {
4cb005a9
KC
2223 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2224 DIAG_SEVERITY_ERR);
2225 goto Away;
2226 }
12e364b9
KC
2227 }
2228
2229 Visorchipset_platform_device.dev.devt = MajorDev;
4cb005a9 2230 if (platform_device_register(&Visorchipset_platform_device) < 0) {
4cb005a9
KC
2231 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2232 rc = -1;
2233 goto Away;
2234 }
12e364b9 2235 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
22ad57ba 2236 rc = 0;
12e364b9 2237Away:
12e364b9 2238 if (rc) {
12e364b9
KC
2239 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2240 POSTCODE_SEVERITY_ERR);
2241 }
2242 return rc;
2243}
2244
2245static void
2246visorchipset_exit(void)
2247{
12e364b9
KC
2248 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2249
2250 if (!visorchipset_disable_controlvm) {
9232d2d6
BR
2253 cancel_delayed_work(&periodic_controlvm_work);
2254 flush_workqueue(periodic_controlvm_workqueue);
2255 destroy_workqueue(periodic_controlvm_workqueue);
2256 periodic_controlvm_workqueue = NULL;
84982fbf 2257 destroy_controlvm_payload_info(&controlvm_payload_info);
12e364b9 2258 }
12e364b9
KC
2259 if (Putfile_buffer_list_pool) {
2260 kmem_cache_destroy(Putfile_buffer_list_pool);
2261 Putfile_buffer_list_pool = NULL;
2262 }
1783319f 2263
12e364b9
KC
2264 cleanup_controlvm_structures();
2265
da021f02 2266 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2267
da021f02 2268 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2269
da021f02 2270 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2271
c3d9a224 2272 visorchannel_destroy(controlvm_channel);
8a1182eb 2273
12e364b9
KC
2274 visorchipset_file_cleanup();
2275 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
12e364b9
KC
2276}
2277
2278module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2279MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2280int visorchipset_testvnic = 0;
2281
2282module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2283MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2284int visorchipset_testvnicclient = 0;
2285
2286module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2287MODULE_PARM_DESC(visorchipset_testmsg,
2288 "1 to manufacture the chipset, bus, and switch messages");
2289int visorchipset_testmsg = 0;
2290
2291module_param_named(major, visorchipset_major, int, S_IRUGO);
2292MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2293int visorchipset_major = 0;
2294
2295module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2296 MODULE_PARM_DESC(visorchipset_serverregwait,
2297 "1 to have the module wait for the visor bus to register");
2298int visorchipset_serverregwait = 0; /* default is off */
2299module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2300MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2301int visorchipset_clientregwait = 1; /* default is on */
2302module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2303MODULE_PARM_DESC(visorchipset_testteardown,
2304 "1 to test teardown of the chipset, bus, and switch");
2305int visorchipset_testteardown = 0; /* default is off */
2306module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2307 S_IRUGO);
2308MODULE_PARM_DESC(visorchipset_disable_controlvm,
2309 "1 to disable polling of controlVm channel");
2310int visorchipset_disable_controlvm = 0; /* default is off */
2311module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2312MODULE_PARM_DESC(visorchipset_crash_kernel,
2313 "1 means we are running in crash kernel");
2314int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2315module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2316 int, S_IRUGO);
2317MODULE_PARM_DESC(visorchipset_holdchipsetready,
2318 "1 to hold response to CHIPSET_READY");
2319int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2320 * response immediately */
2321module_init(visorchipset_init);
2322module_exit(visorchipset_exit);
2323
2324MODULE_AUTHOR("Unisys");
2325MODULE_LICENSE("GPL");
2326MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2327 VERSION);
2328MODULE_VERSION(VERSION);