staging: unisys: refactor bus_destroy()
[deliverable/linux.git] / drivers / staging / unisys / visorchipset / visorchipset_main.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18#include "globals.h"
12e364b9
KC
19#include "visorchipset.h"
20#include "procobjecttree.h"
21#include "visorchannel.h"
22#include "periodic_work.h"
12e364b9
KC
23#include "file.h"
24#include "parser.h"
12e364b9 25#include "uisutils.h"
12e364b9
KC
26#include "controlvmcompletionstatus.h"
27#include "guestlinuxdebug.h"
12e364b9
KC
28
29#include <linux/nls.h>
30#include <linux/netdevice.h>
31#include <linux/platform_device.h>
90addb02 32#include <linux/uuid.h>
12e364b9
KC
33
34#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37#define TEST_VNIC_SWITCHNO 1
38#define TEST_VNIC_BUSNO 9
39
40#define MAX_NAME_SIZE 128
41#define MAX_IP_SIZE 50
42#define MAXOUTSTANDINGCHANNELCOMMAND 256
43#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
45
46/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
47* we switch to slow polling mode. As soon as we get a controlvm
48* message, we switch back to fast polling mode.
49*/
50#define MIN_IDLE_SECONDS 10
911e213e 51static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
b53e0e93 52static ulong most_recent_message_jiffies; /* when we got our last
bd5b9b32 53 * controlvm message */
12e364b9
KC
54static inline char *
55NONULLSTR(char *s)
56{
57 if (s)
58 return s;
e22a4a0f 59 return "";
12e364b9
KC
60}
61
62static int serverregistered;
63static int clientregistered;
64
65#define MAX_CHIPSET_EVENTS 2
c242233e 66static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 67
9232d2d6
BR
68static struct delayed_work periodic_controlvm_work;
69static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 70static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 71
da021f02
BR
72static struct controlvm_message_header g_diag_msg_hdr;
73static struct controlvm_message_header g_chipset_msg_hdr;
74static struct controlvm_message_header g_del_dump_msg_hdr;
59827f00 75static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 76 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 77/* 0xffffff is an invalid Bus/Device number */
83d48905
BR
78static ulong g_diagpool_bus_no = 0xffffff;
79static ulong g_diagpool_dev_no = 0xffffff;
4f44b72d 80static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9
KC
81
82/* Only VNIC and VHBA channels are sent to visorclientbus (aka
83 * "visorhackbus")
84 */
85#define FOR_VISORHACKBUS(channel_type_guid) \
9eee5d1f 86 (((uuid_le_cmp(channel_type_guid,\
0639ba67
BR
87 spar_vnic_channel_protocol_uuid) == 0) ||\
88 (uuid_le_cmp(channel_type_guid,\
9eee5d1f 89 spar_vhba_channel_protocol_uuid) == 0)))
12e364b9
KC
90#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
91
92#define is_diagpool_channel(channel_type_guid) \
59827f00
BR
93 (uuid_le_cmp(channel_type_guid,\
94 spar_diag_pool_channel_protocol_uuid) == 0)
12e364b9 95
1390b88c
BR
96static LIST_HEAD(bus_info_list);
97static LIST_HEAD(dev_info_list);
12e364b9 98
c3d9a224 99static struct visorchannel *controlvm_channel;
12e364b9 100
84982fbf
BR
101/* Manages the request payload in the controlvm channel */
102static struct controlvm_payload_info {
c242233e 103 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 104 u64 offset; /* offset from beginning of controlvm
12e364b9 105 * channel to beginning of payload * pool */
b3c55b13 106 u32 bytes; /* number of bytes in payload pool */
84982fbf 107} controlvm_payload_info;
12e364b9 108
ea33b4ee
BR
109/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
110 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
111 */
112static struct livedump_info {
113 struct controlvm_message_header dumpcapture_header;
114 struct controlvm_message_header gettextdump_header;
115 struct controlvm_message_header dumpcomplete_header;
116 BOOL gettextdump_outstanding;
12e364b9
KC
117 u32 crc32;
118 ulong length;
119 atomic_t buffers_in_use;
120 ulong destination;
ea33b4ee 121} livedump_info;
12e364b9
KC
122
123/* The following globals are used to handle the scenario where we are unable to
124 * offload the payload from a controlvm message due to memory requirements. In
125 * this scenario, we simply stash the controlvm message, then attempt to
126 * process it again the next time controlvm_periodic_work() runs.
127 */
3ab47701 128static struct controlvm_message ControlVm_Pending_Msg;
12e364b9
KC
129static BOOL ControlVm_Pending_Msg_Valid = FALSE;
130
131/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
132 * TRANSMIT_FILE PutFile payloads.
133 */
134static struct kmem_cache *Putfile_buffer_list_pool;
135static const char Putfile_buffer_list_pool_name[] =
136 "controlvm_putfile_buffer_list_pool";
137
138/* This identifies a data buffer that has been received via a controlvm messages
139 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
140 */
141struct putfile_buffer_entry {
142 struct list_head next; /* putfile_buffer_entry list */
317d9614 143 struct parser_context *parser_ctx; /* points to input data buffer */
12e364b9
KC
144};
145
146/* List of struct putfile_request *, via next_putfile_request member.
147 * Each entry in this list identifies an outstanding TRANSMIT_FILE
148 * conversation.
149 */
150static LIST_HEAD(Putfile_request_list);
151
152/* This describes a buffer and its current state of transfer (e.g., how many
153 * bytes have already been supplied as putfile data, and how many bytes are
154 * remaining) for a putfile_request.
155 */
156struct putfile_active_buffer {
157 /* a payload from a controlvm message, containing a file data buffer */
317d9614 158 struct parser_context *parser_ctx;
12e364b9
KC
159 /* points within data area of parser_ctx to next byte of data */
160 u8 *pnext;
161 /* # bytes left from <pnext> to the end of this data buffer */
162 size_t bytes_remaining;
163};
164
165#define PUTFILE_REQUEST_SIG 0x0906101302281211
166/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
167 * conversation. Structs of this type are dynamically linked into
168 * <Putfile_request_list>.
169 */
170struct putfile_request {
171 u64 sig; /* PUTFILE_REQUEST_SIG */
172
173 /* header from original TransmitFile request */
98d7b594 174 struct controlvm_message_header controlvm_header;
12e364b9
KC
175 u64 file_request_number; /* from original TransmitFile request */
176
177 /* link to next struct putfile_request */
178 struct list_head next_putfile_request;
179
180 /* most-recent sequence number supplied via a controlvm message */
181 u64 data_sequence_number;
182
183 /* head of putfile_buffer_entry list, which describes the data to be
184 * supplied as putfile data;
185 * - this list is added to when controlvm messages come in that supply
186 * file data
187 * - this list is removed from via the hotplug program that is actually
188 * consuming these buffers to write as file data */
189 struct list_head input_buffer_list;
190 spinlock_t req_list_lock; /* lock for input_buffer_list */
191
192 /* waiters for input_buffer_list to go non-empty */
193 wait_queue_head_t input_buffer_wq;
194
195 /* data not yet read within current putfile_buffer_entry */
196 struct putfile_active_buffer active_buf;
197
198 /* <0 = failed, 0 = in-progress, >0 = successful; */
199 /* note that this must be set with req_list_lock, and if you set <0, */
200 /* it is your responsibility to also free up all of the other objects */
201 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
202 /* before releasing the lock */
203 int completion_status;
204};
205
bd5b9b32 206static atomic_t Visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
12e364b9
KC
207
208struct parahotplug_request {
209 struct list_head list;
210 int id;
211 unsigned long expiration;
3ab47701 212 struct controlvm_message msg;
12e364b9
KC
213};
214
215static LIST_HEAD(Parahotplug_request_list);
216static DEFINE_SPINLOCK(Parahotplug_request_list_lock); /* lock for above */
217static void parahotplug_process_list(void);
218
219/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
220 * CONTROLVM_REPORTEVENT.
221 */
fe90d892
BR
222static struct visorchipset_busdev_notifiers BusDev_Server_Notifiers;
223static struct visorchipset_busdev_notifiers BusDev_Client_Notifiers;
12e364b9
KC
224
225static void bus_create_response(ulong busNo, int response);
226static void bus_destroy_response(ulong busNo, int response);
227static void device_create_response(ulong busNo, ulong devNo, int response);
228static void device_destroy_response(ulong busNo, ulong devNo, int response);
229static void device_resume_response(ulong busNo, ulong devNo, int response);
230
929aa8ae 231static struct visorchipset_busdev_responders BusDev_Responders = {
12e364b9
KC
232 .bus_create = bus_create_response,
233 .bus_destroy = bus_destroy_response,
234 .device_create = device_create_response,
235 .device_destroy = device_destroy_response,
927c7927 236 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
237 .device_resume = device_resume_response,
238};
239
240/* info for /dev/visorchipset */
241static dev_t MajorDev = -1; /**< indicates major num for device */
242
19f6634f
BR
243/* prototypes for attributes */
244static ssize_t toolaction_show(struct device *dev,
245 struct device_attribute *attr, char *buf);
246static ssize_t toolaction_store(struct device *dev,
247 struct device_attribute *attr, const char *buf, size_t count);
248static DEVICE_ATTR_RW(toolaction);
249
54b31229
BR
250static ssize_t boottotool_show(struct device *dev,
251 struct device_attribute *attr, char *buf);
252static ssize_t boottotool_store(struct device *dev,
253 struct device_attribute *attr, const char *buf, size_t count);
254static DEVICE_ATTR_RW(boottotool);
255
422af17c
BR
256static ssize_t error_show(struct device *dev, struct device_attribute *attr,
257 char *buf);
258static ssize_t error_store(struct device *dev, struct device_attribute *attr,
259 const char *buf, size_t count);
260static DEVICE_ATTR_RW(error);
261
262static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
263 char *buf);
264static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
265 const char *buf, size_t count);
266static DEVICE_ATTR_RW(textid);
267
268static ssize_t remaining_steps_show(struct device *dev,
269 struct device_attribute *attr, char *buf);
270static ssize_t remaining_steps_store(struct device *dev,
271 struct device_attribute *attr, const char *buf, size_t count);
272static DEVICE_ATTR_RW(remaining_steps);
273
18b87ed1
BR
274static ssize_t chipsetready_store(struct device *dev,
275 struct device_attribute *attr, const char *buf, size_t count);
276static DEVICE_ATTR_WO(chipsetready);
277
e56fa7cd
BR
278static ssize_t devicedisabled_store(struct device *dev,
279 struct device_attribute *attr, const char *buf, size_t count);
280static DEVICE_ATTR_WO(devicedisabled);
281
282static ssize_t deviceenabled_store(struct device *dev,
283 struct device_attribute *attr, const char *buf, size_t count);
284static DEVICE_ATTR_WO(deviceenabled);
285
19f6634f
BR
286static struct attribute *visorchipset_install_attrs[] = {
287 &dev_attr_toolaction.attr,
54b31229 288 &dev_attr_boottotool.attr,
422af17c
BR
289 &dev_attr_error.attr,
290 &dev_attr_textid.attr,
291 &dev_attr_remaining_steps.attr,
19f6634f
BR
292 NULL
293};
294
295static struct attribute_group visorchipset_install_group = {
296 .name = "install",
297 .attrs = visorchipset_install_attrs
298};
299
18b87ed1
BR
300static struct attribute *visorchipset_guest_attrs[] = {
301 &dev_attr_chipsetready.attr,
302 NULL
303};
304
305static struct attribute_group visorchipset_guest_group = {
306 .name = "guest",
307 .attrs = visorchipset_guest_attrs
308};
309
e56fa7cd
BR
310static struct attribute *visorchipset_parahotplug_attrs[] = {
311 &dev_attr_devicedisabled.attr,
312 &dev_attr_deviceenabled.attr,
313 NULL
314};
315
316static struct attribute_group visorchipset_parahotplug_group = {
317 .name = "parahotplug",
318 .attrs = visorchipset_parahotplug_attrs
319};
320
19f6634f
BR
321static const struct attribute_group *visorchipset_dev_groups[] = {
322 &visorchipset_install_group,
18b87ed1 323 &visorchipset_guest_group,
e56fa7cd 324 &visorchipset_parahotplug_group,
19f6634f
BR
325 NULL
326};
327
12e364b9
KC
328/* /sys/devices/platform/visorchipset */
329static struct platform_device Visorchipset_platform_device = {
330 .name = "visorchipset",
331 .id = -1,
19f6634f 332 .dev.groups = visorchipset_dev_groups,
12e364b9
KC
333};
334
335/* Function prototypes */
98d7b594
BR
336static void controlvm_respond(struct controlvm_message_header *msgHdr,
337 int response);
338static void controlvm_respond_chipset_init(
339 struct controlvm_message_header *msgHdr, int response,
340 enum ultra_chipset_feature features);
341static void controlvm_respond_physdev_changestate(
342 struct controlvm_message_header *msgHdr, int response,
343 struct spar_segment_state state);
12e364b9 344
d746cb55
VB
345static ssize_t toolaction_show(struct device *dev,
346 struct device_attribute *attr,
347 char *buf)
19f6634f 348{
66e24b76 349 u8 toolAction;
19f6634f 350
c3d9a224 351 visorchannel_read(controlvm_channel,
d19642f6
BR
352 offsetof(struct spar_controlvm_channel_protocol,
353 tool_action), &toolAction, sizeof(u8));
19f6634f
BR
354 return scnprintf(buf, PAGE_SIZE, "%u\n", toolAction);
355}
356
d746cb55
VB
357static ssize_t toolaction_store(struct device *dev,
358 struct device_attribute *attr,
359 const char *buf, size_t count)
19f6634f 360{
66e24b76
BR
361 u8 toolAction;
362 int ret;
19f6634f 363
66e24b76
BR
364 if (kstrtou8(buf, 10, &toolAction) != 0)
365 return -EINVAL;
366
c3d9a224 367 ret = visorchannel_write(controlvm_channel,
d19642f6 368 offsetof(struct spar_controlvm_channel_protocol, tool_action),
66e24b76
BR
369 &toolAction, sizeof(u8));
370
371 if (ret)
372 return ret;
e22a4a0f 373 return count;
19f6634f
BR
374}
375
d746cb55
VB
376static ssize_t boottotool_show(struct device *dev,
377 struct device_attribute *attr,
378 char *buf)
54b31229 379{
755e2ecc 380 struct efi_spar_indication efiSparIndication;
54b31229 381
c3d9a224 382 visorchannel_read(controlvm_channel,
d19642f6
BR
383 offsetof(struct spar_controlvm_channel_protocol,
384 efi_spar_ind), &efiSparIndication,
755e2ecc 385 sizeof(struct efi_spar_indication));
54b31229 386 return scnprintf(buf, PAGE_SIZE, "%u\n",
2450301a 387 efiSparIndication.boot_to_tool);
54b31229
BR
388}
389
d746cb55
VB
390static ssize_t boottotool_store(struct device *dev,
391 struct device_attribute *attr,
392 const char *buf, size_t count)
54b31229 393{
66e24b76 394 int val, ret;
755e2ecc 395 struct efi_spar_indication efiSparIndication;
54b31229 396
66e24b76
BR
397 if (kstrtoint(buf, 10, &val) != 0)
398 return -EINVAL;
399
2450301a 400 efiSparIndication.boot_to_tool = val;
c3d9a224 401 ret = visorchannel_write(controlvm_channel,
d19642f6
BR
402 offsetof(struct spar_controlvm_channel_protocol,
403 efi_spar_ind),
54b31229 404 &(efiSparIndication),
755e2ecc 405 sizeof(struct efi_spar_indication));
66e24b76
BR
406
407 if (ret)
408 return ret;
e22a4a0f 409 return count;
54b31229 410}
422af17c
BR
411
412static ssize_t error_show(struct device *dev, struct device_attribute *attr,
413 char *buf)
414{
415 u32 error;
416
c3d9a224 417 visorchannel_read(controlvm_channel, offsetof(
d19642f6 418 struct spar_controlvm_channel_protocol, installation_error),
422af17c
BR
419 &error, sizeof(u32));
420 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
421}
422
423static ssize_t error_store(struct device *dev, struct device_attribute *attr,
424 const char *buf, size_t count)
425{
426 u32 error;
66e24b76 427 int ret;
422af17c 428
66e24b76
BR
429 if (kstrtou32(buf, 10, &error) != 0)
430 return -EINVAL;
431
c3d9a224 432 ret = visorchannel_write(controlvm_channel,
d19642f6
BR
433 offsetof(struct spar_controlvm_channel_protocol,
434 installation_error),
66e24b76
BR
435 &error, sizeof(u32));
436 if (ret)
437 return ret;
e22a4a0f 438 return count;
422af17c
BR
439}
440
441static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
442 char *buf)
443{
444 u32 textId;
445
c3d9a224 446 visorchannel_read(controlvm_channel, offsetof(
d19642f6 447 struct spar_controlvm_channel_protocol, installation_text_id),
422af17c
BR
448 &textId, sizeof(u32));
449 return scnprintf(buf, PAGE_SIZE, "%i\n", textId);
450}
451
452static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
453 const char *buf, size_t count)
454{
455 u32 textId;
66e24b76 456 int ret;
422af17c 457
66e24b76
BR
458 if (kstrtou32(buf, 10, &textId) != 0)
459 return -EINVAL;
460
c3d9a224 461 ret = visorchannel_write(controlvm_channel,
d19642f6
BR
462 offsetof(struct spar_controlvm_channel_protocol,
463 installation_text_id),
66e24b76
BR
464 &textId, sizeof(u32));
465 if (ret)
466 return ret;
e22a4a0f 467 return count;
422af17c
BR
468}
469
422af17c
BR
470static ssize_t remaining_steps_show(struct device *dev,
471 struct device_attribute *attr, char *buf)
472{
473 u16 remainingSteps;
474
c3d9a224 475 visorchannel_read(controlvm_channel,
d19642f6
BR
476 offsetof(struct spar_controlvm_channel_protocol,
477 installation_remaining_steps),
422af17c
BR
478 &remainingSteps,
479 sizeof(u16));
480 return scnprintf(buf, PAGE_SIZE, "%hu\n", remainingSteps);
481}
482
483static ssize_t remaining_steps_store(struct device *dev,
484 struct device_attribute *attr, const char *buf, size_t count)
485{
486 u16 remainingSteps;
66e24b76 487 int ret;
422af17c 488
66e24b76
BR
489 if (kstrtou16(buf, 10, &remainingSteps) != 0)
490 return -EINVAL;
491
c3d9a224 492 ret = visorchannel_write(controlvm_channel,
d19642f6
BR
493 offsetof(struct spar_controlvm_channel_protocol,
494 installation_remaining_steps),
66e24b76
BR
495 &remainingSteps, sizeof(u16));
496 if (ret)
497 return ret;
e22a4a0f 498 return count;
422af17c
BR
499}
500
12e364b9 501static void
9b989a98 502bus_info_clear(void *v)
12e364b9 503{
33192fa1 504 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
12e364b9 505
12e364b9
KC
506 kfree(p->name);
507 p->name = NULL;
508
509 kfree(p->description);
510 p->description = NULL;
511
512 p->state.created = 0;
33192fa1 513 memset(p, 0, sizeof(struct visorchipset_bus_info));
12e364b9
KC
514}
515
516static void
9b989a98 517dev_info_clear(void *v)
12e364b9 518{
246e0cd0
BR
519 struct visorchipset_device_info *p =
520 (struct visorchipset_device_info *)(v);
26eb2c0c 521
12e364b9 522 p->state.created = 0;
246e0cd0 523 memset(p, 0, sizeof(struct visorchipset_device_info));
12e364b9
KC
524}
525
c242233e 526static u8
12e364b9
KC
527check_chipset_events(void)
528{
529 int i;
c242233e 530 u8 send_msg = 1;
12e364b9
KC
531 /* Check events to determine if response should be sent */
532 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
533 send_msg &= chipset_events[i];
534 return send_msg;
535}
536
537static void
538clear_chipset_events(void)
539{
540 int i;
541 /* Clear chipset_events */
542 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
543 chipset_events[i] = 0;
544}
545
546void
fe90d892
BR
547visorchipset_register_busdev_server(
548 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 549 struct visorchipset_busdev_responders *responders,
1e7a59c1 550 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 551{
8f1947ac 552 down(&notifier_lock);
38f736e9 553 if (!notifiers) {
12e364b9
KC
554 memset(&BusDev_Server_Notifiers, 0,
555 sizeof(BusDev_Server_Notifiers));
556 serverregistered = 0; /* clear flag */
557 } else {
558 BusDev_Server_Notifiers = *notifiers;
559 serverregistered = 1; /* set flag */
560 }
561 if (responders)
562 *responders = BusDev_Responders;
1e7a59c1
BR
563 if (driver_info)
564 bus_device_info_init(driver_info, "chipset", "visorchipset",
836bee9e 565 VERSION, NULL);
12e364b9 566
8f1947ac 567 up(&notifier_lock);
12e364b9
KC
568}
569EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
570
571void
fe90d892
BR
572visorchipset_register_busdev_client(
573 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 574 struct visorchipset_busdev_responders *responders,
43fce019 575 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 576{
8f1947ac 577 down(&notifier_lock);
38f736e9 578 if (!notifiers) {
12e364b9
KC
579 memset(&BusDev_Client_Notifiers, 0,
580 sizeof(BusDev_Client_Notifiers));
581 clientregistered = 0; /* clear flag */
582 } else {
583 BusDev_Client_Notifiers = *notifiers;
584 clientregistered = 1; /* set flag */
585 }
586 if (responders)
587 *responders = BusDev_Responders;
43fce019
BR
588 if (driver_info)
589 bus_device_info_init(driver_info, "chipset(bolts)",
590 "visorchipset", VERSION, NULL);
8f1947ac 591 up(&notifier_lock);
12e364b9
KC
592}
593EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
594
595static void
596cleanup_controlvm_structures(void)
597{
33192fa1 598 struct visorchipset_bus_info *bi, *tmp_bi;
246e0cd0 599 struct visorchipset_device_info *di, *tmp_di;
12e364b9 600
1390b88c 601 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
9b989a98 602 bus_info_clear(bi);
12e364b9
KC
603 list_del(&bi->entry);
604 kfree(bi);
605 }
606
1390b88c 607 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
9b989a98 608 dev_info_clear(di);
12e364b9
KC
609 list_del(&di->entry);
610 kfree(di);
611 }
612}
613
614static void
3ab47701 615chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
616{
617 static int chipset_inited;
b9b141e8 618 enum ultra_chipset_feature features = 0;
12e364b9
KC
619 int rc = CONTROLVM_RESP_SUCCESS;
620
621 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
622 if (chipset_inited) {
22ad57ba 623 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 624 goto cleanup;
12e364b9
KC
625 }
626 chipset_inited = 1;
627 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
628
629 /* Set features to indicate we support parahotplug (if Command
630 * also supports it). */
631 features =
2ea5117b 632 inmsg->cmd.init_chipset.
12e364b9
KC
633 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
634
635 /* Set the "reply" bit so Command knows this is a
636 * features-aware driver. */
637 features |= ULTRA_CHIPSET_FEATURE_REPLY;
638
e3199b2e 639cleanup:
12e364b9
KC
640 if (rc < 0)
641 cleanup_controlvm_structures();
98d7b594 642 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
643 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
644}
645
646static void
3ab47701 647controlvm_init_response(struct controlvm_message *msg,
98d7b594 648 struct controlvm_message_header *msgHdr, int response)
12e364b9 649{
3ab47701 650 memset(msg, 0, sizeof(struct controlvm_message));
98d7b594
BR
651 memcpy(&msg->hdr, msgHdr, sizeof(struct controlvm_message_header));
652 msg->hdr.payload_bytes = 0;
653 msg->hdr.payload_vm_offset = 0;
654 msg->hdr.payload_max_bytes = 0;
12e364b9 655 if (response < 0) {
98d7b594
BR
656 msg->hdr.flags.failed = 1;
657 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
658 }
659}
660
661static void
98d7b594 662controlvm_respond(struct controlvm_message_header *msgHdr, int response)
12e364b9 663{
3ab47701 664 struct controlvm_message outmsg;
26eb2c0c 665
12e364b9
KC
666 controlvm_init_response(&outmsg, msgHdr, response);
667 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
668 * back the deviceChangeState structure in the packet. */
0639ba67
BR
669 if (msgHdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
670 g_devicechangestate_packet.device_change_state.bus_no ==
671 g_diagpool_bus_no &&
672 g_devicechangestate_packet.device_change_state.dev_no ==
83d48905 673 g_diagpool_dev_no)
4f44b72d 674 outmsg.cmd = g_devicechangestate_packet;
2098dbd1 675 if (outmsg.hdr.flags.test_message == 1)
12e364b9 676 return;
2098dbd1 677
c3d9a224 678 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 679 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
680 return;
681 }
682}
683
684static void
98d7b594
BR
685controlvm_respond_chipset_init(struct controlvm_message_header *msgHdr,
686 int response,
b9b141e8 687 enum ultra_chipset_feature features)
12e364b9 688{
3ab47701 689 struct controlvm_message outmsg;
26eb2c0c 690
12e364b9 691 controlvm_init_response(&outmsg, msgHdr, response);
2ea5117b 692 outmsg.cmd.init_chipset.features = features;
c3d9a224 693 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 694 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
695 return;
696 }
697}
698
98d7b594
BR
699static void controlvm_respond_physdev_changestate(
700 struct controlvm_message_header *msgHdr, int response,
701 struct spar_segment_state state)
12e364b9 702{
3ab47701 703 struct controlvm_message outmsg;
26eb2c0c 704
12e364b9 705 controlvm_init_response(&outmsg, msgHdr, response);
2ea5117b
BR
706 outmsg.cmd.device_change_state.state = state;
707 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 708 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 709 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
710 return;
711 }
712}
713
714void
2c683cde
BR
715visorchipset_save_message(struct controlvm_message *msg,
716 enum crash_obj_type type)
12e364b9 717{
4577225d
BR
718 u32 crash_msg_offset;
719 u16 crash_msg_count;
12e364b9
KC
720
721 /* get saved message count */
c3d9a224 722 if (visorchannel_read(controlvm_channel,
d19642f6
BR
723 offsetof(struct spar_controlvm_channel_protocol,
724 saved_crash_message_count),
4577225d 725 &crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
726 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
727 POSTCODE_SEVERITY_ERR);
728 return;
729 }
730
4577225d 731 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 732 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
4577225d 733 crash_msg_count,
12e364b9
KC
734 POSTCODE_SEVERITY_ERR);
735 return;
736 }
737
738 /* get saved crash message offset */
c3d9a224 739 if (visorchannel_read(controlvm_channel,
d19642f6
BR
740 offsetof(struct spar_controlvm_channel_protocol,
741 saved_crash_message_offset),
4577225d 742 &crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
743 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
744 POSTCODE_SEVERITY_ERR);
745 return;
746 }
747
2c683cde 748 if (type == CRASH_BUS) {
c3d9a224 749 if (visorchannel_write(controlvm_channel,
4577225d 750 crash_msg_offset,
3ab47701
BR
751 msg,
752 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
753 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
754 POSTCODE_SEVERITY_ERR);
755 return;
756 }
757 } else {
c3d9a224 758 if (visorchannel_write(controlvm_channel,
4577225d 759 crash_msg_offset +
3ab47701
BR
760 sizeof(struct controlvm_message), msg,
761 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
762 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
763 POSTCODE_SEVERITY_ERR);
764 return;
765 }
766 }
767}
768EXPORT_SYMBOL_GPL(visorchipset_save_message);
769
770static void
fbb31f48 771bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
12e364b9 772{
33192fa1 773 struct visorchipset_bus_info *p = NULL;
12e364b9
KC
774 BOOL need_clear = FALSE;
775
fbb31f48 776 p = findbus(&bus_info_list, bus_no);
0aca7844 777 if (!p)
12e364b9 778 return;
0aca7844 779
12e364b9 780 if (response < 0) {
fbb31f48 781 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
12e364b9
KC
782 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
783 /* undo the row we just created... */
fbb31f48 784 delbusdevices(&dev_info_list, bus_no);
12e364b9 785 } else {
fbb31f48 786 if (cmd_id == CONTROLVM_BUS_CREATE)
12e364b9 787 p->state.created = 1;
fbb31f48 788 if (cmd_id == CONTROLVM_BUS_DESTROY)
12e364b9
KC
789 need_clear = TRUE;
790 }
791
0aca7844 792 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 793 return; /* no controlvm response needed */
6b59b31d 794 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 795 return;
33192fa1
BR
796 controlvm_respond(&p->pending_msg_hdr, response);
797 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 798 if (need_clear) {
9b989a98 799 bus_info_clear(p);
fbb31f48 800 delbusdevices(&dev_info_list, bus_no);
12e364b9
KC
801 }
802}
803
804static void
fbb31f48
BR
805device_changestate_responder(enum controlvm_id cmd_id,
806 ulong bus_no, ulong dev_no, int response,
807 struct spar_segment_state response_state)
12e364b9 808{
246e0cd0 809 struct visorchipset_device_info *p = NULL;
3ab47701 810 struct controlvm_message outmsg;
12e364b9 811
fbb31f48 812 p = finddevice(&dev_info_list, bus_no, dev_no);
0aca7844 813 if (!p)
12e364b9 814 return;
0aca7844 815 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 816 return; /* no controlvm response needed */
fbb31f48 817 if (p->pending_msg_hdr.id != cmd_id)
12e364b9 818 return;
12e364b9 819
246e0cd0 820 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
12e364b9 821
fbb31f48
BR
822 outmsg.cmd.device_change_state.bus_no = bus_no;
823 outmsg.cmd.device_change_state.dev_no = dev_no;
824 outmsg.cmd.device_change_state.state = response_state;
12e364b9 825
c3d9a224 826 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 827 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 828 return;
12e364b9 829
246e0cd0 830 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9
KC
831}
832
833static void
fbb31f48 834device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
53bebb13 835 int response)
12e364b9 836{
246e0cd0 837 struct visorchipset_device_info *p = NULL;
12e364b9
KC
838 BOOL need_clear = FALSE;
839
fbb31f48 840 p = finddevice(&dev_info_list, bus_no, dev_no);
0aca7844 841 if (!p)
12e364b9 842 return;
12e364b9 843 if (response >= 0) {
fbb31f48 844 if (cmd_id == CONTROLVM_DEVICE_CREATE)
12e364b9 845 p->state.created = 1;
fbb31f48 846 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
12e364b9
KC
847 need_clear = TRUE;
848 }
849
0aca7844 850 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 851 return; /* no controlvm response needed */
0aca7844 852
6b59b31d 853 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 854 return;
0aca7844 855
246e0cd0
BR
856 controlvm_respond(&p->pending_msg_hdr, response);
857 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 858 if (need_clear)
9b989a98 859 dev_info_clear(p);
12e364b9
KC
860}
861
862static void
2836c6a8
BR
863bus_epilog(u32 bus_no,
864 u32 cmd, struct controlvm_message_header *msg_hdr,
865 int response, BOOL need_response)
12e364b9
KC
866{
867 BOOL notified = FALSE;
868
2836c6a8
BR
869 struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
870 bus_no);
12e364b9 871
2836c6a8 872 if (!bus_info)
12e364b9 873 return;
0aca7844 874
2836c6a8
BR
875 if (need_response) {
876 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
98d7b594 877 sizeof(struct controlvm_message_header));
75c1f8b7 878 } else {
2836c6a8 879 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 880 }
12e364b9 881
8f1947ac 882 down(&notifier_lock);
12e364b9
KC
883 if (response == CONTROLVM_RESP_SUCCESS) {
884 switch (cmd) {
885 case CONTROLVM_BUS_CREATE:
886 /* We can't tell from the bus_create
887 * information which of our 2 bus flavors the
888 * devices on this bus will ultimately end up.
889 * FORTUNATELY, it turns out it is harmless to
890 * send the bus_create to both of them. We can
891 * narrow things down a little bit, though,
892 * because we know: - BusDev_Server can handle
893 * either server or client devices
894 * - BusDev_Client can handle ONLY client
895 * devices */
896 if (BusDev_Server_Notifiers.bus_create) {
2836c6a8 897 (*BusDev_Server_Notifiers.bus_create) (bus_no);
12e364b9
KC
898 notified = TRUE;
899 }
2836c6a8 900 if ((!bus_info->flags.server) /*client */ &&
12e364b9 901 BusDev_Client_Notifiers.bus_create) {
2836c6a8 902 (*BusDev_Client_Notifiers.bus_create) (bus_no);
12e364b9
KC
903 notified = TRUE;
904 }
905 break;
906 case CONTROLVM_BUS_DESTROY:
907 if (BusDev_Server_Notifiers.bus_destroy) {
2836c6a8 908 (*BusDev_Server_Notifiers.bus_destroy) (bus_no);
12e364b9
KC
909 notified = TRUE;
910 }
2836c6a8 911 if ((!bus_info->flags.server) /*client */ &&
12e364b9 912 BusDev_Client_Notifiers.bus_destroy) {
2836c6a8 913 (*BusDev_Client_Notifiers.bus_destroy) (bus_no);
12e364b9
KC
914 notified = TRUE;
915 }
916 break;
917 }
918 }
919 if (notified)
920 /* The callback function just called above is responsible
929aa8ae 921 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
922 * function, which will call bus_responder()
923 */
924 ;
925 else
2836c6a8 926 bus_responder(cmd, bus_no, response);
8f1947ac 927 up(&notifier_lock);
12e364b9
KC
928}
929
930static void
2836c6a8
BR
931device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
932 struct controlvm_message_header *msg_hdr, int response,
933 BOOL need_response, BOOL for_visorbus)
12e364b9 934{
fe90d892 935 struct visorchipset_busdev_notifiers *notifiers = NULL;
12e364b9
KC
936 BOOL notified = FALSE;
937
2836c6a8
BR
938 struct visorchipset_device_info *dev_info =
939 finddevice(&dev_info_list, bus_no, dev_no);
12e364b9
KC
940 char *envp[] = {
941 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
942 NULL
943 };
944
2836c6a8 945 if (!dev_info)
12e364b9 946 return;
0aca7844 947
12e364b9
KC
948 if (for_visorbus)
949 notifiers = &BusDev_Server_Notifiers;
950 else
951 notifiers = &BusDev_Client_Notifiers;
2836c6a8
BR
952 if (need_response) {
953 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
98d7b594 954 sizeof(struct controlvm_message_header));
75c1f8b7 955 } else {
2836c6a8 956 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 957 }
12e364b9 958
8f1947ac 959 down(&notifier_lock);
12e364b9
KC
960 if (response >= 0) {
961 switch (cmd) {
962 case CONTROLVM_DEVICE_CREATE:
963 if (notifiers->device_create) {
2836c6a8 964 (*notifiers->device_create) (bus_no, dev_no);
12e364b9
KC
965 notified = TRUE;
966 }
967 break;
968 case CONTROLVM_DEVICE_CHANGESTATE:
969 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
970 if (state.alive == segment_state_running.alive &&
971 state.operating ==
972 segment_state_running.operating) {
12e364b9 973 if (notifiers->device_resume) {
2836c6a8
BR
974 (*notifiers->device_resume) (bus_no,
975 dev_no);
12e364b9
KC
976 notified = TRUE;
977 }
978 }
979 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 980 else if (state.alive == segment_state_standby.alive &&
3f833b54 981 state.operating ==
bd0d2dcc 982 segment_state_standby.operating) {
12e364b9
KC
983 /* technically this is standby case
984 * where server is lost
985 */
986 if (notifiers->device_pause) {
2836c6a8
BR
987 (*notifiers->device_pause) (bus_no,
988 dev_no);
12e364b9
KC
989 notified = TRUE;
990 }
bd0d2dcc 991 } else if (state.alive == segment_state_paused.alive &&
3f833b54 992 state.operating ==
bd0d2dcc 993 segment_state_paused.operating) {
12e364b9
KC
994 /* this is lite pause where channel is
995 * still valid just 'pause' of it
996 */
2836c6a8
BR
997 if (bus_no == g_diagpool_bus_no &&
998 dev_no == g_diagpool_dev_no) {
12e364b9
KC
999 /* this will trigger the
1000 * diag_shutdown.sh script in
1001 * the visorchipset hotplug */
1002 kobject_uevent_env
1003 (&Visorchipset_platform_device.dev.
1004 kobj, KOBJ_ONLINE, envp);
1005 }
1006 }
1007 break;
1008 case CONTROLVM_DEVICE_DESTROY:
1009 if (notifiers->device_destroy) {
2836c6a8 1010 (*notifiers->device_destroy) (bus_no, dev_no);
12e364b9
KC
1011 notified = TRUE;
1012 }
1013 break;
1014 }
1015 }
1016 if (notified)
1017 /* The callback function just called above is responsible
929aa8ae 1018 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1019 * function, which will call device_responder()
1020 */
1021 ;
1022 else
2836c6a8 1023 device_responder(cmd, bus_no, dev_no, response);
8f1947ac 1024 up(&notifier_lock);
12e364b9
KC
1025}
1026
1027static void
3ab47701 1028bus_create(struct controlvm_message *inmsg)
12e364b9 1029{
2ea5117b 1030 struct controlvm_message_packet *cmd = &inmsg->cmd;
6c5fed35 1031 ulong bus_no = cmd->create_bus.bus_no;
12e364b9 1032 int rc = CONTROLVM_RESP_SUCCESS;
6c5fed35 1033 struct visorchipset_bus_info *bus_info = NULL;
12e364b9 1034
6c5fed35
BR
1035 bus_info = findbus(&bus_info_list, bus_no);
1036 if (bus_info && (bus_info->state.created == 1)) {
1037 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1038 POSTCODE_SEVERITY_ERR);
22ad57ba 1039 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
6c5fed35 1040 goto cleanup;
12e364b9 1041 }
6c5fed35
BR
1042 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1043 if (!bus_info) {
1044 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1045 POSTCODE_SEVERITY_ERR);
22ad57ba 1046 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
6c5fed35 1047 goto cleanup;
12e364b9
KC
1048 }
1049
6c5fed35
BR
1050 INIT_LIST_HEAD(&bus_info->entry);
1051 bus_info->bus_no = bus_no;
1052 bus_info->dev_no = cmd->create_bus.dev_count;
12e364b9 1053
6c5fed35 1054 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1055
98d7b594 1056 if (inmsg->hdr.flags.test_message == 1)
6c5fed35 1057 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1058 else
6c5fed35 1059 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
12e364b9 1060
6c5fed35
BR
1061 bus_info->flags.server = inmsg->hdr.flags.server;
1062 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1063 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1064 bus_info->chan_info.channel_type_uuid =
9b1caee7 1065 cmd->create_bus.bus_data_type_uuid;
6c5fed35 1066 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
12e364b9 1067
6c5fed35 1068 list_add(&bus_info->entry, &bus_info_list);
12e364b9 1069
6c5fed35 1070 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1071
6c5fed35
BR
1072cleanup:
1073 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1074 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1075}
1076
1077static void
3ab47701 1078bus_destroy(struct controlvm_message *inmsg)
12e364b9 1079{
2ea5117b 1080 struct controlvm_message_packet *cmd = &inmsg->cmd;
dff54cd6
BR
1081 ulong bus_no = cmd->destroy_bus.bus_no;
1082 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1083 int rc = CONTROLVM_RESP_SUCCESS;
1084
dff54cd6
BR
1085 bus_info = findbus(&bus_info_list, bus_no);
1086 if (!bus_info)
22ad57ba 1087 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1088 else if (bus_info->state.created == 0)
22ad57ba 1089 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1090
dff54cd6 1091 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1092 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1093}
1094
1095static void
317d9614
BR
1096bus_configure(struct controlvm_message *inmsg,
1097 struct parser_context *parser_ctx)
12e364b9 1098{
2ea5117b
BR
1099 struct controlvm_message_packet *cmd = &inmsg->cmd;
1100 ulong busNo = cmd->configure_bus.bus_no;
33192fa1 1101 struct visorchipset_bus_info *pBusInfo = NULL;
12e364b9
KC
1102 int rc = CONTROLVM_RESP_SUCCESS;
1103 char s[99];
1104
2ea5117b 1105 busNo = cmd->configure_bus.bus_no;
12e364b9
KC
1106 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, busNo, POSTCODE_SEVERITY_INFO);
1107
1390b88c 1108 pBusInfo = findbus(&bus_info_list, busNo);
12e364b9 1109 if (!pBusInfo) {
12e364b9
KC
1110 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1111 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1112 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1113 goto Away;
12e364b9
KC
1114 }
1115 if (pBusInfo->state.created == 0) {
12e364b9
KC
1116 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1117 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1118 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1119 goto Away;
12e364b9
KC
1120 }
1121 /* TBD - add this check to other commands also... */
33192fa1 1122 if (pBusInfo->pending_msg_hdr.id != CONTROLVM_INVALID) {
12e364b9
KC
1123 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, busNo,
1124 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1125 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1126 goto Away;
12e364b9
KC
1127 }
1128
33192fa1
BR
1129 pBusInfo->partition_handle = cmd->configure_bus.guest_handle;
1130 pBusInfo->partition_uuid = parser_id_get(parser_ctx);
12e364b9
KC
1131 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1132 pBusInfo->name = parser_string_get(parser_ctx);
1133
33192fa1 1134 visorchannel_uuid_id(&pBusInfo->partition_uuid, s);
12e364b9
KC
1135 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, busNo, POSTCODE_SEVERITY_INFO);
1136Away:
1137 bus_epilog(busNo, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1138 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1139}
1140
1141static void
3ab47701 1142my_device_create(struct controlvm_message *inmsg)
12e364b9 1143{
2ea5117b 1144 struct controlvm_message_packet *cmd = &inmsg->cmd;
f91b9262
BR
1145 ulong busNo = cmd->create_device.bus_no;
1146 ulong devNo = cmd->create_device.dev_no;
246e0cd0 1147 struct visorchipset_device_info *pDevInfo = NULL;
33192fa1 1148 struct visorchipset_bus_info *pBusInfo = NULL;
12e364b9
KC
1149 int rc = CONTROLVM_RESP_SUCCESS;
1150
1390b88c 1151 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
12e364b9 1152 if (pDevInfo && (pDevInfo->state.created == 1)) {
12e364b9
KC
1153 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1154 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1155 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1156 goto Away;
12e364b9 1157 }
1390b88c 1158 pBusInfo = findbus(&bus_info_list, busNo);
12e364b9 1159 if (!pBusInfo) {
12e364b9
KC
1160 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1161 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1162 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1163 goto Away;
12e364b9
KC
1164 }
1165 if (pBusInfo->state.created == 0) {
12e364b9
KC
1166 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1167 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1168 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1169 goto Away;
12e364b9 1170 }
246e0cd0 1171 pDevInfo = kzalloc(sizeof(struct visorchipset_device_info), GFP_KERNEL);
38f736e9 1172 if (!pDevInfo) {
12e364b9
KC
1173 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, devNo, busNo,
1174 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1175 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1176 goto Away;
12e364b9 1177 }
97a84f12 1178
12e364b9 1179 INIT_LIST_HEAD(&pDevInfo->entry);
246e0cd0
BR
1180 pDevInfo->bus_no = busNo;
1181 pDevInfo->dev_no = devNo;
1182 pDevInfo->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
12e364b9
KC
1183 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, devNo, busNo,
1184 POSTCODE_SEVERITY_INFO);
1185
98d7b594 1186 if (inmsg->hdr.flags.test_message == 1)
246e0cd0 1187 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1188 else
246e0cd0
BR
1189 pDevInfo->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1190 pDevInfo->chan_info.channel_addr = cmd->create_device.channel_addr;
1191 pDevInfo->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1192 pDevInfo->chan_info.channel_type_uuid =
9b1caee7 1193 cmd->create_device.data_type_uuid;
246e0cd0 1194 pDevInfo->chan_info.intr = cmd->create_device.intr;
1390b88c 1195 list_add(&pDevInfo->entry, &dev_info_list);
12e364b9
KC
1196 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, devNo, busNo,
1197 POSTCODE_SEVERITY_INFO);
1198Away:
1199 /* get the bus and devNo for DiagPool channel */
930a021f
SM
1200 if (pDevInfo &&
1201 is_diagpool_channel(pDevInfo->chan_info.channel_type_uuid)) {
83d48905
BR
1202 g_diagpool_bus_no = busNo;
1203 g_diagpool_dev_no = devNo;
12e364b9 1204 }
bd0d2dcc 1205 device_epilog(busNo, devNo, segment_state_running,
12e364b9 1206 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
98d7b594 1207 inmsg->hdr.flags.response_expected == 1,
246e0cd0 1208 FOR_VISORBUS(pDevInfo->chan_info.channel_type_uuid));
12e364b9
KC
1209}
1210
1211static void
3ab47701 1212my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1213{
2ea5117b
BR
1214 struct controlvm_message_packet *cmd = &inmsg->cmd;
1215 ulong busNo = cmd->device_change_state.bus_no;
1216 ulong devNo = cmd->device_change_state.dev_no;
1217 struct spar_segment_state state = cmd->device_change_state.state;
246e0cd0 1218 struct visorchipset_device_info *pDevInfo = NULL;
12e364b9
KC
1219 int rc = CONTROLVM_RESP_SUCCESS;
1220
1390b88c 1221 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
12e364b9 1222 if (!pDevInfo) {
12e364b9
KC
1223 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1224 POSTCODE_SEVERITY_ERR);
22ad57ba
KC
1225 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1226 goto Away;
12e364b9
KC
1227 }
1228 if (pDevInfo->state.created == 0) {
12e364b9
KC
1229 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, devNo, busNo,
1230 POSTCODE_SEVERITY_ERR);
22ad57ba 1231 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9
KC
1232 }
1233Away:
1234 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
1235 device_epilog(busNo, devNo, state, CONTROLVM_DEVICE_CHANGESTATE,
1236 &inmsg->hdr, rc,
98d7b594 1237 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1238 FOR_VISORBUS(
246e0cd0 1239 pDevInfo->chan_info.channel_type_uuid));
12e364b9
KC
1240}
1241
1242static void
3ab47701 1243my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1244{
2ea5117b
BR
1245 struct controlvm_message_packet *cmd = &inmsg->cmd;
1246 ulong busNo = cmd->destroy_device.bus_no;
1247 ulong devNo = cmd->destroy_device.dev_no;
246e0cd0 1248 struct visorchipset_device_info *pDevInfo = NULL;
12e364b9
KC
1249 int rc = CONTROLVM_RESP_SUCCESS;
1250
1390b88c 1251 pDevInfo = finddevice(&dev_info_list, busNo, devNo);
12e364b9 1252 if (!pDevInfo) {
22ad57ba
KC
1253 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1254 goto Away;
12e364b9 1255 }
75c1f8b7 1256 if (pDevInfo->state.created == 0)
22ad57ba 1257 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9
KC
1258
1259Away:
1260 if ((rc >= CONTROLVM_RESP_SUCCESS) && pDevInfo)
bd0d2dcc 1261 device_epilog(busNo, devNo, segment_state_running,
12e364b9 1262 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
98d7b594 1263 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1264 FOR_VISORBUS(
246e0cd0 1265 pDevInfo->chan_info.channel_type_uuid));
12e364b9
KC
1266}
1267
1268/* When provided with the physical address of the controlvm channel
1269 * (phys_addr), the offset to the payload area we need to manage
1270 * (offset), and the size of this payload area (bytes), fills in the
84b11dfd 1271 * controlvm_payload_info struct. Returns TRUE for success or FALSE
12e364b9
KC
1272 * for failure.
1273 */
1274static int
5fc0229a 1275initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
84b11dfd 1276 struct controlvm_payload_info *info)
12e364b9 1277{
c242233e 1278 u8 __iomem *payload = NULL;
12e364b9
KC
1279 int rc = CONTROLVM_RESP_SUCCESS;
1280
38f736e9 1281 if (!info) {
22ad57ba
KC
1282 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1283 goto Away;
12e364b9 1284 }
84b11dfd 1285 memset(info, 0, sizeof(struct controlvm_payload_info));
12e364b9 1286 if ((offset == 0) || (bytes == 0)) {
22ad57ba
KC
1287 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1288 goto Away;
12e364b9
KC
1289 }
1290 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1291 if (!payload) {
22ad57ba
KC
1292 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1293 goto Away;
12e364b9
KC
1294 }
1295
1296 info->offset = offset;
1297 info->bytes = bytes;
1298 info->ptr = payload;
12e364b9
KC
1299
1300Away:
1301 if (rc < 0) {
1302 if (payload != NULL) {
1303 iounmap(payload);
1304 payload = NULL;
1305 }
1306 }
1307 return rc;
1308}
1309
1310static void
84b11dfd 1311destroy_controlvm_payload_info(struct controlvm_payload_info *info)
12e364b9
KC
1312{
1313 if (info->ptr != NULL) {
1314 iounmap(info->ptr);
1315 info->ptr = NULL;
1316 }
84b11dfd 1317 memset(info, 0, sizeof(struct controlvm_payload_info));
12e364b9
KC
1318}
1319
1320static void
1321initialize_controlvm_payload(void)
1322{
c3d9a224 1323 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
5fc0229a 1324 u64 payloadOffset = 0;
b3c55b13 1325 u32 payloadBytes = 0;
26eb2c0c 1326
c3d9a224 1327 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1328 offsetof(struct spar_controlvm_channel_protocol,
1329 request_payload_offset),
12e364b9 1330 &payloadOffset, sizeof(payloadOffset)) < 0) {
12e364b9
KC
1331 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1332 POSTCODE_SEVERITY_ERR);
1333 return;
1334 }
c3d9a224 1335 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1336 offsetof(struct spar_controlvm_channel_protocol,
1337 request_payload_bytes),
12e364b9 1338 &payloadBytes, sizeof(payloadBytes)) < 0) {
12e364b9
KC
1339 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1340 POSTCODE_SEVERITY_ERR);
1341 return;
1342 }
1343 initialize_controlvm_payload_info(phys_addr,
1344 payloadOffset, payloadBytes,
84982fbf 1345 &controlvm_payload_info);
12e364b9
KC
1346}
1347
1348/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1349 * Returns CONTROLVM_RESP_xxx code.
1350 */
1351int
1352visorchipset_chipset_ready(void)
1353{
1354 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1355 return CONTROLVM_RESP_SUCCESS;
1356}
1357EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1358
1359int
1360visorchipset_chipset_selftest(void)
1361{
1362 char env_selftest[20];
1363 char *envp[] = { env_selftest, NULL };
26eb2c0c 1364
12e364b9
KC
1365 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1366 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1367 envp);
1368 return CONTROLVM_RESP_SUCCESS;
1369}
1370EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1371
1372/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1373 * Returns CONTROLVM_RESP_xxx code.
1374 */
1375int
1376visorchipset_chipset_notready(void)
1377{
1378 kobject_uevent(&Visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1379 return CONTROLVM_RESP_SUCCESS;
1380}
1381EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1382
1383static void
98d7b594 1384chipset_ready(struct controlvm_message_header *msgHdr)
12e364b9
KC
1385{
1386 int rc = visorchipset_chipset_ready();
26eb2c0c 1387
12e364b9
KC
1388 if (rc != CONTROLVM_RESP_SUCCESS)
1389 rc = -rc;
98d7b594 1390 if (msgHdr->flags.response_expected && !visorchipset_holdchipsetready)
12e364b9 1391 controlvm_respond(msgHdr, rc);
98d7b594 1392 if (msgHdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1393 /* Send CHIPSET_READY response when all modules have been loaded
1394 * and disks mounted for the partition
1395 */
da021f02 1396 g_chipset_msg_hdr = *msgHdr;
12e364b9
KC
1397 }
1398}
1399
1400static void
98d7b594 1401chipset_selftest(struct controlvm_message_header *msgHdr)
12e364b9
KC
1402{
1403 int rc = visorchipset_chipset_selftest();
26eb2c0c 1404
12e364b9
KC
1405 if (rc != CONTROLVM_RESP_SUCCESS)
1406 rc = -rc;
98d7b594 1407 if (msgHdr->flags.response_expected)
12e364b9
KC
1408 controlvm_respond(msgHdr, rc);
1409}
1410
1411static void
98d7b594 1412chipset_notready(struct controlvm_message_header *msgHdr)
12e364b9
KC
1413{
1414 int rc = visorchipset_chipset_notready();
26eb2c0c 1415
12e364b9
KC
1416 if (rc != CONTROLVM_RESP_SUCCESS)
1417 rc = -rc;
98d7b594 1418 if (msgHdr->flags.response_expected)
12e364b9
KC
1419 controlvm_respond(msgHdr, rc);
1420}
1421
1422/* This is your "one-stop" shop for grabbing the next message from the
1423 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1424 */
1425static BOOL
3ab47701 1426read_controlvm_event(struct controlvm_message *msg)
12e364b9 1427{
c3d9a224 1428 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1429 CONTROLVM_QUEUE_EVENT, msg)) {
1430 /* got a message */
0aca7844 1431 if (msg->hdr.flags.test_message == 1)
12e364b9 1432 return FALSE;
e22a4a0f 1433 return TRUE;
12e364b9
KC
1434 }
1435 return FALSE;
1436}
1437
1438/*
1439 * The general parahotplug flow works as follows. The visorchipset
1440 * driver receives a DEVICE_CHANGESTATE message from Command
1441 * specifying a physical device to enable or disable. The CONTROLVM
1442 * message handler calls parahotplug_process_message, which then adds
1443 * the message to a global list and kicks off a udev event which
1444 * causes a user level script to enable or disable the specified
1445 * device. The udev script then writes to
1446 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1447 * to get called, at which point the appropriate CONTROLVM message is
1448 * retrieved from the list and responded to.
1449 */
1450
1451#define PARAHOTPLUG_TIMEOUT_MS 2000
1452
1453/*
1454 * Generate unique int to match an outstanding CONTROLVM message with a
1455 * udev script /proc response
1456 */
1457static int
1458parahotplug_next_id(void)
1459{
1460 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1461
12e364b9
KC
1462 return atomic_inc_return(&id);
1463}
1464
1465/*
1466 * Returns the time (in jiffies) when a CONTROLVM message on the list
1467 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1468 */
1469static unsigned long
1470parahotplug_next_expiration(void)
1471{
2cc1a1b3 1472 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1473}
1474
1475/*
1476 * Create a parahotplug_request, which is basically a wrapper for a
1477 * CONTROLVM_MESSAGE that we can stick on a list
1478 */
1479static struct parahotplug_request *
3ab47701 1480parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1481{
ea0dcfcf
QL
1482 struct parahotplug_request *req;
1483
1484 req = kmalloc(sizeof(*req), GFP_KERNEL|__GFP_NORETRY);
38f736e9 1485 if (!req)
12e364b9
KC
1486 return NULL;
1487
1488 req->id = parahotplug_next_id();
1489 req->expiration = parahotplug_next_expiration();
1490 req->msg = *msg;
1491
1492 return req;
1493}
1494
1495/*
1496 * Free a parahotplug_request.
1497 */
1498static void
1499parahotplug_request_destroy(struct parahotplug_request *req)
1500{
1501 kfree(req);
1502}
1503
1504/*
1505 * Cause uevent to run the user level script to do the disable/enable
1506 * specified in (the CONTROLVM message in) the specified
1507 * parahotplug_request
1508 */
1509static void
1510parahotplug_request_kickoff(struct parahotplug_request *req)
1511{
2ea5117b 1512 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1513 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1514 env_func[40];
1515 char *envp[] = {
1516 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1517 };
1518
1519 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1520 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1521 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1522 cmd->device_change_state.state.active);
12e364b9 1523 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1524 cmd->device_change_state.bus_no);
12e364b9 1525 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1526 cmd->device_change_state.dev_no >> 3);
12e364b9 1527 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1528 cmd->device_change_state.dev_no & 0x7);
12e364b9 1529
12e364b9
KC
1530 kobject_uevent_env(&Visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1531 envp);
1532}
1533
1534/*
1535 * Remove any request from the list that's been on there too long and
1536 * respond with an error.
1537 */
1538static void
1539parahotplug_process_list(void)
1540{
1541 struct list_head *pos = NULL;
1542 struct list_head *tmp = NULL;
1543
1544 spin_lock(&Parahotplug_request_list_lock);
1545
1546 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1547 struct parahotplug_request *req =
1548 list_entry(pos, struct parahotplug_request, list);
1549 if (time_after_eq(jiffies, req->expiration)) {
1550 list_del(pos);
98d7b594 1551 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1552 controlvm_respond_physdev_changestate(
1553 &req->msg.hdr,
1554 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
2ea5117b 1555 req->msg.cmd.device_change_state.state);
12e364b9
KC
1556 parahotplug_request_destroy(req);
1557 }
1558 }
1559
1560 spin_unlock(&Parahotplug_request_list_lock);
1561}
1562
1563/*
1564 * Called from the /proc handler, which means the user script has
1565 * finished the enable/disable. Find the matching identifier, and
1566 * respond to the CONTROLVM message with success.
1567 */
1568static int
b06bdf7d 1569parahotplug_request_complete(int id, u16 active)
12e364b9
KC
1570{
1571 struct list_head *pos = NULL;
1572 struct list_head *tmp = NULL;
1573
1574 spin_lock(&Parahotplug_request_list_lock);
1575
1576 /* Look for a request matching "id". */
1577 list_for_each_safe(pos, tmp, &Parahotplug_request_list) {
1578 struct parahotplug_request *req =
1579 list_entry(pos, struct parahotplug_request, list);
1580 if (req->id == id) {
1581 /* Found a match. Remove it from the list and
1582 * respond.
1583 */
1584 list_del(pos);
1585 spin_unlock(&Parahotplug_request_list_lock);
2ea5117b 1586 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1587 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1588 controlvm_respond_physdev_changestate(
1589 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1590 req->msg.cmd.device_change_state.state);
12e364b9
KC
1591 parahotplug_request_destroy(req);
1592 return 0;
1593 }
1594 }
1595
1596 spin_unlock(&Parahotplug_request_list_lock);
1597 return -1;
1598}
1599
1600/*
1601 * Enables or disables a PCI device by kicking off a udev script
1602 */
bd5b9b32 1603static void
3ab47701 1604parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1605{
1606 struct parahotplug_request *req;
1607
1608 req = parahotplug_request_create(inmsg);
1609
38f736e9 1610 if (!req)
12e364b9 1611 return;
12e364b9 1612
2ea5117b 1613 if (inmsg->cmd.device_change_state.state.active) {
12e364b9
KC
1614 /* For enable messages, just respond with success
1615 * right away. This is a bit of a hack, but there are
1616 * issues with the early enable messages we get (with
1617 * either the udev script not detecting that the device
1618 * is up, or not getting called at all). Fortunately
1619 * the messages that get lost don't matter anyway, as
1620 * devices are automatically enabled at
1621 * initialization.
1622 */
1623 parahotplug_request_kickoff(req);
1624 controlvm_respond_physdev_changestate(&inmsg->hdr,
2ea5117b
BR
1625 CONTROLVM_RESP_SUCCESS, inmsg->cmd.
1626 device_change_state.state);
12e364b9
KC
1627 parahotplug_request_destroy(req);
1628 } else {
1629 /* For disable messages, add the request to the
1630 * request list before kicking off the udev script. It
1631 * won't get responded to until the script has
1632 * indicated it's done.
1633 */
1634 spin_lock(&Parahotplug_request_list_lock);
1635 list_add_tail(&(req->list), &Parahotplug_request_list);
1636 spin_unlock(&Parahotplug_request_list_lock);
1637
1638 parahotplug_request_kickoff(req);
1639 }
1640}
1641
12e364b9
KC
1642/* Process a controlvm message.
1643 * Return result:
1644 * FALSE - this function will return FALSE only in the case where the
1645 * controlvm message was NOT processed, but processing must be
1646 * retried before reading the next controlvm message; a
1647 * scenario where this can occur is when we need to throttle
1648 * the allocation of memory in which to copy out controlvm
1649 * payload data
1650 * TRUE - processing of the controlvm message completed,
1651 * either successfully or with an error.
1652 */
1653static BOOL
3ab47701 1654handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
12e364b9 1655{
2ea5117b 1656 struct controlvm_message_packet *cmd = &inmsg.cmd;
5fc0229a 1657 u64 parametersAddr = 0;
b3c55b13 1658 u32 parametersBytes = 0;
317d9614 1659 struct parser_context *parser_ctx = NULL;
12e364b9 1660 BOOL isLocalAddr = FALSE;
3ab47701 1661 struct controlvm_message ackmsg;
12e364b9
KC
1662
1663 /* create parsing context if necessary */
98d7b594 1664 isLocalAddr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1665 if (channel_addr == 0)
12e364b9 1666 return TRUE;
98d7b594
BR
1667 parametersAddr = channel_addr + inmsg.hdr.payload_vm_offset;
1668 parametersBytes = inmsg.hdr.payload_bytes;
12e364b9
KC
1669
1670 /* Parameter and channel addresses within test messages actually lie
1671 * within our OS-controlled memory. We need to know that, because it
1672 * makes a difference in how we compute the virtual address.
1673 */
1674 if (parametersAddr != 0 && parametersBytes != 0) {
1675 BOOL retry = FALSE;
26eb2c0c 1676
12e364b9 1677 parser_ctx =
b2d97e4b 1678 parser_init_byte_stream(parametersAddr, parametersBytes,
12e364b9 1679 isLocalAddr, &retry);
1b08872e
BR
1680 if (!parser_ctx && retry)
1681 return FALSE;
12e364b9
KC
1682 }
1683
1684 if (!isLocalAddr) {
1685 controlvm_init_response(&ackmsg, &inmsg.hdr,
1686 CONTROLVM_RESP_SUCCESS);
c3d9a224
BR
1687 if (controlvm_channel)
1688 visorchannel_signalinsert(controlvm_channel,
1b08872e
BR
1689 CONTROLVM_QUEUE_ACK,
1690 &ackmsg);
12e364b9 1691 }
98d7b594 1692 switch (inmsg.hdr.id) {
12e364b9 1693 case CONTROLVM_CHIPSET_INIT:
12e364b9
KC
1694 chipset_init(&inmsg);
1695 break;
1696 case CONTROLVM_BUS_CREATE:
12e364b9
KC
1697 bus_create(&inmsg);
1698 break;
1699 case CONTROLVM_BUS_DESTROY:
12e364b9
KC
1700 bus_destroy(&inmsg);
1701 break;
1702 case CONTROLVM_BUS_CONFIGURE:
12e364b9
KC
1703 bus_configure(&inmsg, parser_ctx);
1704 break;
1705 case CONTROLVM_DEVICE_CREATE:
12e364b9
KC
1706 my_device_create(&inmsg);
1707 break;
1708 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1709 if (cmd->device_change_state.flags.phys_device) {
12e364b9
KC
1710 parahotplug_process_message(&inmsg);
1711 } else {
12e364b9
KC
1712 /* save the hdr and cmd structures for later use */
1713 /* when sending back the response to Command */
1714 my_device_changestate(&inmsg);
da021f02 1715 g_diag_msg_hdr = inmsg.hdr;
4f44b72d 1716 g_devicechangestate_packet = inmsg.cmd;
12e364b9
KC
1717 break;
1718 }
1719 break;
1720 case CONTROLVM_DEVICE_DESTROY:
12e364b9
KC
1721 my_device_destroy(&inmsg);
1722 break;
1723 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1724 /* no op for now, just send a response indicating success */
98d7b594 1725 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1726 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1727 break;
1728 case CONTROLVM_CHIPSET_READY:
12e364b9
KC
1729 chipset_ready(&inmsg.hdr);
1730 break;
1731 case CONTROLVM_CHIPSET_SELFTEST:
12e364b9
KC
1732 chipset_selftest(&inmsg.hdr);
1733 break;
1734 case CONTROLVM_CHIPSET_STOP:
12e364b9
KC
1735 chipset_notready(&inmsg.hdr);
1736 break;
1737 default:
98d7b594 1738 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1739 controlvm_respond(&inmsg.hdr,
1740 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1741 break;
1742 }
1743
38f736e9 1744 if (parser_ctx) {
12e364b9
KC
1745 parser_done(parser_ctx);
1746 parser_ctx = NULL;
1747 }
1748 return TRUE;
1749}
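/* Minimal sketch of the intended caller pattern (it mirrors the logic in
 * controlvm_periodic_work() below): a FALSE return means "retry this same
 * message later", so the caller stashes it instead of reading a new one:
 *
 *	if (!handle_command(inmsg,
 *			    visorchannel_get_physaddr(controlvm_channel))) {
 *		ControlVm_Pending_Msg = inmsg;
 *		ControlVm_Pending_Msg_Valid = TRUE;
 *	}
 */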
1750
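/* Ask the hypervisor, via a vmcall, for the guest-physical address of the
 * ControlVM channel; a return of 0 means the vmcall failed.
 */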
d746cb55 1751static HOSTADDRESS controlvm_get_channel_address(void)
524b0b63 1752{
5fc0229a 1753 u64 addr = 0;
b3c55b13 1754 u32 size = 0;
524b0b63 1755
0aca7844 1756 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1757 return 0;
0aca7844 1758
524b0b63
BR
1759 return addr;
1760}
1761
12e364b9
KC
1762static void
1763controlvm_periodic_work(struct work_struct *work)
1764{
3ab47701 1765 struct controlvm_message inmsg;
12e364b9
KC
1766 BOOL gotACommand = FALSE;
1767 BOOL handle_command_failed = FALSE;
5fc0229a 1768 static u64 Poll_Count;
12e364b9
KC
1769
1770 /* make sure visorbus server is registered for controlvm callbacks */
1771 if (visorchipset_serverregwait && !serverregistered)
097f4c19 1772 goto Away;
12e364b9
KC
 1773 /* make sure visorclientbus server is registered for controlvm
1774 * callbacks
1775 */
1776 if (visorchipset_clientregwait && !clientregistered)
097f4c19 1777 goto Away;
12e364b9 1778
12e364b9 1779 Poll_Count++;
8a1182eb 1780 	if (Poll_Count < 250)
097f4c19 1781 		goto Away;	/* defer controlvm processing for the first ~250 polls */
12e364b9
KC
1784
1785 /* Check events to determine if response to CHIPSET_READY
1786 * should be sent
1787 */
0639ba67
BR
1788 if (visorchipset_holdchipsetready &&
1789 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1790 if (check_chipset_events() == 1) {
da021f02 1791 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1792 clear_chipset_events();
da021f02 1793 memset(&g_chipset_msg_hdr, 0,
98d7b594 1794 sizeof(struct controlvm_message_header));
12e364b9
KC
1795 }
1796 }
1797
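	/* drain and discard anything already sitting on the response queue */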
c3d9a224 1798 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1799 CONTROLVM_QUEUE_RESPONSE,
c3d9a224
BR
1800 &inmsg))
1801 ;
8a1182eb
BR
1802 if (!gotACommand) {
1803 if (ControlVm_Pending_Msg_Valid) {
1804 /* we throttled processing of a prior
1805 * msg, so try to process it again
1806 * rather than reading a new one
1807 */
1808 inmsg = ControlVm_Pending_Msg;
1809 ControlVm_Pending_Msg_Valid = FALSE;
1810 gotACommand = TRUE;
75c1f8b7 1811 } else {
8a1182eb 1812 gotACommand = read_controlvm_event(&inmsg);
75c1f8b7 1813 }
8a1182eb 1814 }
12e364b9
KC
1815
1816 handle_command_failed = FALSE;
1817 while (gotACommand && (!handle_command_failed)) {
b53e0e93 1818 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1819 if (handle_command(inmsg,
1820 visorchannel_get_physaddr
c3d9a224 1821 (controlvm_channel)))
8a1182eb
BR
1822 gotACommand = read_controlvm_event(&inmsg);
1823 else {
1824 /* this is a scenario where throttling
1825 * is required, but probably NOT an
1826 * error...; we stash the current
1827 * controlvm msg so we will attempt to
1828 * reprocess it on our next loop
1829 */
1830 handle_command_failed = TRUE;
1831 ControlVm_Pending_Msg = inmsg;
1832 ControlVm_Pending_Msg_Valid = TRUE;
12e364b9
KC
1833 }
1834 }
1835
1836 /* parahotplug_worker */
1837 parahotplug_process_list();
1838
12e364b9
KC
1839Away:
1840
1841 if (time_after(jiffies,
b53e0e93 1842 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1843 /* it's been longer than MIN_IDLE_SECONDS since we
1844 * processed our last controlvm message; slow down the
1845 * polling
1846 */
911e213e
BR
1847 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1848 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1849 } else {
911e213e
BR
1850 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1851 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1852 }
1853
9232d2d6
BR
1854 queue_delayed_work(periodic_controlvm_workqueue,
1855 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1856}
1857
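/* When booting as a crash (kdump) kernel, the ControlVM channel header holds
 * a saved-message count (expected to be CONTROLVM_CRASHMSG_MAX) and an offset;
 * at that offset sit two consecutive struct controlvm_message entries: first
 * the bus-create message, then the device-create message for the storage
 * device. This worker replays them so the crash kernel has its storage bus
 * and device.
 */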
1858static void
1859setup_crash_devices_work_queue(struct work_struct *work)
1860{
3ab47701
BR
1861 struct controlvm_message localCrashCreateBusMsg;
1862 struct controlvm_message localCrashCreateDevMsg;
1863 struct controlvm_message msg;
b3c55b13 1864 u32 localSavedCrashMsgOffset;
b06bdf7d 1865 u16 localSavedCrashMsgCount;
12e364b9
KC
1866
1867 /* make sure visorbus server is registered for controlvm callbacks */
1868 if (visorchipset_serverregwait && !serverregistered)
097f4c19 1869 goto Away;
12e364b9
KC
1870
 1871 /* make sure visorclientbus server is registered for controlvm
1872 * callbacks
1873 */
1874 if (visorchipset_clientregwait && !clientregistered)
097f4c19 1875 goto Away;
12e364b9
KC
1876
1877 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1878
1879 /* send init chipset msg */
98d7b594 1880 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1881 msg.cmd.init_chipset.bus_count = 23;
1882 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1883
1884 chipset_init(&msg);
1885
12e364b9 1886 /* get saved message count */
c3d9a224 1887 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1888 offsetof(struct spar_controlvm_channel_protocol,
1889 saved_crash_message_count),
b06bdf7d 1890 &localSavedCrashMsgCount, sizeof(u16)) < 0) {
12e364b9
KC
1891 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1892 POSTCODE_SEVERITY_ERR);
1893 return;
1894 }
1895
1896 if (localSavedCrashMsgCount != CONTROLVM_CRASHMSG_MAX) {
12e364b9
KC
1897 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1898 localSavedCrashMsgCount,
1899 POSTCODE_SEVERITY_ERR);
1900 return;
1901 }
1902
1903 /* get saved crash message offset */
c3d9a224 1904 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1905 offsetof(struct spar_controlvm_channel_protocol,
1906 saved_crash_message_offset),
b3c55b13 1907 &localSavedCrashMsgOffset, sizeof(u32)) < 0) {
12e364b9
KC
1908 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1909 POSTCODE_SEVERITY_ERR);
1910 return;
1911 }
1912
1913 /* read create device message for storage bus offset */
c3d9a224 1914 if (visorchannel_read(controlvm_channel,
12e364b9
KC
1915 localSavedCrashMsgOffset,
1916 &localCrashCreateBusMsg,
3ab47701 1917 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1918 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1919 POSTCODE_SEVERITY_ERR);
1920 return;
1921 }
1922
1923 /* read create device message for storage device */
c3d9a224 1924 if (visorchannel_read(controlvm_channel,
12e364b9 1925 localSavedCrashMsgOffset +
3ab47701 1926 sizeof(struct controlvm_message),
12e364b9 1927 &localCrashCreateDevMsg,
3ab47701 1928 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1929 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1930 POSTCODE_SEVERITY_ERR);
1931 return;
1932 }
1933
1934 /* reuse IOVM create bus message */
75c1f8b7 1935 if (localCrashCreateBusMsg.cmd.create_bus.channel_addr != 0) {
12e364b9 1936 bus_create(&localCrashCreateBusMsg);
75c1f8b7 1937 } else {
12e364b9
KC
1938 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1939 POSTCODE_SEVERITY_ERR);
1940 return;
1941 }
1942
1943 /* reuse create device message for storage device */
75c1f8b7 1944 if (localCrashCreateDevMsg.cmd.create_device.channel_addr != 0) {
12e364b9 1945 my_device_create(&localCrashCreateDevMsg);
75c1f8b7 1946 } else {
12e364b9
KC
1947 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1948 POSTCODE_SEVERITY_ERR);
1949 return;
1950 }
12e364b9
KC
1951 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1952 return;
1953
1954Away:
1955
911e213e 1956 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1957
9232d2d6
BR
1958 queue_delayed_work(periodic_controlvm_workqueue,
1959 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1960}
1961
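/* Thin wrappers that report the outcome of each CONTROLVM request back
 * through bus_responder()/device_responder() with the matching message id.
 */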
1962static void
1963bus_create_response(ulong busNo, int response)
1964{
1965 bus_responder(CONTROLVM_BUS_CREATE, busNo, response);
1966}
1967
1968static void
1969bus_destroy_response(ulong busNo, int response)
1970{
1971 bus_responder(CONTROLVM_BUS_DESTROY, busNo, response);
1972}
1973
1974static void
1975device_create_response(ulong busNo, ulong devNo, int response)
1976{
1977 device_responder(CONTROLVM_DEVICE_CREATE, busNo, devNo, response);
1978}
1979
1980static void
1981device_destroy_response(ulong busNo, ulong devNo, int response)
1982{
1983 device_responder(CONTROLVM_DEVICE_DESTROY, busNo, devNo, response);
1984}
1985
1986void
8420f417 1987visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
12e364b9 1988{
12e364b9 1989 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 1990 bus_no, dev_no, response,
bd0d2dcc 1991 segment_state_standby);
12e364b9 1992}
927c7927 1993EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
12e364b9
KC
1994
1995static void
1996device_resume_response(ulong busNo, ulong devNo, int response)
1997{
1998 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1999 busNo, devNo, response,
bd0d2dcc 2000 segment_state_running);
12e364b9
KC
2001}
2002
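/* Exported accessors for the cached bus/device bookkeeping: copy an entry
 * out, or attach a bus-driver-private context pointer to it. All of them
 * return FALSE when the requested bus/device is not in the list.
 */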
2003BOOL
77db7127 2004visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2005{
1390b88c 2006 void *p = findbus(&bus_info_list, bus_no);
26eb2c0c 2007
0aca7844 2008 if (!p)
12e364b9 2009 return FALSE;
77db7127 2010 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
12e364b9
KC
2011 return TRUE;
2012}
2013EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2014
2015BOOL
58dd8f2d 2016visorchipset_set_bus_context(ulong bus_no, void *context)
12e364b9 2017{
1390b88c 2018 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
26eb2c0c 2019
0aca7844 2020 if (!p)
12e364b9 2021 return FALSE;
12e364b9
KC
2022 p->bus_driver_context = context;
2023 return TRUE;
2024}
2025EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2026
2027BOOL
b486df19
BR
2028visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2029 struct visorchipset_device_info *dev_info)
12e364b9 2030{
1390b88c 2031 void *p = finddevice(&dev_info_list, bus_no, dev_no);
26eb2c0c 2032
0aca7844 2033 if (!p)
12e364b9 2034 return FALSE;
b486df19 2035 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
12e364b9
KC
2036 return TRUE;
2037}
2038EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2039
2040BOOL
cf0bd0b5 2041visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
12e364b9 2042{
246e0cd0 2043 struct visorchipset_device_info *p =
1390b88c 2044 finddevice(&dev_info_list, bus_no, dev_no);
26eb2c0c 2045
0aca7844 2046 if (!p)
12e364b9 2047 return FALSE;
12e364b9
KC
2048 p->bus_driver_context = context;
2049 return TRUE;
2050}
2051EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2052
2053/* Generic wrapper function for allocating memory from a kmem_cache pool.
2054 */
2055void *
2056visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2057 char *fn, int ln)
2058{
2059 gfp_t gfp;
2060 void *p;
2061
2062 if (ok_to_block)
2063 gfp = GFP_KERNEL;
2064 else
2065 gfp = GFP_ATOMIC;
2066 /* __GFP_NORETRY means "ok to fail", meaning
2067 * kmem_cache_alloc() can return NULL, implying the caller CAN
2068 * cope with failure. If you do NOT specify __GFP_NORETRY,
2069 * Linux will go to extreme measures to get memory for you
2070 * (like, invoke oom killer), which will probably cripple the
2071 * system.
2072 */
2073 gfp |= __GFP_NORETRY;
2074 p = kmem_cache_alloc(pool, gfp);
0aca7844 2075 if (!p)
12e364b9 2076 return NULL;
0aca7844 2077
12e364b9
KC
2078 atomic_inc(&Visorchipset_cache_buffers_in_use);
2079 return p;
2080}
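/* Example (sketch, hypothetical caller): because __GFP_NORETRY is set above,
 * callers must treat a NULL return as a normal, recoverable outcome:
 *
 *	struct putfile_buffer_entry *entry =
 *		visorchipset_cache_alloc(Putfile_buffer_list_pool, TRUE,
 *					 __FILE__, __LINE__);
 *	if (!entry)
 *		return -ENOMEM;   (or defer/drop the work)
 */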
2081
2082/* Generic wrapper function for freeing memory from a kmem_cache pool.
2083 */
2084void
2085visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2086{
0aca7844 2087 if (!p)
12e364b9 2088 return;
0aca7844 2089
12e364b9
KC
2090 atomic_dec(&Visorchipset_cache_buffers_in_use);
2091 kmem_cache_free(pool, p);
2092}
2093
18b87ed1
BR
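/* The chipsetready interface accepts exactly two tokens from the support
 * script, CALLHOMEDISK_MOUNTED and MODULES_LOADED; each one latches the
 * corresponding chipset_events[] flag that check_chipset_events() looks at
 * before the (optionally held) CHIPSET_READY response is sent.
 */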
2094static ssize_t chipsetready_store(struct device *dev,
2095 struct device_attribute *attr, const char *buf, size_t count)
12e364b9 2096{
18b87ed1 2097 char msgtype[64];
12e364b9 2098
66e24b76
BR
2099 if (sscanf(buf, "%63s", msgtype) != 1)
2100 return -EINVAL;
2101
2102 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2103 chipset_events[0] = 1;
2104 return count;
2105 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2106 chipset_events[1] = 1;
2107 return count;
e22a4a0f
BR
2108 }
2109 return -EINVAL;
12e364b9
KC
2110}
2111
e56fa7cd
BR
2112/* The parahotplug/devicedisabled interface gets called by our support script
2113 * when an SR-IOV device has been shut down. The ID is passed to the script
2114 * and then passed back when the device has been removed.
2115 */
2116static ssize_t devicedisabled_store(struct device *dev,
2117 struct device_attribute *attr, const char *buf, size_t count)
2118{
2119 uint id;
2120
2121 if (kstrtouint(buf, 10, &id) != 0)
2122 return -EINVAL;
2123
2124 parahotplug_request_complete(id, 0);
2125 return count;
2126}
2127
2128/* The parahotplug/deviceenabled interface gets called by our support script
2129 * when an SR-IOV device has been recovered. The ID is passed to the script
2130 * and then passed back when the device has been brought back up.
2131 */
2132static ssize_t deviceenabled_store(struct device *dev,
2133 struct device_attribute *attr, const char *buf, size_t count)
2134{
2135 uint id;
2136
2137 if (kstrtouint(buf, 10, &id) != 0)
2138 return -EINVAL;
2139
2140 parahotplug_request_complete(id, 1);
2141 return count;
2142}
2143
12e364b9
KC
2144static int __init
2145visorchipset_init(void)
2146{
2147 int rc = 0, x = 0;
8a1182eb 2148 HOSTADDRESS addr;
12e364b9 2149
fcd0157e
KC
2150 if (!unisys_spar_platform)
2151 return -ENODEV;
2152
12e364b9
KC
2153 memset(&BusDev_Server_Notifiers, 0, sizeof(BusDev_Server_Notifiers));
2154 memset(&BusDev_Client_Notifiers, 0, sizeof(BusDev_Client_Notifiers));
84982fbf 2155 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
ea33b4ee
BR
2156 memset(&livedump_info, 0, sizeof(livedump_info));
2157 atomic_set(&livedump_info.buffers_in_use, 0);
12e364b9 2158
9f8d0e8b 2159 if (visorchipset_testvnic) {
9f8d0e8b
KC
2160 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2161 rc = x;
2162 goto Away;
2163 }
12e364b9 2164
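	/* Locate and validate the ControlVM channel before doing anything
	 * else; if the vmcall yields no address or the channel header does
	 * not check out, the driver cannot talk to the service partition,
	 * so fail initialization with -ENODEV.
	 */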
8a1182eb
BR
2165 addr = controlvm_get_channel_address();
2166 if (addr != 0) {
c3d9a224 2167 controlvm_channel =
8a1182eb
BR
2168 visorchannel_create_with_lock
2169 (addr,
d19642f6 2170 sizeof(struct spar_controlvm_channel_protocol),
5fbaa4b3 2171 spar_controlvm_channel_protocol_uuid);
93a84565 2172 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
c3d9a224 2173 visorchannel_get_header(controlvm_channel))) {
8a1182eb
BR
2174 initialize_controlvm_payload();
2175 } else {
c3d9a224
BR
2176 visorchannel_destroy(controlvm_channel);
2177 controlvm_channel = NULL;
8a1182eb
BR
2178 return -ENODEV;
2179 }
2180 } else {
8a1182eb
BR
2181 return -ENODEV;
2182 }
2183
12e364b9 2184 MajorDev = MKDEV(visorchipset_major, 0);
c3d9a224 2185 rc = visorchipset_file_init(MajorDev, &controlvm_channel);
4cb005a9 2186 if (rc < 0) {
4cb005a9
KC
2187 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2188 goto Away;
2189 }
9f8d0e8b 2190
da021f02 2191 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2192
da021f02 2193 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2194
da021f02 2195 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2196
12e364b9
KC
2197 Putfile_buffer_list_pool =
2198 kmem_cache_create(Putfile_buffer_list_pool_name,
2199 sizeof(struct putfile_buffer_entry),
2200 0, SLAB_HWCACHE_ALIGN, NULL);
2201 if (!Putfile_buffer_list_pool) {
4cb005a9
KC
2202 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
 2203 rc = -ENOMEM;
2204 goto Away;
12e364b9 2205 }
2098dbd1 2206 if (!visorchipset_disable_controlvm) {
12e364b9
KC
2207 /* if booting in a crash kernel */
2208 if (visorchipset_crash_kernel)
9232d2d6 2209 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9
KC
2210 setup_crash_devices_work_queue);
2211 else
9232d2d6 2212 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9 2213 controlvm_periodic_work);
9232d2d6 2214 periodic_controlvm_workqueue =
12e364b9
KC
2215 create_singlethread_workqueue("visorchipset_controlvm");
2216
38f736e9 2217 if (!periodic_controlvm_workqueue) {
4cb005a9
KC
2218 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2219 DIAG_SEVERITY_ERR);
2220 rc = -ENOMEM;
2221 goto Away;
2222 }
b53e0e93 2223 most_recent_message_jiffies = jiffies;
911e213e 2224 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
9232d2d6
BR
2225 rc = queue_delayed_work(periodic_controlvm_workqueue,
2226 &periodic_controlvm_work, poll_jiffies);
4cb005a9 2227 if (rc < 0) {
4cb005a9
KC
2228 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2229 DIAG_SEVERITY_ERR);
2230 goto Away;
2231 }
12e364b9
KC
2232 }
2233
2234 Visorchipset_platform_device.dev.devt = MajorDev;
4cb005a9 2235 if (platform_device_register(&Visorchipset_platform_device) < 0) {
4cb005a9
KC
2236 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2237 rc = -1;
2238 goto Away;
2239 }
12e364b9 2240 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
22ad57ba 2241 rc = 0;
12e364b9 2242Away:
12e364b9 2243 if (rc) {
12e364b9
KC
2244 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2245 POSTCODE_SEVERITY_ERR);
2246 }
2247 return rc;
2248}
2249
2250static void
2251visorchipset_exit(void)
2252{
12e364b9
KC
2253 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2254
 2255 if (!visorchipset_disable_controlvm) {
9232d2d6
BR
2258 cancel_delayed_work(&periodic_controlvm_work);
2259 flush_workqueue(periodic_controlvm_workqueue);
2260 destroy_workqueue(periodic_controlvm_workqueue);
2261 periodic_controlvm_workqueue = NULL;
84982fbf 2262 destroy_controlvm_payload_info(&controlvm_payload_info);
12e364b9 2263 }
12e364b9
KC
2264 if (Putfile_buffer_list_pool) {
2265 kmem_cache_destroy(Putfile_buffer_list_pool);
2266 Putfile_buffer_list_pool = NULL;
2267 }
1783319f 2268
12e364b9
KC
2269 cleanup_controlvm_structures();
2270
da021f02 2271 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2272
da021f02 2273 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2274
da021f02 2275 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2276
c3d9a224 2277 visorchannel_destroy(controlvm_channel);
8a1182eb 2278
12e364b9
KC
2279 visorchipset_file_cleanup();
2280 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
12e364b9
KC
2281}
2282
2283module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2284MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2285int visorchipset_testvnic = 0;
2286
2287module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2288MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2289int visorchipset_testvnicclient = 0;
2290
2291module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2292MODULE_PARM_DESC(visorchipset_testmsg,
2293 "1 to manufacture the chipset, bus, and switch messages");
2294int visorchipset_testmsg = 0;
2295
2296module_param_named(major, visorchipset_major, int, S_IRUGO);
2297MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2298int visorchipset_major = 0;
2299
2300module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
 2301 MODULE_PARM_DESC(visorchipset_serverregwait,
2302 "1 to have the module wait for the visor bus to register");
2303int visorchipset_serverregwait = 0; /* default is off */
2304module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2305MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2306int visorchipset_clientregwait = 1; /* default is on */
2307module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2308MODULE_PARM_DESC(visorchipset_testteardown,
2309 "1 to test teardown of the chipset, bus, and switch");
2310int visorchipset_testteardown = 0; /* default is off */
2311module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2312 S_IRUGO);
2313MODULE_PARM_DESC(visorchipset_disable_controlvm,
2314 "1 to disable polling of controlVm channel");
2315int visorchipset_disable_controlvm = 0; /* default is off */
2316module_param_named(crash_kernel, visorchipset_crash_kernel, int, S_IRUGO);
2317MODULE_PARM_DESC(visorchipset_crash_kernel,
2318 "1 means we are running in crash kernel");
2319int visorchipset_crash_kernel = 0; /* default is running in non-crash kernel */
2320module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2321 int, S_IRUGO);
2322MODULE_PARM_DESC(visorchipset_holdchipsetready,
2323 "1 to hold response to CHIPSET_READY");
2324int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2325 * response immediately */
2326module_init(visorchipset_init);
2327module_exit(visorchipset_exit);
2328
2329MODULE_AUTHOR("Unisys");
2330MODULE_LICENSE("GPL");
2331MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2332 VERSION);
2333MODULE_VERSION(VERSION);