staging: unisys: visorchipset: Remove unused NONULLSTR()
drivers/staging/unisys/visorchipset/visorchipset_main.c
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18#include "globals.h"
19#include "visorchipset.h"
20#include "procobjecttree.h"
21#include "visorchannel.h"
22#include "periodic_work.h"
23#include "file.h"
24#include "parser.h"
12e364b9 25#include "uisutils.h"
26#include "controlvmcompletionstatus.h"
27#include "guestlinuxdebug.h"
28
29#include <linux/nls.h>
30#include <linux/netdevice.h>
31#include <linux/platform_device.h>
90addb02 32#include <linux/uuid.h>
1ba00980 33#include <linux/crash_dump.h>
34
35#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
37 * vnic loopback test */
38#define TEST_VNIC_SWITCHNO 1
39#define TEST_VNIC_BUSNO 9
40
41#define MAX_NAME_SIZE 128
42#define MAX_IP_SIZE 50
43#define MAXOUTSTANDINGCHANNELCOMMAND 256
44#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
47/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
48* we switch to slow polling mode. As soon as we get a controlvm
49* message, we switch back to fast polling mode.
50*/
51#define MIN_IDLE_SECONDS 10
911e213e 52static ulong poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
b53e0e93 53static ulong most_recent_message_jiffies; /* when we got our last
bd5b9b32 54 * controlvm message */
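/* Illustrative sketch (not part of the driver): how the periodic work that
 * polls the controlvm channel is expected to move between the fast and slow
 * intervals declared above, based on MIN_IDLE_SECONDS of idle time. The
 * helper name is hypothetical.
 */
static inline void example_adjust_controlvm_poll_rate(BOOL got_message)
{
	if (got_message) {
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	} else if (time_after(jiffies, most_recent_message_jiffies +
				       MIN_IDLE_SECONDS * HZ)) {
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	}
}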
55static int serverregistered;
56static int clientregistered;
57
58#define MAX_CHIPSET_EVENTS 2
c242233e 59static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 60
61static struct delayed_work periodic_controlvm_work;
62static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 63static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 64
65static struct controlvm_message_header g_diag_msg_hdr;
66static struct controlvm_message_header g_chipset_msg_hdr;
67static struct controlvm_message_header g_del_dump_msg_hdr;
59827f00 68static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 69 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 70/* 0xffffff is an invalid Bus/Device number */
71static ulong g_diagpool_bus_no = 0xffffff;
72static ulong g_diagpool_dev_no = 0xffffff;
4f44b72d 73static struct controlvm_message_packet g_devicechangestate_packet;
74
75/* Only VNIC and VHBA channels are sent to visorclientbus (aka
76 * "visorhackbus")
77 */
78#define FOR_VISORHACKBUS(channel_type_guid) \
9eee5d1f 79 (((uuid_le_cmp(channel_type_guid,\
80 spar_vnic_channel_protocol_uuid) == 0) ||\
81 (uuid_le_cmp(channel_type_guid,\
9eee5d1f 82 spar_vhba_channel_protocol_uuid) == 0)))
83#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
84
85#define is_diagpool_channel(channel_type_guid) \
86 (uuid_le_cmp(channel_type_guid,\
87 spar_diag_pool_channel_protocol_uuid) == 0)
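/* Illustrative sketch (not part of the driver): the intended use of the
 * routing macros above -- VNIC/VHBA channel types are claimed by the
 * visorclientbus ("visorhackbus") flavor, everything else by visorbus.
 * The helper name is hypothetical.
 */
static inline BOOL example_channel_goes_to_visorbus(uuid_le channel_type_guid)
{
	if (FOR_VISORHACKBUS(channel_type_guid))
		return FALSE;	/* VNIC/VHBA: visorclientbus handles it */
	return FOR_VISORBUS(channel_type_guid);
}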
12e364b9 88
89static LIST_HEAD(bus_info_list);
90static LIST_HEAD(dev_info_list);
12e364b9 91
c3d9a224 92static struct visorchannel *controlvm_channel;
12e364b9 93
94/* Manages the request payload in the controlvm channel */
95static struct controlvm_payload_info {
c242233e 96 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 97 u64 offset; /* offset from beginning of controlvm
12e364b9 98 * channel to beginning of payload pool */
b3c55b13 99 u32 bytes; /* number of bytes in payload pool */
84982fbf 100} controlvm_payload_info;
12e364b9 101
102/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
103 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
104 */
105static struct livedump_info {
106 struct controlvm_message_header dumpcapture_header;
107 struct controlvm_message_header gettextdump_header;
108 struct controlvm_message_header dumpcomplete_header;
109 BOOL gettextdump_outstanding;
110 u32 crc32;
111 ulong length;
112 atomic_t buffers_in_use;
113 ulong destination;
ea33b4ee 114} livedump_info;
115
116/* The following globals are used to handle the scenario where we are unable to
117 * offload the payload from a controlvm message due to memory requirements. In
118 * this scenario, we simply stash the controlvm message, then attempt to
119 * process it again the next time controlvm_periodic_work() runs.
120 */
121static struct controlvm_message controlvm_pending_msg;
122static BOOL controlvm_pending_msg_valid = FALSE;
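/* Illustrative sketch (not part of the driver): the stash-and-retry idiom
 * described above. A message whose payload could not be handled is parked in
 * controlvm_pending_msg and re-offered to the dispatcher on the next
 * controlvm_periodic_work() pass; handle_msg is a hypothetical stand-in for
 * the real dispatch routine.
 */
static BOOL example_retry_stashed_message(BOOL (*handle_msg)(struct controlvm_message *))
{
	struct controlvm_message msg;

	if (!controlvm_pending_msg_valid)
		return FALSE;

	msg = controlvm_pending_msg;
	controlvm_pending_msg_valid = FALSE;
	if (!handle_msg(&msg)) {
		/* still cannot process it; park it again for the next pass */
		controlvm_pending_msg = msg;
		controlvm_pending_msg_valid = TRUE;
		return FALSE;
	}
	return TRUE;
}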
123
124/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
125 * TRANSMIT_FILE PutFile payloads.
126 */
127static struct kmem_cache *putfile_buffer_list_pool;
128static const char putfile_buffer_list_pool_name[] =
129 "controlvm_putfile_buffer_list_pool";
130
 131/* This identifies a data buffer that has been received via a controlvm message
132 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
133 */
134struct putfile_buffer_entry {
135 struct list_head next; /* putfile_buffer_entry list */
317d9614 136 struct parser_context *parser_ctx; /* points to input data buffer */
137};
138
139/* List of struct putfile_request *, via next_putfile_request member.
140 * Each entry in this list identifies an outstanding TRANSMIT_FILE
141 * conversation.
142 */
1eee0011 143static LIST_HEAD(putfile_request_list);
144
145/* This describes a buffer and its current state of transfer (e.g., how many
146 * bytes have already been supplied as putfile data, and how many bytes are
147 * remaining) for a putfile_request.
148 */
149struct putfile_active_buffer {
150 /* a payload from a controlvm message, containing a file data buffer */
317d9614 151 struct parser_context *parser_ctx;
152 /* points within data area of parser_ctx to next byte of data */
153 u8 *pnext;
154 /* # bytes left from <pnext> to the end of this data buffer */
155 size_t bytes_remaining;
156};
157
158#define PUTFILE_REQUEST_SIG 0x0906101302281211
159/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
160 * conversation. Structs of this type are dynamically linked into
161 * <Putfile_request_list>.
162 */
163struct putfile_request {
164 u64 sig; /* PUTFILE_REQUEST_SIG */
165
166 /* header from original TransmitFile request */
98d7b594 167 struct controlvm_message_header controlvm_header;
168 u64 file_request_number; /* from original TransmitFile request */
169
170 /* link to next struct putfile_request */
171 struct list_head next_putfile_request;
172
173 /* most-recent sequence number supplied via a controlvm message */
174 u64 data_sequence_number;
175
176 /* head of putfile_buffer_entry list, which describes the data to be
177 * supplied as putfile data;
178 * - this list is added to when controlvm messages come in that supply
179 * file data
180 * - this list is removed from via the hotplug program that is actually
181 * consuming these buffers to write as file data */
182 struct list_head input_buffer_list;
183 spinlock_t req_list_lock; /* lock for input_buffer_list */
184
185 /* waiters for input_buffer_list to go non-empty */
186 wait_queue_head_t input_buffer_wq;
187
188 /* data not yet read within current putfile_buffer_entry */
189 struct putfile_active_buffer active_buf;
190
191 /* <0 = failed, 0 = in-progress, >0 = successful; */
192 /* note that this must be set with req_list_lock, and if you set <0, */
193 /* it is your responsibility to also free up all of the other objects */
194 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
195 /* before releasing the lock */
196 int completion_status;
197};
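/* Illustrative sketch (not part of the driver): the consumer-side pattern the
 * fields above are designed for -- wait for input_buffer_list to go
 * non-empty, then detach the next putfile_buffer_entry while holding
 * req_list_lock. The helper name is hypothetical.
 */
static struct putfile_buffer_entry *
example_get_next_putfile_buffer(struct putfile_request *req)
{
	struct putfile_buffer_entry *entry = NULL;

	if (wait_event_interruptible(req->input_buffer_wq,
				     !list_empty(&req->input_buffer_list)))
		return NULL;	/* interrupted by a signal */

	spin_lock(&req->req_list_lock);
	if (!list_empty(&req->input_buffer_list)) {
		entry = list_first_entry(&req->input_buffer_list,
					 struct putfile_buffer_entry, next);
		list_del(&entry->next);
	}
	spin_unlock(&req->req_list_lock);
	return entry;
}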
198
712f42cd 199static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
200
201struct parahotplug_request {
202 struct list_head list;
203 int id;
204 unsigned long expiration;
3ab47701 205 struct controlvm_message msg;
206};
207
208static LIST_HEAD(parahotplug_request_list);
209static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
210static void parahotplug_process_list(void);
211
 212/* Notifier sets filled in when the server and client flavors of the
 213 * visorbus driver register via visorchipset_register_busdev_server() and
 214 * visorchipset_register_busdev_client(). */
215static struct visorchipset_busdev_notifiers busdev_server_notifiers;
216static struct visorchipset_busdev_notifiers busdev_client_notifiers;
12e364b9 217
218static void bus_create_response(ulong bus_no, int response);
219static void bus_destroy_response(ulong bus_no, int response);
220static void device_create_response(ulong bus_no, ulong dev_no, int response);
221static void device_destroy_response(ulong bus_no, ulong dev_no, int response);
222static void device_resume_response(ulong bus_no, ulong dev_no, int response);
12e364b9 223
8e3fedd6 224static struct visorchipset_busdev_responders busdev_responders = {
225 .bus_create = bus_create_response,
226 .bus_destroy = bus_destroy_response,
227 .device_create = device_create_response,
228 .device_destroy = device_destroy_response,
927c7927 229 .device_pause = visorchipset_device_pause_response,
230 .device_resume = device_resume_response,
231};
232
233/* info for /dev/visorchipset */
5aa8ae57 234static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 235
236/* prototypes for attributes */
237static ssize_t toolaction_show(struct device *dev,
8e76e695 238 struct device_attribute *attr, char *buf);
19f6634f 239static ssize_t toolaction_store(struct device *dev,
240 struct device_attribute *attr,
241 const char *buf, size_t count);
242static DEVICE_ATTR_RW(toolaction);
243
54b31229 244static ssize_t boottotool_show(struct device *dev,
8e76e695 245 struct device_attribute *attr, char *buf);
54b31229 246static ssize_t boottotool_store(struct device *dev,
247 struct device_attribute *attr, const char *buf,
248 size_t count);
249static DEVICE_ATTR_RW(boottotool);
250
422af17c 251static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 252 char *buf);
422af17c 253static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 254 const char *buf, size_t count);
255static DEVICE_ATTR_RW(error);
256
257static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 258 char *buf);
422af17c 259static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 260 const char *buf, size_t count);
261static DEVICE_ATTR_RW(textid);
262
263static ssize_t remaining_steps_show(struct device *dev,
8e76e695 264 struct device_attribute *attr, char *buf);
422af17c 265static ssize_t remaining_steps_store(struct device *dev,
266 struct device_attribute *attr,
267 const char *buf, size_t count);
268static DEVICE_ATTR_RW(remaining_steps);
269
18b87ed1 270static ssize_t chipsetready_store(struct device *dev,
271 struct device_attribute *attr,
272 const char *buf, size_t count);
273static DEVICE_ATTR_WO(chipsetready);
274
e56fa7cd 275static ssize_t devicedisabled_store(struct device *dev,
276 struct device_attribute *attr,
277 const char *buf, size_t count);
278static DEVICE_ATTR_WO(devicedisabled);
279
280static ssize_t deviceenabled_store(struct device *dev,
281 struct device_attribute *attr,
282 const char *buf, size_t count);
283static DEVICE_ATTR_WO(deviceenabled);
284
285static struct attribute *visorchipset_install_attrs[] = {
286 &dev_attr_toolaction.attr,
54b31229 287 &dev_attr_boottotool.attr,
288 &dev_attr_error.attr,
289 &dev_attr_textid.attr,
290 &dev_attr_remaining_steps.attr,
291 NULL
292};
293
294static struct attribute_group visorchipset_install_group = {
295 .name = "install",
296 .attrs = visorchipset_install_attrs
297};
298
299static struct attribute *visorchipset_guest_attrs[] = {
300 &dev_attr_chipsetready.attr,
301 NULL
302};
303
304static struct attribute_group visorchipset_guest_group = {
305 .name = "guest",
306 .attrs = visorchipset_guest_attrs
307};
308
309static struct attribute *visorchipset_parahotplug_attrs[] = {
310 &dev_attr_devicedisabled.attr,
311 &dev_attr_deviceenabled.attr,
312 NULL
313};
314
315static struct attribute_group visorchipset_parahotplug_group = {
316 .name = "parahotplug",
317 .attrs = visorchipset_parahotplug_attrs
318};
319
320static const struct attribute_group *visorchipset_dev_groups[] = {
321 &visorchipset_install_group,
18b87ed1 322 &visorchipset_guest_group,
e56fa7cd 323 &visorchipset_parahotplug_group,
324 NULL
325};
326
12e364b9 327/* /sys/devices/platform/visorchipset */
eb34e877 328static struct platform_device visorchipset_platform_device = {
329 .name = "visorchipset",
330 .id = -1,
19f6634f 331 .dev.groups = visorchipset_dev_groups,
332};
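/* Illustrative sketch (not part of the driver): with the platform device
 * registered as "visorchipset" and the attribute groups above, the install
 * error counter is visible to userspace at
 * /sys/devices/platform/visorchipset/install/error. A minimal reader,
 * assuming that path:
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static int example_read_install_error(void)
 *	{
 *		char buf[32];
 *		ssize_t n;
 *		int fd = open("/sys/devices/platform/visorchipset/install/error",
 *			      O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		close(fd);
 *		if (n <= 0)
 *			return -1;
 *		buf[n] = '\0';
 *		return atoi(buf);
 *	}
 */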
333
334/* Function prototypes */
b3168c70 335static void controlvm_respond(struct controlvm_message_header *msg_hdr,
336 int response);
337static void controlvm_respond_chipset_init(
b3168c70 338 struct controlvm_message_header *msg_hdr, int response,
339 enum ultra_chipset_feature features);
340static void controlvm_respond_physdev_changestate(
b3168c70 341 struct controlvm_message_header *msg_hdr, int response,
98d7b594 342 struct spar_segment_state state);
12e364b9 343
344static ssize_t toolaction_show(struct device *dev,
345 struct device_attribute *attr,
346 char *buf)
19f6634f 347{
01f4d85a 348 u8 tool_action;
19f6634f 349
c3d9a224 350 visorchannel_read(controlvm_channel,
d19642f6 351 offsetof(struct spar_controlvm_channel_protocol,
8e76e695 352 tool_action), &tool_action, sizeof(u8));
01f4d85a 353 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
354}
355
356static ssize_t toolaction_store(struct device *dev,
357 struct device_attribute *attr,
358 const char *buf, size_t count)
19f6634f 359{
01f4d85a 360 u8 tool_action;
66e24b76 361 int ret;
19f6634f 362
01f4d85a 363 if (kstrtou8(buf, 10, &tool_action) != 0)
364 return -EINVAL;
365
c3d9a224 366 ret = visorchannel_write(controlvm_channel,
367 offsetof(struct spar_controlvm_channel_protocol,
368 tool_action),
01f4d85a 369 &tool_action, sizeof(u8));
370
371 if (ret)
372 return ret;
e22a4a0f 373 return count;
374}
375
376static ssize_t boottotool_show(struct device *dev,
377 struct device_attribute *attr,
378 char *buf)
54b31229 379{
365522d9 380 struct efi_spar_indication efi_spar_indication;
54b31229 381
c3d9a224 382 visorchannel_read(controlvm_channel,
383 offsetof(struct spar_controlvm_channel_protocol,
384 efi_spar_ind), &efi_spar_indication,
385 sizeof(struct efi_spar_indication));
54b31229 386 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 387 efi_spar_indication.boot_to_tool);
388}
389
390static ssize_t boottotool_store(struct device *dev,
391 struct device_attribute *attr,
392 const char *buf, size_t count)
54b31229 393{
66e24b76 394 int val, ret;
365522d9 395 struct efi_spar_indication efi_spar_indication;
54b31229 396
397 if (kstrtoint(buf, 10, &val) != 0)
398 return -EINVAL;
399
365522d9 400 efi_spar_indication.boot_to_tool = val;
c3d9a224 401 ret = visorchannel_write(controlvm_channel,
d19642f6 402 offsetof(struct spar_controlvm_channel_protocol,
403 efi_spar_ind), &(efi_spar_indication),
404 sizeof(struct efi_spar_indication));
405
406 if (ret)
407 return ret;
e22a4a0f 408 return count;
54b31229 409}
410
411static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 412 char *buf)
413{
414 u32 error;
415
416 visorchannel_read(controlvm_channel,
417 offsetof(struct spar_controlvm_channel_protocol,
418 installation_error),
419 &error, sizeof(u32));
420 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
421}
422
423static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 424 const char *buf, size_t count)
425{
426 u32 error;
66e24b76 427 int ret;
422af17c 428
429 if (kstrtou32(buf, 10, &error) != 0)
430 return -EINVAL;
431
c3d9a224 432 ret = visorchannel_write(controlvm_channel,
433 offsetof(struct spar_controlvm_channel_protocol,
434 installation_error),
435 &error, sizeof(u32));
436 if (ret)
437 return ret;
e22a4a0f 438 return count;
439}
440
441static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 442 char *buf)
422af17c 443{
10dbf0e3 444 u32 text_id;
422af17c 445
446 visorchannel_read(controlvm_channel,
447 offsetof(struct spar_controlvm_channel_protocol,
448 installation_text_id),
449 &text_id, sizeof(u32));
10dbf0e3 450 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
451}
452
453static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 454 const char *buf, size_t count)
422af17c 455{
10dbf0e3 456 u32 text_id;
66e24b76 457 int ret;
422af17c 458
10dbf0e3 459 if (kstrtou32(buf, 10, &text_id) != 0)
460 return -EINVAL;
461
c3d9a224 462 ret = visorchannel_write(controlvm_channel,
463 offsetof(struct spar_controlvm_channel_protocol,
464 installation_text_id),
465 &text_id, sizeof(u32));
466 if (ret)
467 return ret;
e22a4a0f 468 return count;
469}
470
422af17c 471static ssize_t remaining_steps_show(struct device *dev,
8e76e695 472 struct device_attribute *attr, char *buf)
422af17c 473{
ee8da290 474 u16 remaining_steps;
422af17c 475
c3d9a224 476 visorchannel_read(controlvm_channel,
477 offsetof(struct spar_controlvm_channel_protocol,
478 installation_remaining_steps),
479 &remaining_steps, sizeof(u16));
ee8da290 480 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
481}
482
483static ssize_t remaining_steps_store(struct device *dev,
484 struct device_attribute *attr,
485 const char *buf, size_t count)
422af17c 486{
ee8da290 487 u16 remaining_steps;
66e24b76 488 int ret;
422af17c 489
ee8da290 490 if (kstrtou16(buf, 10, &remaining_steps) != 0)
491 return -EINVAL;
492
c3d9a224 493 ret = visorchannel_write(controlvm_channel,
494 offsetof(struct spar_controlvm_channel_protocol,
495 installation_remaining_steps),
496 &remaining_steps, sizeof(u16));
497 if (ret)
498 return ret;
e22a4a0f 499 return count;
500}
501
12e364b9 502static void
9b989a98 503bus_info_clear(void *v)
12e364b9 504{
33192fa1 505 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) (v);
12e364b9 506
507 kfree(p->name);
508 p->name = NULL;
509
510 kfree(p->description);
511 p->description = NULL;
512
513 p->state.created = 0;
33192fa1 514 memset(p, 0, sizeof(struct visorchipset_bus_info));
515}
516
517static void
9b989a98 518dev_info_clear(void *v)
12e364b9 519{
520 struct visorchipset_device_info *p =
521 (struct visorchipset_device_info *)(v);
26eb2c0c 522
12e364b9 523 p->state.created = 0;
246e0cd0 524 memset(p, 0, sizeof(struct visorchipset_device_info));
525}
526
c242233e 527static u8
528check_chipset_events(void)
529{
530 int i;
c242233e 531 u8 send_msg = 1;
532 /* Check events to determine if response should be sent */
533 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
534 send_msg &= chipset_events[i];
535 return send_msg;
536}
537
538static void
539clear_chipset_events(void)
540{
541 int i;
542 /* Clear chipset_events */
543 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
544 chipset_events[i] = 0;
545}
546
547void
548visorchipset_register_busdev_server(
549 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 550 struct visorchipset_busdev_responders *responders,
1e7a59c1 551 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 552{
8f1947ac 553 down(&notifier_lock);
38f736e9 554 if (!notifiers) {
555 memset(&busdev_server_notifiers, 0,
556 sizeof(busdev_server_notifiers));
557 serverregistered = 0; /* clear flag */
558 } else {
6fe345af 559 busdev_server_notifiers = *notifiers;
560 serverregistered = 1; /* set flag */
561 }
562 if (responders)
8e3fedd6 563 *responders = busdev_responders;
564 if (driver_info)
565 bus_device_info_init(driver_info, "chipset", "visorchipset",
8e76e695 566 VERSION, NULL);
12e364b9 567
8f1947ac 568 up(&notifier_lock);
569}
570EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
571
572void
573visorchipset_register_busdev_client(
574 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 575 struct visorchipset_busdev_responders *responders,
43fce019 576 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 577{
8f1947ac 578 down(&notifier_lock);
38f736e9 579 if (!notifiers) {
580 memset(&busdev_client_notifiers, 0,
581 sizeof(busdev_client_notifiers));
582 clientregistered = 0; /* clear flag */
583 } else {
6fe345af 584 busdev_client_notifiers = *notifiers;
585 clientregistered = 1; /* set flag */
586 }
587 if (responders)
8e3fedd6 588 *responders = busdev_responders;
589 if (driver_info)
590 bus_device_info_init(driver_info, "chipset(bolts)",
591 "visorchipset", VERSION, NULL);
8f1947ac 592 up(&notifier_lock);
593}
594EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
595
596static void
597cleanup_controlvm_structures(void)
598{
33192fa1 599 struct visorchipset_bus_info *bi, *tmp_bi;
246e0cd0 600 struct visorchipset_device_info *di, *tmp_di;
12e364b9 601
1390b88c 602 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
9b989a98 603 bus_info_clear(bi);
12e364b9
KC
604 list_del(&bi->entry);
605 kfree(bi);
606 }
607
1390b88c 608 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
9b989a98 609 dev_info_clear(di);
12e364b9
KC
610 list_del(&di->entry);
611 kfree(di);
612 }
613}
614
615static void
3ab47701 616chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
617{
618 static int chipset_inited;
b9b141e8 619 enum ultra_chipset_feature features = 0;
12e364b9
KC
620 int rc = CONTROLVM_RESP_SUCCESS;
621
622 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
623 if (chipset_inited) {
22ad57ba 624 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 625 goto cleanup;
12e364b9
KC
626 }
627 chipset_inited = 1;
628 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
629
630 /* Set features to indicate we support parahotplug (if Command
631 * also supports it). */
632 features =
2ea5117b 633 inmsg->cmd.init_chipset.
12e364b9
KC
634 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
635
636 /* Set the "reply" bit so Command knows this is a
637 * features-aware driver. */
638 features |= ULTRA_CHIPSET_FEATURE_REPLY;
639
e3199b2e 640cleanup:
12e364b9
KC
641 if (rc < 0)
642 cleanup_controlvm_structures();
98d7b594 643 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
644 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
645}
646
647static void
3ab47701 648controlvm_init_response(struct controlvm_message *msg,
b3168c70 649 struct controlvm_message_header *msg_hdr, int response)
12e364b9 650{
3ab47701 651 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 652 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
653 msg->hdr.payload_bytes = 0;
654 msg->hdr.payload_vm_offset = 0;
655 msg->hdr.payload_max_bytes = 0;
12e364b9 656 if (response < 0) {
98d7b594
BR
657 msg->hdr.flags.failed = 1;
658 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
659 }
660}
661
662static void
b3168c70 663controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 664{
3ab47701 665 struct controlvm_message outmsg;
26eb2c0c 666
b3168c70 667 controlvm_init_response(&outmsg, msg_hdr, response);
12e364b9
KC
668 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
669 * back the deviceChangeState structure in the packet. */
b3168c70 670 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
0639ba67
BR
671 g_devicechangestate_packet.device_change_state.bus_no ==
672 g_diagpool_bus_no &&
673 g_devicechangestate_packet.device_change_state.dev_no ==
83d48905 674 g_diagpool_dev_no)
4f44b72d 675 outmsg.cmd = g_devicechangestate_packet;
2098dbd1 676 if (outmsg.hdr.flags.test_message == 1)
12e364b9 677 return;
2098dbd1 678
c3d9a224 679 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 680 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
681 return;
682 }
683}
684
685static void
b3168c70 686controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 687 int response,
b9b141e8 688 enum ultra_chipset_feature features)
12e364b9 689{
3ab47701 690 struct controlvm_message outmsg;
26eb2c0c 691
b3168c70 692 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 693 outmsg.cmd.init_chipset.features = features;
c3d9a224 694 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 695 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
696 return;
697 }
698}
699
98d7b594 700static void controlvm_respond_physdev_changestate(
b3168c70 701 struct controlvm_message_header *msg_hdr, int response,
98d7b594 702 struct spar_segment_state state)
12e364b9 703{
3ab47701 704 struct controlvm_message outmsg;
26eb2c0c 705
b3168c70 706 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
707 outmsg.cmd.device_change_state.state = state;
708 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 709 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 710 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
711 return;
712 }
713}
714
715void
2c683cde
BR
716visorchipset_save_message(struct controlvm_message *msg,
717 enum crash_obj_type type)
12e364b9 718{
4577225d
BR
719 u32 crash_msg_offset;
720 u16 crash_msg_count;
12e364b9
KC
721
722 /* get saved message count */
c3d9a224 723 if (visorchannel_read(controlvm_channel,
d19642f6
BR
724 offsetof(struct spar_controlvm_channel_protocol,
725 saved_crash_message_count),
4577225d 726 &crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
727 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
728 POSTCODE_SEVERITY_ERR);
729 return;
730 }
731
4577225d 732 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 733 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
4577225d 734 crash_msg_count,
12e364b9
KC
735 POSTCODE_SEVERITY_ERR);
736 return;
737 }
738
739 /* get saved crash message offset */
c3d9a224 740 if (visorchannel_read(controlvm_channel,
d19642f6
BR
741 offsetof(struct spar_controlvm_channel_protocol,
742 saved_crash_message_offset),
4577225d 743 &crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
744 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
745 POSTCODE_SEVERITY_ERR);
746 return;
747 }
748
2c683cde 749 if (type == CRASH_BUS) {
c3d9a224 750 if (visorchannel_write(controlvm_channel,
4577225d 751 crash_msg_offset,
3ab47701
BR
752 msg,
753 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
754 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
755 POSTCODE_SEVERITY_ERR);
756 return;
757 }
758 } else {
c3d9a224 759 if (visorchannel_write(controlvm_channel,
4577225d 760 crash_msg_offset +
3ab47701
BR
761 sizeof(struct controlvm_message), msg,
762 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
763 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
764 POSTCODE_SEVERITY_ERR);
765 return;
766 }
767 }
768}
769EXPORT_SYMBOL_GPL(visorchipset_save_message);
770
771static void
fbb31f48 772bus_responder(enum controlvm_id cmd_id, ulong bus_no, int response)
12e364b9 773{
33192fa1 774 struct visorchipset_bus_info *p = NULL;
12e364b9
KC
775 BOOL need_clear = FALSE;
776
fbb31f48 777 p = findbus(&bus_info_list, bus_no);
0aca7844 778 if (!p)
12e364b9 779 return;
0aca7844 780
12e364b9 781 if (response < 0) {
fbb31f48 782 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
12e364b9
KC
783 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
784 /* undo the row we just created... */
fbb31f48 785 delbusdevices(&dev_info_list, bus_no);
12e364b9 786 } else {
fbb31f48 787 if (cmd_id == CONTROLVM_BUS_CREATE)
12e364b9 788 p->state.created = 1;
fbb31f48 789 if (cmd_id == CONTROLVM_BUS_DESTROY)
12e364b9
KC
790 need_clear = TRUE;
791 }
792
0aca7844 793 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 794 return; /* no controlvm response needed */
6b59b31d 795 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 796 return;
33192fa1
BR
797 controlvm_respond(&p->pending_msg_hdr, response);
798 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 799 if (need_clear) {
9b989a98 800 bus_info_clear(p);
fbb31f48 801 delbusdevices(&dev_info_list, bus_no);
12e364b9
KC
802 }
803}
804
805static void
fbb31f48
BR
806device_changestate_responder(enum controlvm_id cmd_id,
807 ulong bus_no, ulong dev_no, int response,
808 struct spar_segment_state response_state)
12e364b9 809{
246e0cd0 810 struct visorchipset_device_info *p = NULL;
3ab47701 811 struct controlvm_message outmsg;
12e364b9 812
fbb31f48 813 p = finddevice(&dev_info_list, bus_no, dev_no);
0aca7844 814 if (!p)
12e364b9 815 return;
0aca7844 816 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 817 return; /* no controlvm response needed */
fbb31f48 818 if (p->pending_msg_hdr.id != cmd_id)
12e364b9 819 return;
12e364b9 820
246e0cd0 821 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
12e364b9 822
fbb31f48
BR
823 outmsg.cmd.device_change_state.bus_no = bus_no;
824 outmsg.cmd.device_change_state.dev_no = dev_no;
825 outmsg.cmd.device_change_state.state = response_state;
12e364b9 826
c3d9a224 827 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 828 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 829 return;
12e364b9 830
246e0cd0 831 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9
KC
832}
833
834static void
fbb31f48 835device_responder(enum controlvm_id cmd_id, ulong bus_no, ulong dev_no,
53bebb13 836 int response)
12e364b9 837{
246e0cd0 838 struct visorchipset_device_info *p = NULL;
12e364b9
KC
839 BOOL need_clear = FALSE;
840
fbb31f48 841 p = finddevice(&dev_info_list, bus_no, dev_no);
0aca7844 842 if (!p)
12e364b9 843 return;
12e364b9 844 if (response >= 0) {
fbb31f48 845 if (cmd_id == CONTROLVM_DEVICE_CREATE)
12e364b9 846 p->state.created = 1;
fbb31f48 847 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
12e364b9
KC
848 need_clear = TRUE;
849 }
850
0aca7844 851 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 852 return; /* no controlvm response needed */
0aca7844 853
6b59b31d 854 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 855 return;
0aca7844 856
246e0cd0
BR
857 controlvm_respond(&p->pending_msg_hdr, response);
858 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 859 if (need_clear)
9b989a98 860 dev_info_clear(p);
12e364b9
KC
861}
862
863static void
2836c6a8
BR
864bus_epilog(u32 bus_no,
865 u32 cmd, struct controlvm_message_header *msg_hdr,
866 int response, BOOL need_response)
12e364b9
KC
867{
868 BOOL notified = FALSE;
869
2836c6a8
BR
870 struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
871 bus_no);
12e364b9 872
2836c6a8 873 if (!bus_info)
12e364b9 874 return;
0aca7844 875
2836c6a8
BR
876 if (need_response) {
877 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
98d7b594 878 sizeof(struct controlvm_message_header));
75c1f8b7 879 } else {
2836c6a8 880 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 881 }
12e364b9 882
8f1947ac 883 down(&notifier_lock);
12e364b9
KC
884 if (response == CONTROLVM_RESP_SUCCESS) {
885 switch (cmd) {
886 case CONTROLVM_BUS_CREATE:
887 /* We can't tell from the bus_create
888 * information which of our 2 bus flavors the
 889 * devices on this bus will ultimately end up on.
890 * FORTUNATELY, it turns out it is harmless to
891 * send the bus_create to both of them. We can
892 * narrow things down a little bit, though,
893 * because we know: - BusDev_Server can handle
894 * either server or client devices
895 * - BusDev_Client can handle ONLY client
896 * devices */
6fe345af
BR
897 if (busdev_server_notifiers.bus_create) {
898 (*busdev_server_notifiers.bus_create) (bus_no);
12e364b9
KC
899 notified = TRUE;
900 }
2836c6a8 901 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
902 busdev_client_notifiers.bus_create) {
903 (*busdev_client_notifiers.bus_create) (bus_no);
12e364b9
KC
904 notified = TRUE;
905 }
906 break;
907 case CONTROLVM_BUS_DESTROY:
6fe345af
BR
908 if (busdev_server_notifiers.bus_destroy) {
909 (*busdev_server_notifiers.bus_destroy) (bus_no);
12e364b9
KC
910 notified = TRUE;
911 }
2836c6a8 912 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
913 busdev_client_notifiers.bus_destroy) {
914 (*busdev_client_notifiers.bus_destroy) (bus_no);
12e364b9
KC
915 notified = TRUE;
916 }
917 break;
918 }
919 }
920 if (notified)
921 /* The callback function just called above is responsible
929aa8ae 922 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
923 * function, which will call bus_responder()
924 */
925 ;
926 else
2836c6a8 927 bus_responder(cmd, bus_no, response);
8f1947ac 928 up(&notifier_lock);
12e364b9
KC
929}
930
931static void
2836c6a8
BR
932device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
933 struct controlvm_message_header *msg_hdr, int response,
934 BOOL need_response, BOOL for_visorbus)
12e364b9 935{
fe90d892 936 struct visorchipset_busdev_notifiers *notifiers = NULL;
12e364b9
KC
937 BOOL notified = FALSE;
938
2836c6a8
BR
939 struct visorchipset_device_info *dev_info =
940 finddevice(&dev_info_list, bus_no, dev_no);
12e364b9
KC
941 char *envp[] = {
942 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
943 NULL
944 };
945
2836c6a8 946 if (!dev_info)
12e364b9 947 return;
0aca7844 948
12e364b9 949 if (for_visorbus)
6fe345af 950 notifiers = &busdev_server_notifiers;
12e364b9 951 else
6fe345af 952 notifiers = &busdev_client_notifiers;
2836c6a8
BR
953 if (need_response) {
954 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
98d7b594 955 sizeof(struct controlvm_message_header));
75c1f8b7 956 } else {
2836c6a8 957 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 958 }
12e364b9 959
8f1947ac 960 down(&notifier_lock);
12e364b9
KC
961 if (response >= 0) {
962 switch (cmd) {
963 case CONTROLVM_DEVICE_CREATE:
964 if (notifiers->device_create) {
2836c6a8 965 (*notifiers->device_create) (bus_no, dev_no);
12e364b9
KC
966 notified = TRUE;
967 }
968 break;
969 case CONTROLVM_DEVICE_CHANGESTATE:
970 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
971 if (state.alive == segment_state_running.alive &&
972 state.operating ==
973 segment_state_running.operating) {
12e364b9 974 if (notifiers->device_resume) {
2836c6a8
BR
975 (*notifiers->device_resume) (bus_no,
976 dev_no);
12e364b9
KC
977 notified = TRUE;
978 }
979 }
980 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 981 else if (state.alive == segment_state_standby.alive &&
3f833b54 982 state.operating ==
bd0d2dcc 983 segment_state_standby.operating) {
12e364b9
KC
984 /* technically this is standby case
985 * where server is lost
986 */
987 if (notifiers->device_pause) {
2836c6a8
BR
988 (*notifiers->device_pause) (bus_no,
989 dev_no);
12e364b9
KC
990 notified = TRUE;
991 }
bd0d2dcc 992 } else if (state.alive == segment_state_paused.alive &&
3f833b54 993 state.operating ==
bd0d2dcc 994 segment_state_paused.operating) {
12e364b9
KC
995 /* this is lite pause where channel is
996 * still valid just 'pause' of it
997 */
2836c6a8
BR
998 if (bus_no == g_diagpool_bus_no &&
999 dev_no == g_diagpool_dev_no) {
12e364b9
KC
1000 /* this will trigger the
1001 * diag_shutdown.sh script in
1002 * the visorchipset hotplug */
1003 kobject_uevent_env
eb34e877 1004 (&visorchipset_platform_device.dev.
12e364b9
KC
1005 kobj, KOBJ_ONLINE, envp);
1006 }
1007 }
1008 break;
1009 case CONTROLVM_DEVICE_DESTROY:
1010 if (notifiers->device_destroy) {
2836c6a8 1011 (*notifiers->device_destroy) (bus_no, dev_no);
12e364b9
KC
1012 notified = TRUE;
1013 }
1014 break;
1015 }
1016 }
1017 if (notified)
1018 /* The callback function just called above is responsible
929aa8ae 1019 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1020 * function, which will call device_responder()
1021 */
1022 ;
1023 else
2836c6a8 1024 device_responder(cmd, bus_no, dev_no, response);
8f1947ac 1025 up(&notifier_lock);
12e364b9
KC
1026}
1027
1028static void
3ab47701 1029bus_create(struct controlvm_message *inmsg)
12e364b9 1030{
2ea5117b 1031 struct controlvm_message_packet *cmd = &inmsg->cmd;
6c5fed35 1032 ulong bus_no = cmd->create_bus.bus_no;
12e364b9 1033 int rc = CONTROLVM_RESP_SUCCESS;
6c5fed35 1034 struct visorchipset_bus_info *bus_info = NULL;
12e364b9 1035
6c5fed35
BR
1036 bus_info = findbus(&bus_info_list, bus_no);
1037 if (bus_info && (bus_info->state.created == 1)) {
1038 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1039 POSTCODE_SEVERITY_ERR);
22ad57ba 1040 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
6c5fed35 1041 goto cleanup;
12e364b9 1042 }
6c5fed35
BR
1043 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1044 if (!bus_info) {
1045 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1046 POSTCODE_SEVERITY_ERR);
22ad57ba 1047 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
6c5fed35 1048 goto cleanup;
12e364b9
KC
1049 }
1050
6c5fed35
BR
1051 INIT_LIST_HEAD(&bus_info->entry);
1052 bus_info->bus_no = bus_no;
1053 bus_info->dev_no = cmd->create_bus.dev_count;
12e364b9 1054
6c5fed35 1055 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1056
98d7b594 1057 if (inmsg->hdr.flags.test_message == 1)
6c5fed35 1058 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1059 else
6c5fed35 1060 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
12e364b9 1061
6c5fed35
BR
1062 bus_info->flags.server = inmsg->hdr.flags.server;
1063 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1064 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1065 bus_info->chan_info.channel_type_uuid =
9b1caee7 1066 cmd->create_bus.bus_data_type_uuid;
6c5fed35 1067 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
12e364b9 1068
6c5fed35 1069 list_add(&bus_info->entry, &bus_info_list);
12e364b9 1070
6c5fed35 1071 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1072
6c5fed35
BR
1073cleanup:
1074 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1075 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1076}
1077
1078static void
3ab47701 1079bus_destroy(struct controlvm_message *inmsg)
12e364b9 1080{
2ea5117b 1081 struct controlvm_message_packet *cmd = &inmsg->cmd;
dff54cd6
BR
1082 ulong bus_no = cmd->destroy_bus.bus_no;
1083 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1084 int rc = CONTROLVM_RESP_SUCCESS;
1085
dff54cd6
BR
1086 bus_info = findbus(&bus_info_list, bus_no);
1087 if (!bus_info)
22ad57ba 1088 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1089 else if (bus_info->state.created == 0)
22ad57ba 1090 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1091
dff54cd6 1092 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1093 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1094}
1095
1096static void
317d9614
BR
1097bus_configure(struct controlvm_message *inmsg,
1098 struct parser_context *parser_ctx)
12e364b9 1099{
2ea5117b 1100 struct controlvm_message_packet *cmd = &inmsg->cmd;
654bada0
BR
1101 ulong bus_no = cmd->configure_bus.bus_no;
1102 struct visorchipset_bus_info *bus_info = NULL;
12e364b9
KC
1103 int rc = CONTROLVM_RESP_SUCCESS;
1104 char s[99];
1105
654bada0
BR
1106 bus_no = cmd->configure_bus.bus_no;
1107 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1108 POSTCODE_SEVERITY_INFO);
12e364b9 1109
654bada0
BR
1110 bus_info = findbus(&bus_info_list, bus_no);
1111 if (!bus_info) {
1112 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1113 POSTCODE_SEVERITY_ERR);
22ad57ba 1114 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1115 } else if (bus_info->state.created == 0) {
1116 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1117 POSTCODE_SEVERITY_ERR);
22ad57ba 1118 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1119 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1120 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1121 POSTCODE_SEVERITY_ERR);
22ad57ba 1122 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0
BR
1123 } else {
1124 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1125 bus_info->partition_uuid = parser_id_get(parser_ctx);
1126 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1127 bus_info->name = parser_string_get(parser_ctx);
1128
1129 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1130 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1131 POSTCODE_SEVERITY_INFO);
12e364b9 1132 }
654bada0 1133 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1134 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1135}
1136
1137static void
3ab47701 1138my_device_create(struct controlvm_message *inmsg)
12e364b9 1139{
2ea5117b 1140 struct controlvm_message_packet *cmd = &inmsg->cmd;
c60c8e26
BR
1141 ulong bus_no = cmd->create_device.bus_no;
1142 ulong dev_no = cmd->create_device.dev_no;
1143 struct visorchipset_device_info *dev_info = NULL;
1144 struct visorchipset_bus_info *bus_info = NULL;
12e364b9
KC
1145 int rc = CONTROLVM_RESP_SUCCESS;
1146
c60c8e26
BR
1147 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1148 if (dev_info && (dev_info->state.created == 1)) {
1149 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1150 POSTCODE_SEVERITY_ERR);
22ad57ba 1151 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
c60c8e26 1152 goto cleanup;
12e364b9 1153 }
c60c8e26
BR
1154 bus_info = findbus(&bus_info_list, bus_no);
1155 if (!bus_info) {
1156 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1157 POSTCODE_SEVERITY_ERR);
22ad57ba 1158 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1159 goto cleanup;
12e364b9 1160 }
c60c8e26
BR
1161 if (bus_info->state.created == 0) {
1162 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1163 POSTCODE_SEVERITY_ERR);
22ad57ba 1164 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1165 goto cleanup;
12e364b9 1166 }
c60c8e26
BR
1167 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1168 if (!dev_info) {
1169 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1170 POSTCODE_SEVERITY_ERR);
22ad57ba 1171 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
c60c8e26 1172 goto cleanup;
12e364b9 1173 }
97a84f12 1174
c60c8e26
BR
1175 INIT_LIST_HEAD(&dev_info->entry);
1176 dev_info->bus_no = bus_no;
1177 dev_info->dev_no = dev_no;
1178 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1179 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
12e364b9
KC
1180 POSTCODE_SEVERITY_INFO);
1181
98d7b594 1182 if (inmsg->hdr.flags.test_message == 1)
c60c8e26 1183 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1184 else
c60c8e26
BR
1185 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1186 dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1187 dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1188 dev_info->chan_info.channel_type_uuid =
9b1caee7 1189 cmd->create_device.data_type_uuid;
c60c8e26
BR
1190 dev_info->chan_info.intr = cmd->create_device.intr;
1191 list_add(&dev_info->entry, &dev_info_list);
1192 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
12e364b9 1193 POSTCODE_SEVERITY_INFO);
c60c8e26 1194cleanup:
12e364b9 1195 /* get the bus and devNo for DiagPool channel */
c60c8e26
BR
1196 if (dev_info &&
1197 is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1198 g_diagpool_bus_no = bus_no;
1199 g_diagpool_dev_no = dev_no;
12e364b9 1200 }
c60c8e26 1201 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1202 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
98d7b594 1203 inmsg->hdr.flags.response_expected == 1,
c60c8e26 1204 FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1205}
1206
1207static void
3ab47701 1208my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1209{
2ea5117b 1210 struct controlvm_message_packet *cmd = &inmsg->cmd;
0278a905
BR
1211 ulong bus_no = cmd->device_change_state.bus_no;
1212 ulong dev_no = cmd->device_change_state.dev_no;
2ea5117b 1213 struct spar_segment_state state = cmd->device_change_state.state;
0278a905 1214 struct visorchipset_device_info *dev_info = NULL;
12e364b9
KC
1215 int rc = CONTROLVM_RESP_SUCCESS;
1216
0278a905
BR
1217 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1218 if (!dev_info) {
1219 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1220 POSTCODE_SEVERITY_ERR);
22ad57ba 1221 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1222 } else if (dev_info->state.created == 0) {
1223 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1224 POSTCODE_SEVERITY_ERR);
22ad57ba 1225 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1226 }
0278a905
BR
1227 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1228 device_epilog(bus_no, dev_no, state,
1229 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
98d7b594 1230 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1231 FOR_VISORBUS(
0278a905 1232 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1233}
1234
1235static void
3ab47701 1236my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1237{
2ea5117b 1238 struct controlvm_message_packet *cmd = &inmsg->cmd;
61715c8b
BR
1239 ulong bus_no = cmd->destroy_device.bus_no;
1240 ulong dev_no = cmd->destroy_device.dev_no;
1241 struct visorchipset_device_info *dev_info = NULL;
12e364b9
KC
1242 int rc = CONTROLVM_RESP_SUCCESS;
1243
61715c8b
BR
1244 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1245 if (!dev_info)
22ad57ba 1246 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1247 else if (dev_info->state.created == 0)
22ad57ba 1248 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1249
61715c8b
BR
1250 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1251 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1252 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
98d7b594 1253 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1254 FOR_VISORBUS(
61715c8b 1255 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1256}
1257
1258/* When provided with the physical address of the controlvm channel
1259 * (phys_addr), the offset to the payload area we need to manage
1260 * (offset), and the size of this payload area (bytes), fills in the
84b11dfd 1261 * controlvm_payload_info struct. Returns CONTROLVM_RESP_SUCCESS on
 1262 * success or a negative CONTROLVM_RESP_ERROR code on failure.
1263 */
1264static int
5fc0229a 1265initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
84b11dfd 1266 struct controlvm_payload_info *info)
12e364b9 1267{
c242233e 1268 u8 __iomem *payload = NULL;
12e364b9
KC
1269 int rc = CONTROLVM_RESP_SUCCESS;
1270
38f736e9 1271 if (!info) {
22ad57ba 1272 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1273 goto cleanup;
12e364b9 1274 }
84b11dfd 1275 memset(info, 0, sizeof(struct controlvm_payload_info));
12e364b9 1276 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1277 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1278 goto cleanup;
12e364b9
KC
1279 }
1280 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1281 if (!payload) {
22ad57ba 1282 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1283 goto cleanup;
12e364b9
KC
1284 }
1285
1286 info->offset = offset;
1287 info->bytes = bytes;
1288 info->ptr = payload;
12e364b9 1289
f118a39b 1290cleanup:
12e364b9 1291 if (rc < 0) {
f118a39b 1292 if (payload) {
12e364b9
KC
1293 iounmap(payload);
1294 payload = NULL;
1295 }
1296 }
1297 return rc;
1298}
1299
1300static void
84b11dfd 1301destroy_controlvm_payload_info(struct controlvm_payload_info *info)
12e364b9 1302{
597c338f 1303 if (info->ptr) {
12e364b9
KC
1304 iounmap(info->ptr);
1305 info->ptr = NULL;
1306 }
84b11dfd 1307 memset(info, 0, sizeof(struct controlvm_payload_info));
12e364b9
KC
1308}
1309
1310static void
1311initialize_controlvm_payload(void)
1312{
c3d9a224 1313 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
cafefc0c
BR
1314 u64 payload_offset = 0;
1315 u32 payload_bytes = 0;
26eb2c0c 1316
c3d9a224 1317 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1318 offsetof(struct spar_controlvm_channel_protocol,
1319 request_payload_offset),
cafefc0c 1320 &payload_offset, sizeof(payload_offset)) < 0) {
12e364b9
KC
1321 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1322 POSTCODE_SEVERITY_ERR);
1323 return;
1324 }
c3d9a224 1325 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1326 offsetof(struct spar_controlvm_channel_protocol,
1327 request_payload_bytes),
cafefc0c 1328 &payload_bytes, sizeof(payload_bytes)) < 0) {
12e364b9
KC
1329 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1330 POSTCODE_SEVERITY_ERR);
1331 return;
1332 }
1333 initialize_controlvm_payload_info(phys_addr,
cafefc0c 1334 payload_offset, payload_bytes,
84982fbf 1335 &controlvm_payload_info);
12e364b9
KC
1336}
1337
1338/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1339 * Returns CONTROLVM_RESP_xxx code.
1340 */
1341int
1342visorchipset_chipset_ready(void)
1343{
eb34e877 1344 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1345 return CONTROLVM_RESP_SUCCESS;
1346}
1347EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1348
1349int
1350visorchipset_chipset_selftest(void)
1351{
1352 char env_selftest[20];
1353 char *envp[] = { env_selftest, NULL };
26eb2c0c 1354
12e364b9 1355 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1356 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1357 envp);
1358 return CONTROLVM_RESP_SUCCESS;
1359}
1360EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1361
1362/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1363 * Returns CONTROLVM_RESP_xxx code.
1364 */
1365int
1366visorchipset_chipset_notready(void)
1367{
eb34e877 1368 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1369 return CONTROLVM_RESP_SUCCESS;
1370}
1371EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1372
1373static void
77a0449d 1374chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1375{
1376 int rc = visorchipset_chipset_ready();
26eb2c0c 1377
12e364b9
KC
1378 if (rc != CONTROLVM_RESP_SUCCESS)
1379 rc = -rc;
77a0449d
BR
1380 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1381 controlvm_respond(msg_hdr, rc);
1382 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1383 /* Send CHIPSET_READY response when all modules have been loaded
1384 * and disks mounted for the partition
1385 */
77a0449d 1386 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1387 }
1388}
1389
1390static void
77a0449d 1391chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1392{
1393 int rc = visorchipset_chipset_selftest();
26eb2c0c 1394
12e364b9
KC
1395 if (rc != CONTROLVM_RESP_SUCCESS)
1396 rc = -rc;
77a0449d
BR
1397 if (msg_hdr->flags.response_expected)
1398 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1399}
1400
1401static void
77a0449d 1402chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1403{
1404 int rc = visorchipset_chipset_notready();
26eb2c0c 1405
12e364b9
KC
1406 if (rc != CONTROLVM_RESP_SUCCESS)
1407 rc = -rc;
77a0449d
BR
1408 if (msg_hdr->flags.response_expected)
1409 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1410}
1411
1412/* This is your "one-stop" shop for grabbing the next message from the
1413 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1414 */
1415static BOOL
3ab47701 1416read_controlvm_event(struct controlvm_message *msg)
12e364b9 1417{
c3d9a224 1418 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1419 CONTROLVM_QUEUE_EVENT, msg)) {
1420 /* got a message */
0aca7844 1421 if (msg->hdr.flags.test_message == 1)
12e364b9 1422 return FALSE;
e22a4a0f 1423 return TRUE;
12e364b9
KC
1424 }
1425 return FALSE;
1426}
1427
1428/*
1429 * The general parahotplug flow works as follows. The visorchipset
1430 * driver receives a DEVICE_CHANGESTATE message from Command
1431 * specifying a physical device to enable or disable. The CONTROLVM
1432 * message handler calls parahotplug_process_message, which then adds
1433 * the message to a global list and kicks off a udev event which
1434 * causes a user level script to enable or disable the specified
1435 * device. The udev script then writes to
1436 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1437 * to get called, at which point the appropriate CONTROLVM message is
1438 * retrieved from the list and responded to.
1439 */
1440
1441#define PARAHOTPLUG_TIMEOUT_MS 2000
1442
1443/*
1444 * Generate unique int to match an outstanding CONTROLVM message with a
1445 * udev script /proc response
1446 */
1447static int
1448parahotplug_next_id(void)
1449{
1450 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1451
12e364b9
KC
1452 return atomic_inc_return(&id);
1453}
1454
1455/*
1456 * Returns the time (in jiffies) when a CONTROLVM message on the list
1457 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1458 */
1459static unsigned long
1460parahotplug_next_expiration(void)
1461{
2cc1a1b3 1462 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1463}
1464
1465/*
1466 * Create a parahotplug_request, which is basically a wrapper for a
1467 * CONTROLVM_MESSAGE that we can stick on a list
1468 */
1469static struct parahotplug_request *
3ab47701 1470parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1471{
ea0dcfcf
QL
 1471{
 1472 struct parahotplug_request *req;
1473
6a55e3c3 1474 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1475 if (!req)
12e364b9
KC
1476 return NULL;
1477
1478 req->id = parahotplug_next_id();
1479 req->expiration = parahotplug_next_expiration();
1480 req->msg = *msg;
1481
1482 return req;
1483}
1484
1485/*
1486 * Free a parahotplug_request.
1487 */
1488static void
1489parahotplug_request_destroy(struct parahotplug_request *req)
1490{
1491 kfree(req);
1492}
1493
1494/*
1495 * Cause uevent to run the user level script to do the disable/enable
1496 * specified in (the CONTROLVM message in) the specified
1497 * parahotplug_request
1498 */
1499static void
1500parahotplug_request_kickoff(struct parahotplug_request *req)
1501{
2ea5117b 1502 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1503 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1504 env_func[40];
1505 char *envp[] = {
1506 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1507 };
1508
1509 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1510 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1511 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1512 cmd->device_change_state.state.active);
12e364b9 1513 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1514 cmd->device_change_state.bus_no);
12e364b9 1515 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1516 cmd->device_change_state.dev_no >> 3);
12e364b9 1517 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1518 cmd->device_change_state.dev_no & 0x7);
12e364b9 1519
eb34e877 1520 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1521 envp);
1522}
1523
1524/*
1525 * Remove any request from the list that's been on there too long and
1526 * respond with an error.
1527 */
1528static void
1529parahotplug_process_list(void)
1530{
1531 struct list_head *pos = NULL;
1532 struct list_head *tmp = NULL;
1533
ddf5de53 1534 spin_lock(&parahotplug_request_list_lock);
12e364b9 1535
ddf5de53 1536 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1537 struct parahotplug_request *req =
1538 list_entry(pos, struct parahotplug_request, list);
1539
1540 if (!time_after_eq(jiffies, req->expiration))
1541 continue;
1542
1543 list_del(pos);
1544 if (req->msg.hdr.flags.response_expected)
1545 controlvm_respond_physdev_changestate(
1546 &req->msg.hdr,
1547 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1548 req->msg.cmd.device_change_state.state);
1549 parahotplug_request_destroy(req);
1550 }
1551
ddf5de53 1552 spin_unlock(&parahotplug_request_list_lock);
1553}
1554
1555/*
1556 * Called from the /proc handler, which means the user script has
1557 * finished the enable/disable. Find the matching identifier, and
1558 * respond to the CONTROLVM message with success.
1559 */
1560static int
b06bdf7d 1561parahotplug_request_complete(int id, u16 active)
1562{
1563 struct list_head *pos = NULL;
1564 struct list_head *tmp = NULL;
1565
ddf5de53 1566 spin_lock(&parahotplug_request_list_lock);
1567
1568 /* Look for a request matching "id". */
ddf5de53 1569 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1570 struct parahotplug_request *req =
1571 list_entry(pos, struct parahotplug_request, list);
1572 if (req->id == id) {
1573 /* Found a match. Remove it from the list and
1574 * respond.
1575 */
1576 list_del(pos);
ddf5de53 1577 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1578 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1579 if (req->msg.hdr.flags.response_expected)
1580 controlvm_respond_physdev_changestate(
1581 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1582 req->msg.cmd.device_change_state.state);
1583 parahotplug_request_destroy(req);
1584 return 0;
1585 }
1586 }
1587
ddf5de53 1588 spin_unlock(&parahotplug_request_list_lock);
1589 return -1;
1590}
1591
1592/*
1593 * Enables or disables a PCI device by kicking off a udev script
1594 */
bd5b9b32 1595static void
3ab47701 1596parahotplug_process_message(struct controlvm_message *inmsg)
1597{
1598 struct parahotplug_request *req;
1599
1600 req = parahotplug_request_create(inmsg);
1601
38f736e9 1602 if (!req)
12e364b9 1603 return;
12e364b9 1604
2ea5117b 1605 if (inmsg->cmd.device_change_state.state.active) {
1606 /* For enable messages, just respond with success
1607 * right away. This is a bit of a hack, but there are
1608 * issues with the early enable messages we get (with
1609 * either the udev script not detecting that the device
1610 * is up, or not getting called at all). Fortunately
1611 * the messages that get lost don't matter anyway, as
1612 * devices are automatically enabled at
1613 * initialization.
1614 */
1615 parahotplug_request_kickoff(req);
1616 controlvm_respond_physdev_changestate(&inmsg->hdr,
1617 CONTROLVM_RESP_SUCCESS,
1618 inmsg->cmd.device_change_state.state);
1619 parahotplug_request_destroy(req);
1620 } else {
1621 /* For disable messages, add the request to the
1622 * request list before kicking off the udev script. It
1623 * won't get responded to until the script has
1624 * indicated it's done.
1625 */
1626 spin_lock(&parahotplug_request_list_lock);
1627 list_add_tail(&req->list, &parahotplug_request_list);
1628 spin_unlock(&parahotplug_request_list_lock);
1629
1630 parahotplug_request_kickoff(req);
1631 }
1632}
1633
1634/* Process a controlvm message.
1635 * Return result:
1636 * FALSE - this function will return FALSE only in the case where the
1637 * controlvm message was NOT processed, but processing must be
1638 * retried before reading the next controlvm message; a
1639 * scenario where this can occur is when we need to throttle
1640 * the allocation of memory in which to copy out controlvm
1641 * payload data
1642 * TRUE - processing of the controlvm message completed,
1643 * either successfully or with an error.
1644 */
1645static BOOL
3ab47701 1646handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
12e364b9 1647{
2ea5117b 1648 struct controlvm_message_packet *cmd = &inmsg.cmd;
1649 u64 parm_addr = 0;
1650 u32 parm_bytes = 0;
317d9614 1651 struct parser_context *parser_ctx = NULL;
818352a8 1652 bool local_addr = false;
3ab47701 1653 struct controlvm_message ackmsg;
1654
1655 /* create parsing context if necessary */
818352a8 1656 local_addr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1657 if (channel_addr == 0)
12e364b9 1658 return TRUE;
1659 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1660 parm_bytes = inmsg.hdr.payload_bytes;
1661
1662 /* Parameter and channel addresses within test messages actually lie
1663 * within our OS-controlled memory. We need to know that, because it
1664 * makes a difference in how we compute the virtual address.
1665 */
818352a8 1666 if (parm_addr != 0 && parm_bytes != 0) {
12e364b9 1667 BOOL retry = FALSE;
26eb2c0c 1668
12e364b9 1669 parser_ctx =
1670 parser_init_byte_stream(parm_addr, parm_bytes,
1671 local_addr, &retry);
1672 if (!parser_ctx && retry)
1673 return FALSE;
1674 }
1675
818352a8 1676 if (!local_addr) {
1677 controlvm_init_response(&ackmsg, &inmsg.hdr,
1678 CONTROLVM_RESP_SUCCESS);
1679 if (controlvm_channel)
1680 visorchannel_signalinsert(controlvm_channel,
1681 CONTROLVM_QUEUE_ACK,
1682 &ackmsg);
12e364b9 1683 }
98d7b594 1684 switch (inmsg.hdr.id) {
12e364b9 1685 case CONTROLVM_CHIPSET_INIT:
1686 chipset_init(&inmsg);
1687 break;
1688 case CONTROLVM_BUS_CREATE:
1689 bus_create(&inmsg);
1690 break;
1691 case CONTROLVM_BUS_DESTROY:
1692 bus_destroy(&inmsg);
1693 break;
1694 case CONTROLVM_BUS_CONFIGURE:
1695 bus_configure(&inmsg, parser_ctx);
1696 break;
1697 case CONTROLVM_DEVICE_CREATE:
1698 my_device_create(&inmsg);
1699 break;
1700 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1701 if (cmd->device_change_state.flags.phys_device) {
1702 parahotplug_process_message(&inmsg);
1703 } else {
1704 /* save the hdr and cmd structures for later use */
1705 /* when sending back the response to Command */
1706 my_device_changestate(&inmsg);
da021f02 1707 g_diag_msg_hdr = inmsg.hdr;
4f44b72d 1708 g_devicechangestate_packet = inmsg.cmd;
1709 break;
1710 }
1711 break;
1712 case CONTROLVM_DEVICE_DESTROY:
1713 my_device_destroy(&inmsg);
1714 break;
1715 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1716 /* no op for now, just send a respond that we passed */
98d7b594 1717 if (inmsg.hdr.flags.response_expected)
1718 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1719 break;
1720 case CONTROLVM_CHIPSET_READY:
1721 chipset_ready(&inmsg.hdr);
1722 break;
1723 case CONTROLVM_CHIPSET_SELFTEST:
1724 chipset_selftest(&inmsg.hdr);
1725 break;
1726 case CONTROLVM_CHIPSET_STOP:
1727 chipset_notready(&inmsg.hdr);
1728 break;
1729 default:
98d7b594 1730 if (inmsg.hdr.flags.response_expected)
12e364b9 1731 controlvm_respond(&inmsg.hdr,
818352a8 1732 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1733 break;
1734 }
1735
38f736e9 1736 if (parser_ctx) {
1737 parser_done(parser_ctx);
1738 parser_ctx = NULL;
1739 }
1740 return TRUE;
1741}
1742
d746cb55 1743static HOSTADDRESS controlvm_get_channel_address(void)
524b0b63 1744{
5fc0229a 1745 u64 addr = 0;
b3c55b13 1746 u32 size = 0;
524b0b63 1747
0aca7844 1748 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1749 return 0;
0aca7844 1750
524b0b63
BR
1751 return addr;
1752}
1753
1754static void
1755controlvm_periodic_work(struct work_struct *work)
1756{
3ab47701 1757 struct controlvm_message inmsg;
1c1ed292 1758 BOOL got_command = FALSE;
12e364b9 1759 BOOL handle_command_failed = FALSE;
1c1ed292 1760 static u64 poll_count;
1761
1762 /* make sure visorbus server is registered for controlvm callbacks */
1763 if (visorchipset_serverregwait && !serverregistered)
1c1ed292 1764 goto cleanup;
 1765 /* make sure visorclientbus server is registered for controlvm
1766 * callbacks
1767 */
1768 if (visorchipset_clientregwait && !clientregistered)
1c1ed292 1769 goto cleanup;
12e364b9 1770
1771 poll_count++;
1772 if (poll_count >= 250)
1773 ; /* keep going */
1774 else
1c1ed292 1775 goto cleanup;
1776
1777 /* Check events to determine if response to CHIPSET_READY
1778 * should be sent
1779 */
1780 if (visorchipset_holdchipsetready &&
1781 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1782 if (check_chipset_events() == 1) {
da021f02 1783 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1784 clear_chipset_events();
da021f02 1785 memset(&g_chipset_msg_hdr, 0,
98d7b594 1786 sizeof(struct controlvm_message_header));
1787 }
1788 }
1789
c3d9a224 1790 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1791 CONTROLVM_QUEUE_RESPONSE,
1792 &inmsg))
1793 ;
1c1ed292 1794 if (!got_command) {
7166ed19 1795 if (controlvm_pending_msg_valid) {
1796 /* we throttled processing of a prior
1797 * msg, so try to process it again
1798 * rather than reading a new one
1799 */
1800 inmsg = controlvm_pending_msg;
1801 controlvm_pending_msg_valid = FALSE;
 1c1ed292 1802 got_command = TRUE;
75c1f8b7 1803 } else {
1c1ed292 1804 got_command = read_controlvm_event(&inmsg);
75c1f8b7 1805 }
8a1182eb 1806 }
1807
1808 handle_command_failed = FALSE;
1c1ed292 1809 while (got_command && (!handle_command_failed)) {
b53e0e93 1810 most_recent_message_jiffies = jiffies;
1811 if (handle_command(inmsg,
1812 visorchannel_get_physaddr
c3d9a224 1813 (controlvm_channel)))
1c1ed292 1814 got_command = read_controlvm_event(&inmsg);
1815 else {
1816 /* this is a scenario where throttling
1817 * is required, but probably NOT an
1818 * error...; we stash the current
1819 * controlvm msg so we will attempt to
1820 * reprocess it on our next loop
1821 */
1822 handle_command_failed = TRUE;
1823 controlvm_pending_msg = inmsg;
1824 controlvm_pending_msg_valid = TRUE;
1825 }
1826 }
1827
1828 /* parahotplug_worker */
1829 parahotplug_process_list();
1830
1c1ed292 1831cleanup:
1832
1833 if (time_after(jiffies,
b53e0e93 1834 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1835 /* it's been longer than MIN_IDLE_SECONDS since we
1836 * processed our last controlvm message; slow down the
1837 * polling
1838 */
1839 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1840 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1841 } else {
1842 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1843 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1844 }
1845
1846 queue_delayed_work(periodic_controlvm_workqueue,
1847 &periodic_controlvm_work, poll_jiffies);
1848}
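/*
 * Polling summary (descriptive): each pass drains the RESPONSE queue,
 * then handles EVENT messages, retrying a previously stashed message
 * first if the last pass had to throttle.  When no message has arrived
 * for MIN_IDLE_SECONDS the work item re-arms itself at the slow poll
 * interval; any new message switches it back to the fast interval.
 */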
1849
1850static void
1851setup_crash_devices_work_queue(struct work_struct *work)
1852{
1853 struct controlvm_message local_crash_bus_msg;
1854 struct controlvm_message local_crash_dev_msg;
3ab47701 1855 struct controlvm_message msg;
1856 u32 local_crash_msg_offset;
1857 u16 local_crash_msg_count;
1858
1859 /* make sure visorbus server is registered for controlvm callbacks */
1860 if (visorchipset_serverregwait && !serverregistered)
e6bdb904 1861 goto cleanup;
1862
 1863 /* make sure visorclientbus server is registered for controlvm
1864 * callbacks
1865 */
1866 if (visorchipset_clientregwait && !clientregistered)
e6bdb904 1867 goto cleanup;
1868
1869 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1870
1871 /* send init chipset msg */
98d7b594 1872 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1873 msg.cmd.init_chipset.bus_count = 23;
1874 msg.cmd.init_chipset.switch_count = 0;
1875
1876 chipset_init(&msg);
1877
12e364b9 1878 /* get saved message count */
c3d9a224 1879 if (visorchannel_read(controlvm_channel,
1880 offsetof(struct spar_controlvm_channel_protocol,
1881 saved_crash_message_count),
e6bdb904 1882 &local_crash_msg_count, sizeof(u16)) < 0) {
1883 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1884 POSTCODE_SEVERITY_ERR);
1885 return;
1886 }
1887
e6bdb904 1888 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 1889 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 1890 local_crash_msg_count,
1891 POSTCODE_SEVERITY_ERR);
1892 return;
1893 }
1894
1895 /* get saved crash message offset */
c3d9a224 1896 if (visorchannel_read(controlvm_channel,
1897 offsetof(struct spar_controlvm_channel_protocol,
1898 saved_crash_message_offset),
e6bdb904 1899 &local_crash_msg_offset, sizeof(u32)) < 0) {
1900 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1901 POSTCODE_SEVERITY_ERR);
1902 return;
1903 }
1904
1905 /* read create device message for storage bus offset */
c3d9a224 1906 if (visorchannel_read(controlvm_channel,
1907 local_crash_msg_offset,
1908 &local_crash_bus_msg,
3ab47701 1909 sizeof(struct controlvm_message)) < 0) {
1910 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1911 POSTCODE_SEVERITY_ERR);
1912 return;
1913 }
1914
1915 /* read create device message for storage device */
c3d9a224 1916 if (visorchannel_read(controlvm_channel,
e6bdb904 1917 local_crash_msg_offset +
3ab47701 1918 sizeof(struct controlvm_message),
e6bdb904 1919 &local_crash_dev_msg,
3ab47701 1920 sizeof(struct controlvm_message)) < 0) {
1921 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1922 POSTCODE_SEVERITY_ERR);
1923 return;
1924 }
1925
1926 /* reuse IOVM create bus message */
1927 if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
1928 bus_create(&local_crash_bus_msg);
75c1f8b7 1929 } else {
1930 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1931 POSTCODE_SEVERITY_ERR);
1932 return;
1933 }
1934
1935 /* reuse create device message for storage device */
1936 if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
1937 my_device_create(&local_crash_dev_msg);
75c1f8b7 1938 } else {
1939 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1940 POSTCODE_SEVERITY_ERR);
1941 return;
1942 }
1943 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1944 return;
1945
e6bdb904 1946cleanup:
12e364b9 1947
911e213e 1948 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1949
1950 queue_delayed_work(periodic_controlvm_workqueue,
1951 &periodic_controlvm_work, poll_jiffies);
1952}
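/*
 * Crash-kernel path (descriptive): the controlvm channel carries a saved
 * bus-create and a saved device-create message at
 * saved_crash_message_offset.  When booting a kdump kernel, the worker
 * above sends a synthetic CONTROLVM_CHIPSET_INIT and then replays those
 * two saved messages through bus_create() and my_device_create(), so the
 * storage device needed to take the dump is recreated without waiting
 * for the normal message flow.
 */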
1953
1954static void
8e3fedd6 1955bus_create_response(ulong bus_no, int response)
12e364b9 1956{
8e3fedd6 1957 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
1958}
1959
1960static void
8e3fedd6 1961bus_destroy_response(ulong bus_no, int response)
12e364b9 1962{
8e3fedd6 1963 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
1964}
1965
1966static void
8e3fedd6 1967device_create_response(ulong bus_no, ulong dev_no, int response)
12e364b9 1968{
8e3fedd6 1969 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
1970}
1971
1972static void
8e3fedd6 1973device_destroy_response(ulong bus_no, ulong dev_no, int response)
12e364b9 1974{
8e3fedd6 1975 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
1976}
1977
1978void
8420f417 1979visorchipset_device_pause_response(ulong bus_no, ulong dev_no, int response)
12e364b9 1980{
12e364b9 1981 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 1982 bus_no, dev_no, response,
bd0d2dcc 1983 segment_state_standby);
12e364b9 1984}
927c7927 1985EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
1986
1987static void
8e3fedd6 1988device_resume_response(ulong bus_no, ulong dev_no, int response)
1989{
1990 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8e3fedd6 1991 bus_no, dev_no, response,
bd0d2dcc 1992 segment_state_running);
1993}
1994
1995BOOL
77db7127 1996visorchipset_get_bus_info(ulong bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 1997{
1390b88c 1998 void *p = findbus(&bus_info_list, bus_no);
26eb2c0c 1999
0aca7844 2000 if (!p)
12e364b9 2001 return FALSE;
77db7127 2002 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2003 return TRUE;
2004}
2005EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2006
2007BOOL
58dd8f2d 2008visorchipset_set_bus_context(ulong bus_no, void *context)
12e364b9 2009{
1390b88c 2010 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
26eb2c0c 2011
0aca7844 2012 if (!p)
12e364b9 2013 return FALSE;
2014 p->bus_driver_context = context;
2015 return TRUE;
2016}
2017EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2018
2019BOOL
2020visorchipset_get_device_info(ulong bus_no, ulong dev_no,
2021 struct visorchipset_device_info *dev_info)
12e364b9 2022{
1390b88c 2023 void *p = finddevice(&dev_info_list, bus_no, dev_no);
26eb2c0c 2024
0aca7844 2025 if (!p)
12e364b9 2026 return FALSE;
b486df19 2027 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2028 return TRUE;
2029}
2030EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2031
2032BOOL
cf0bd0b5 2033visorchipset_set_device_context(ulong bus_no, ulong dev_no, void *context)
12e364b9 2034{
246e0cd0 2035 struct visorchipset_device_info *p =
1390b88c 2036 finddevice(&dev_info_list, bus_no, dev_no);
26eb2c0c 2037
0aca7844 2038 if (!p)
12e364b9 2039 return FALSE;
2040 p->bus_driver_context = context;
2041 return TRUE;
2042}
2043EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
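/*
 * Hypothetical caller (sketch only; my_ctx and the surrounding driver are
 * not part of this file): a bus driver can look up a bus it has been told
 * about and attach its own state to it:
 *
 *	struct visorchipset_bus_info bi;
 *
 *	if (visorchipset_get_bus_info(bus_no, &bi)) {
 *		my_ctx = kzalloc(sizeof(*my_ctx), GFP_KERNEL);
 *		if (my_ctx)
 *			visorchipset_set_bus_context(bus_no, my_ctx);
 *	}
 *
 * The device variants above work the same way, keyed by (bus_no, dev_no).
 */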
2044
2045/* Generic wrapper function for allocating memory from a kmem_cache pool.
2046 */
2047void *
2048visorchipset_cache_alloc(struct kmem_cache *pool, BOOL ok_to_block,
2049 char *fn, int ln)
2050{
2051 gfp_t gfp;
2052 void *p;
2053
2054 if (ok_to_block)
2055 gfp = GFP_KERNEL;
2056 else
2057 gfp = GFP_ATOMIC;
2058 /* __GFP_NORETRY means "ok to fail", meaning
2059 * kmem_cache_alloc() can return NULL, implying the caller CAN
2060 * cope with failure. If you do NOT specify __GFP_NORETRY,
2061 * Linux will go to extreme measures to get memory for you
2062 * (like, invoke oom killer), which will probably cripple the
2063 * system.
2064 */
2065 gfp |= __GFP_NORETRY;
2066 p = kmem_cache_alloc(pool, gfp);
0aca7844 2067 if (!p)
12e364b9 2068 return NULL;
0aca7844 2069
712f42cd 2070 atomic_inc(&visorchipset_cache_buffers_in_use);
2071 return p;
2072}
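/*
 * Illustrative use (caller code is hypothetical): because __GFP_NORETRY
 * is always set, a NULL return is an expected outcome and callers must
 * back off rather than assume success:
 *
 *	entry = visorchipset_cache_alloc(putfile_buffer_list_pool, TRUE,
 *					 __FILE__, __LINE__);
 *	if (!entry)
 *		return -ENOMEM;
 */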
2073
2074/* Generic wrapper function for freeing memory from a kmem_cache pool.
2075 */
2076void
2077visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2078{
0aca7844 2079 if (!p)
12e364b9 2080 return;
0aca7844 2081
712f42cd 2082 atomic_dec(&visorchipset_cache_buffers_in_use);
2083 kmem_cache_free(pool, p);
2084}
2085
18b87ed1 2086static ssize_t chipsetready_store(struct device *dev,
2087 struct device_attribute *attr,
2088 const char *buf, size_t count)
12e364b9 2089{
18b87ed1 2090 char msgtype[64];
12e364b9 2091
2092 if (sscanf(buf, "%63s", msgtype) != 1)
2093 return -EINVAL;
2094
2095 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2096 chipset_events[0] = 1;
2097 return count;
2098 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2099 chipset_events[1] = 1;
2100 return count;
2101 }
2102 return -EINVAL;
2103}
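/*
 * Illustrative flow (the sysfs path is an assumption): guest boot scripts
 * write "CALLHOMEDISK_MOUNTED" and "MODULES_LOADED" to the chipsetready
 * attribute; once both chipset_events[] flags are set,
 * check_chipset_events() lets controlvm_periodic_work() send the
 * CHIPSET_READY response that was held back while
 * visorchipset_holdchipsetready was enabled.
 */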
2104
2105/* The parahotplug/devicedisabled interface gets called by our support script
2106 * when an SR-IOV device has been shut down. The ID is passed to the script
2107 * and then passed back when the device has been removed.
2108 */
2109static ssize_t devicedisabled_store(struct device *dev,
2110 struct device_attribute *attr,
2111 const char *buf, size_t count)
2112{
2113 uint id;
2114
2115 if (kstrtouint(buf, 10, &id) != 0)
2116 return -EINVAL;
2117
2118 parahotplug_request_complete(id, 0);
2119 return count;
2120}
2121
2122/* The parahotplug/deviceenabled interface gets called by our support script
2123 * when an SR-IOV device has been recovered. The ID is passed to the script
2124 * and then passed back when the device has been brought back up.
2125 */
2126static ssize_t deviceenabled_store(struct device *dev,
2127 struct device_attribute *attr,
2128 const char *buf, size_t count)
2129{
2130 uint id;
2131
2132 if (kstrtouint(buf, 10, &id) != 0)
2133 return -EINVAL;
2134
2135 parahotplug_request_complete(id, 1);
2136 return count;
2137}
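/*
 * Illustrative script side (the exact attribute path is an assumption):
 * after finishing the teardown the udev helper echoes the id it received
 * back to the driver, e.g.
 *
 *	echo "$SPAR_PARAHOTPLUG_ID" > .../parahotplug/devicedisabled
 *
 * which lands in devicedisabled_store() above and completes the pending
 * request via parahotplug_request_complete(id, 0).
 */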
2138
2139static int __init
2140visorchipset_init(void)
2141{
2142 int rc = 0, x = 0;
8a1182eb 2143 HOSTADDRESS addr;
12e364b9 2144
2145 if (!unisys_spar_platform)
2146 return -ENODEV;
2147
2148 memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
2149 memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
84982fbf 2150 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2151 memset(&livedump_info, 0, sizeof(livedump_info));
2152 atomic_set(&livedump_info.buffers_in_use, 0);
12e364b9 2153
9f8d0e8b 2154 if (visorchipset_testvnic) {
2155 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2156 rc = x;
a6a3989b 2157 goto cleanup;
9f8d0e8b 2158 }
12e364b9 2159
2160 addr = controlvm_get_channel_address();
2161 if (addr != 0) {
c3d9a224 2162 controlvm_channel =
2163 visorchannel_create_with_lock
2164 (addr,
d19642f6 2165 sizeof(struct spar_controlvm_channel_protocol),
5fbaa4b3 2166 spar_controlvm_channel_protocol_uuid);
93a84565 2167 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
c3d9a224 2168 visorchannel_get_header(controlvm_channel))) {
2169 initialize_controlvm_payload();
2170 } else {
2171 visorchannel_destroy(controlvm_channel);
2172 controlvm_channel = NULL;
2173 return -ENODEV;
2174 }
2175 } else {
2176 return -ENODEV;
2177 }
2178
2179 major_dev = MKDEV(visorchipset_major, 0);
2180 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2181 if (rc < 0) {
4cb005a9 2182 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2183 goto cleanup;
4cb005a9 2184 }
9f8d0e8b 2185
da021f02 2186 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2187
da021f02 2188 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2189
da021f02 2190 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2191
2192 putfile_buffer_list_pool =
2193 kmem_cache_create(putfile_buffer_list_pool_name,
2194 sizeof(struct putfile_buffer_entry),
2195 0, SLAB_HWCACHE_ALIGN, NULL);
1eee0011 2196 if (!putfile_buffer_list_pool) {
2197 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2198 rc = -1;
a6a3989b 2199 goto cleanup;
12e364b9 2200 }
2098dbd1 2201 if (!visorchipset_disable_controlvm) {
12e364b9 2202 /* if booting in a crash kernel */
1ba00980 2203 if (is_kdump_kernel())
9232d2d6 2204 INIT_DELAYED_WORK(&periodic_controlvm_work,
2205 setup_crash_devices_work_queue);
2206 else
9232d2d6 2207 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9 2208 controlvm_periodic_work);
9232d2d6 2209 periodic_controlvm_workqueue =
2210 create_singlethread_workqueue("visorchipset_controlvm");
2211
38f736e9 2212 if (!periodic_controlvm_workqueue) {
2213 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2214 DIAG_SEVERITY_ERR);
2215 rc = -ENOMEM;
a6a3989b 2216 goto cleanup;
4cb005a9 2217 }
b53e0e93 2218 most_recent_message_jiffies = jiffies;
911e213e 2219 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2220 rc = queue_delayed_work(periodic_controlvm_workqueue,
2221 &periodic_controlvm_work, poll_jiffies);
4cb005a9 2222 if (rc < 0) {
2223 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2224 DIAG_SEVERITY_ERR);
a6a3989b 2225 goto cleanup;
4cb005a9 2226 }
2227 }
2228
2229 visorchipset_platform_device.dev.devt = major_dev;
2230 if (platform_device_register(&visorchipset_platform_device) < 0) {
2231 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2232 rc = -1;
a6a3989b 2233 goto cleanup;
4cb005a9 2234 }
12e364b9 2235 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
22ad57ba 2236 rc = 0;
a6a3989b 2237cleanup:
12e364b9 2238 if (rc) {
2239 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2240 POSTCODE_SEVERITY_ERR);
2241 }
2242 return rc;
2243}
2244
2245static void
2246visorchipset_exit(void)
2247{
2248 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2249
2250 if (visorchipset_disable_controlvm) {
2251 ;
2252 } else {
2253 cancel_delayed_work(&periodic_controlvm_work);
2254 flush_workqueue(periodic_controlvm_workqueue);
2255 destroy_workqueue(periodic_controlvm_workqueue);
2256 periodic_controlvm_workqueue = NULL;
84982fbf 2257 destroy_controlvm_payload_info(&controlvm_payload_info);
12e364b9 2258 }
2259 if (putfile_buffer_list_pool) {
2260 kmem_cache_destroy(putfile_buffer_list_pool);
2261 putfile_buffer_list_pool = NULL;
12e364b9 2262 }
1783319f 2263
2264 cleanup_controlvm_structures();
2265
da021f02 2266 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2267
da021f02 2268 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2269
da021f02 2270 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2271
c3d9a224 2272 visorchannel_destroy(controlvm_channel);
8a1182eb 2273
addceb12 2274 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
12e364b9 2275 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2276}
2277
2278module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2279MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2280int visorchipset_testvnic = 0;
2281
2282module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2283MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2284int visorchipset_testvnicclient = 0;
2285
2286module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2287MODULE_PARM_DESC(visorchipset_testmsg,
2288 "1 to manufacture the chipset, bus, and switch messages");
2289int visorchipset_testmsg = 0;
2290
2291module_param_named(major, visorchipset_major, int, S_IRUGO);
2292MODULE_PARM_DESC(visorchipset_major, "major device number to use for the device node");
2293int visorchipset_major = 0;
2294
2295module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
 2296 MODULE_PARM_DESC(visorchipset_serverregwait,
2297 "1 to have the module wait for the visor bus to register");
2298int visorchipset_serverregwait = 0; /* default is off */
2299module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2300MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2301int visorchipset_clientregwait = 1; /* default is on */
2302module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2303MODULE_PARM_DESC(visorchipset_testteardown,
2304 "1 to test teardown of the chipset, bus, and switch");
2305int visorchipset_testteardown = 0; /* default is off */
2306module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2307 S_IRUGO);
2308MODULE_PARM_DESC(visorchipset_disable_controlvm,
2309 "1 to disable polling of controlVm channel");
2310int visorchipset_disable_controlvm = 0; /* default is off */
2311module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2312 int, S_IRUGO);
2313MODULE_PARM_DESC(visorchipset_holdchipsetready,
2314 "1 to hold response to CHIPSET_READY");
2315int visorchipset_holdchipsetready = 0; /* default is to send CHIPSET_READY
2316 * response immediately */
2317module_init(visorchipset_init);
2318module_exit(visorchipset_exit);
2319
2320MODULE_AUTHOR("Unisys");
2321MODULE_LICENSE("GPL");
2322MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2323 VERSION);
2324MODULE_VERSION(VERSION);