staging: unisys: Move visorchannel into visorbus
[deliverable/linux.git] / drivers / staging / unisys / visorchipset / visorchipset_main.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
7023638c 18#include "version.h"
12e364b9
KC
19#include "visorchipset.h"
20#include "procobjecttree.h"
21#include "visorchannel.h"
22#include "periodic_work.h"
12e364b9
KC
23#include "file.h"
24#include "parser.h"
12e364b9 25#include "uisutils.h"
12e364b9
KC
26#include "controlvmcompletionstatus.h"
27#include "guestlinuxdebug.h"
12e364b9
KC
28
29#include <linux/nls.h>
30#include <linux/netdevice.h>
31#include <linux/platform_device.h>
90addb02 32#include <linux/uuid.h>
1ba00980 33#include <linux/crash_dump.h>
12e364b9
KC
34
35#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
37 * vnic loopback test */
38#define TEST_VNIC_SWITCHNO 1
39#define TEST_VNIC_BUSNO 9
40
41#define MAX_NAME_SIZE 128
42#define MAX_IP_SIZE 50
43#define MAXOUTSTANDINGCHANNELCOMMAND 256
44#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
b615d628
JS
47/*
48 * Module parameters
49 */
50static int visorchipset_testvnic;
51static int visorchipset_testvnicclient;
52static int visorchipset_testmsg;
53static int visorchipset_major;
54static int visorchipset_serverregwait;
55static int visorchipset_clientregwait = 1; /* default is on */
56static int visorchipset_testteardown;
57static int visorchipset_disable_controlvm;
58static int visorchipset_holdchipsetready;
59
12e364b9
KC
60/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
61* we switch to slow polling mode. As soon as we get a controlvm
62* message, we switch back to fast polling mode.
63*/
64#define MIN_IDLE_SECONDS 10
52063eca
JS
65static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
66static unsigned long most_recent_message_jiffies; /* when we got our last
bd5b9b32 67 * controlvm message */
12e364b9
KC
68static int serverregistered;
69static int clientregistered;
70
71#define MAX_CHIPSET_EVENTS 2
c242233e 72static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 73
9232d2d6
BR
74static struct delayed_work periodic_controlvm_work;
75static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 76static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 77
da021f02 78static struct controlvm_message_header g_chipset_msg_hdr;
59827f00 79static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 80 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 81/* 0xffffff is an invalid Bus/Device number */
52063eca
JS
82static u32 g_diagpool_bus_no = 0xffffff;
83static u32 g_diagpool_dev_no = 0xffffff;
4f44b72d 84static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9
KC
85
86/* Only VNIC and VHBA channels are sent to visorclientbus (aka
87 * "visorhackbus")
88 */
89#define FOR_VISORHACKBUS(channel_type_guid) \
9eee5d1f 90 (((uuid_le_cmp(channel_type_guid,\
0639ba67
BR
91 spar_vnic_channel_protocol_uuid) == 0) ||\
92 (uuid_le_cmp(channel_type_guid,\
9eee5d1f 93 spar_vhba_channel_protocol_uuid) == 0)))
12e364b9
KC
94#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
95
96#define is_diagpool_channel(channel_type_guid) \
59827f00
BR
97 (uuid_le_cmp(channel_type_guid,\
98 spar_diag_pool_channel_protocol_uuid) == 0)
12e364b9 99
1390b88c
BR
100static LIST_HEAD(bus_info_list);
101static LIST_HEAD(dev_info_list);
12e364b9 102
c3d9a224 103static struct visorchannel *controlvm_channel;
12e364b9 104
84982fbf 105/* Manages the request payload in the controlvm channel */
c1f834eb 106struct visor_controlvm_payload_info {
c242233e 107 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 108 u64 offset; /* offset from beginning of controlvm
12e364b9 109 * channel to beginning of payload * pool */
b3c55b13 110 u32 bytes; /* number of bytes in payload pool */
c1f834eb
JS
111};
112
113static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 114
ea33b4ee
BR
115/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
116 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
117 */
c1f834eb 118struct visor_livedump_info {
ea33b4ee
BR
119 struct controlvm_message_header dumpcapture_header;
120 struct controlvm_message_header gettextdump_header;
121 struct controlvm_message_header dumpcomplete_header;
f4c11551 122 bool gettextdump_outstanding;
12e364b9 123 u32 crc32;
52063eca 124 unsigned long length;
12e364b9 125 atomic_t buffers_in_use;
52063eca 126 unsigned long destination;
c1f834eb
JS
127};
128
129static struct visor_livedump_info livedump_info;
12e364b9
KC
130
131/* The following globals are used to handle the scenario where we are unable to
132 * offload the payload from a controlvm message due to memory requirements. In
133 * this scenario, we simply stash the controlvm message, then attempt to
134 * process it again the next time controlvm_periodic_work() runs.
135 */
7166ed19 136static struct controlvm_message controlvm_pending_msg;
f4c11551 137static bool controlvm_pending_msg_valid = false;
12e364b9 138
12e364b9
KC
139/* This identifies a data buffer that has been received via a controlvm messages
140 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
141 */
142struct putfile_buffer_entry {
143 struct list_head next; /* putfile_buffer_entry list */
317d9614 144 struct parser_context *parser_ctx; /* points to input data buffer */
12e364b9
KC
145};
146
147/* List of struct putfile_request *, via next_putfile_request member.
148 * Each entry in this list identifies an outstanding TRANSMIT_FILE
149 * conversation.
150 */
1eee0011 151static LIST_HEAD(putfile_request_list);
12e364b9
KC
152
153/* This describes a buffer and its current state of transfer (e.g., how many
154 * bytes have already been supplied as putfile data, and how many bytes are
155 * remaining) for a putfile_request.
156 */
157struct putfile_active_buffer {
158 /* a payload from a controlvm message, containing a file data buffer */
317d9614 159 struct parser_context *parser_ctx;
12e364b9
KC
160 /* points within data area of parser_ctx to next byte of data */
161 u8 *pnext;
162 /* # bytes left from <pnext> to the end of this data buffer */
163 size_t bytes_remaining;
164};
165
166#define PUTFILE_REQUEST_SIG 0x0906101302281211
167/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
168 * conversation. Structs of this type are dynamically linked into
169 * <Putfile_request_list>.
170 */
171struct putfile_request {
172 u64 sig; /* PUTFILE_REQUEST_SIG */
173
174 /* header from original TransmitFile request */
98d7b594 175 struct controlvm_message_header controlvm_header;
12e364b9
KC
176 u64 file_request_number; /* from original TransmitFile request */
177
178 /* link to next struct putfile_request */
179 struct list_head next_putfile_request;
180
181 /* most-recent sequence number supplied via a controlvm message */
182 u64 data_sequence_number;
183
184 /* head of putfile_buffer_entry list, which describes the data to be
185 * supplied as putfile data;
186 * - this list is added to when controlvm messages come in that supply
187 * file data
188 * - this list is removed from via the hotplug program that is actually
189 * consuming these buffers to write as file data */
190 struct list_head input_buffer_list;
191 spinlock_t req_list_lock; /* lock for input_buffer_list */
192
193 /* waiters for input_buffer_list to go non-empty */
194 wait_queue_head_t input_buffer_wq;
195
196 /* data not yet read within current putfile_buffer_entry */
197 struct putfile_active_buffer active_buf;
198
199 /* <0 = failed, 0 = in-progress, >0 = successful; */
200 /* note that this must be set with req_list_lock, and if you set <0, */
201 /* it is your responsibility to also free up all of the other objects */
202 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
203 /* before releasing the lock */
204 int completion_status;
205};
206
12e364b9
KC
207struct parahotplug_request {
208 struct list_head list;
209 int id;
210 unsigned long expiration;
3ab47701 211 struct controlvm_message msg;
12e364b9
KC
212};
213
ddf5de53
BR
214static LIST_HEAD(parahotplug_request_list);
215static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
216static void parahotplug_process_list(void);
217
218/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
219 * CONTROLVM_REPORTEVENT.
220 */
6fe345af
BR
221static struct visorchipset_busdev_notifiers busdev_server_notifiers;
222static struct visorchipset_busdev_notifiers busdev_client_notifiers;
12e364b9 223
52063eca
JS
224static void bus_create_response(u32 bus_no, int response);
225static void bus_destroy_response(u32 bus_no, int response);
226static void device_create_response(u32 bus_no, u32 dev_no, int response);
227static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
228static void device_resume_response(u32 bus_no, u32 dev_no, int response);
12e364b9 229
8e3fedd6 230static struct visorchipset_busdev_responders busdev_responders = {
12e364b9
KC
231 .bus_create = bus_create_response,
232 .bus_destroy = bus_destroy_response,
233 .device_create = device_create_response,
234 .device_destroy = device_destroy_response,
927c7927 235 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
236 .device_resume = device_resume_response,
237};
238
239/* info for /dev/visorchipset */
5aa8ae57 240static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 241
19f6634f
BR
242/* prototypes for attributes */
243static ssize_t toolaction_show(struct device *dev,
8e76e695 244 struct device_attribute *attr, char *buf);
19f6634f 245static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
246 struct device_attribute *attr,
247 const char *buf, size_t count);
19f6634f
BR
248static DEVICE_ATTR_RW(toolaction);
249
54b31229 250static ssize_t boottotool_show(struct device *dev,
8e76e695 251 struct device_attribute *attr, char *buf);
54b31229 252static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
253 struct device_attribute *attr, const char *buf,
254 size_t count);
54b31229
BR
255static DEVICE_ATTR_RW(boottotool);
256
422af17c 257static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 258 char *buf);
422af17c 259static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 260 const char *buf, size_t count);
422af17c
BR
261static DEVICE_ATTR_RW(error);
262
263static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 264 char *buf);
422af17c 265static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 266 const char *buf, size_t count);
422af17c
BR
267static DEVICE_ATTR_RW(textid);
268
269static ssize_t remaining_steps_show(struct device *dev,
8e76e695 270 struct device_attribute *attr, char *buf);
422af17c 271static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
272 struct device_attribute *attr,
273 const char *buf, size_t count);
422af17c
BR
274static DEVICE_ATTR_RW(remaining_steps);
275
18b87ed1 276static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
277 struct device_attribute *attr,
278 const char *buf, size_t count);
18b87ed1
BR
279static DEVICE_ATTR_WO(chipsetready);
280
e56fa7cd 281static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
282 struct device_attribute *attr,
283 const char *buf, size_t count);
e56fa7cd
BR
284static DEVICE_ATTR_WO(devicedisabled);
285
286static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
287 struct device_attribute *attr,
288 const char *buf, size_t count);
e56fa7cd
BR
289static DEVICE_ATTR_WO(deviceenabled);
290
19f6634f
BR
291static struct attribute *visorchipset_install_attrs[] = {
292 &dev_attr_toolaction.attr,
54b31229 293 &dev_attr_boottotool.attr,
422af17c
BR
294 &dev_attr_error.attr,
295 &dev_attr_textid.attr,
296 &dev_attr_remaining_steps.attr,
19f6634f
BR
297 NULL
298};
299
300static struct attribute_group visorchipset_install_group = {
301 .name = "install",
302 .attrs = visorchipset_install_attrs
303};
304
18b87ed1
BR
305static struct attribute *visorchipset_guest_attrs[] = {
306 &dev_attr_chipsetready.attr,
307 NULL
308};
309
310static struct attribute_group visorchipset_guest_group = {
311 .name = "guest",
312 .attrs = visorchipset_guest_attrs
313};
314
e56fa7cd
BR
315static struct attribute *visorchipset_parahotplug_attrs[] = {
316 &dev_attr_devicedisabled.attr,
317 &dev_attr_deviceenabled.attr,
318 NULL
319};
320
321static struct attribute_group visorchipset_parahotplug_group = {
322 .name = "parahotplug",
323 .attrs = visorchipset_parahotplug_attrs
324};
325
19f6634f
BR
326static const struct attribute_group *visorchipset_dev_groups[] = {
327 &visorchipset_install_group,
18b87ed1 328 &visorchipset_guest_group,
e56fa7cd 329 &visorchipset_parahotplug_group,
19f6634f
BR
330 NULL
331};
332
12e364b9 333/* /sys/devices/platform/visorchipset */
eb34e877 334static struct platform_device visorchipset_platform_device = {
12e364b9
KC
335 .name = "visorchipset",
336 .id = -1,
19f6634f 337 .dev.groups = visorchipset_dev_groups,
12e364b9
KC
338};
339
340/* Function prototypes */
b3168c70 341static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
342 int response);
343static void controlvm_respond_chipset_init(
b3168c70 344 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
345 enum ultra_chipset_feature features);
346static void controlvm_respond_physdev_changestate(
b3168c70 347 struct controlvm_message_header *msg_hdr, int response,
98d7b594 348 struct spar_segment_state state);
12e364b9 349
d746cb55
VB
350static ssize_t toolaction_show(struct device *dev,
351 struct device_attribute *attr,
352 char *buf)
19f6634f 353{
01f4d85a 354 u8 tool_action;
19f6634f 355
c3d9a224 356 visorchannel_read(controlvm_channel,
d19642f6 357 offsetof(struct spar_controlvm_channel_protocol,
8e76e695 358 tool_action), &tool_action, sizeof(u8));
01f4d85a 359 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
360}
361
d746cb55
VB
362static ssize_t toolaction_store(struct device *dev,
363 struct device_attribute *attr,
364 const char *buf, size_t count)
19f6634f 365{
01f4d85a 366 u8 tool_action;
66e24b76 367 int ret;
19f6634f 368
ebec8967 369 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
370 return -EINVAL;
371
c3d9a224 372 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
373 offsetof(struct spar_controlvm_channel_protocol,
374 tool_action),
01f4d85a 375 &tool_action, sizeof(u8));
66e24b76
BR
376
377 if (ret)
378 return ret;
e22a4a0f 379 return count;
19f6634f
BR
380}
381
d746cb55
VB
382static ssize_t boottotool_show(struct device *dev,
383 struct device_attribute *attr,
384 char *buf)
54b31229 385{
365522d9 386 struct efi_spar_indication efi_spar_indication;
54b31229 387
c3d9a224 388 visorchannel_read(controlvm_channel,
8e76e695
BR
389 offsetof(struct spar_controlvm_channel_protocol,
390 efi_spar_ind), &efi_spar_indication,
391 sizeof(struct efi_spar_indication));
54b31229 392 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 393 efi_spar_indication.boot_to_tool);
54b31229
BR
394}
395
d746cb55
VB
396static ssize_t boottotool_store(struct device *dev,
397 struct device_attribute *attr,
398 const char *buf, size_t count)
54b31229 399{
66e24b76 400 int val, ret;
365522d9 401 struct efi_spar_indication efi_spar_indication;
54b31229 402
ebec8967 403 if (kstrtoint(buf, 10, &val))
66e24b76
BR
404 return -EINVAL;
405
365522d9 406 efi_spar_indication.boot_to_tool = val;
c3d9a224 407 ret = visorchannel_write(controlvm_channel,
d19642f6 408 offsetof(struct spar_controlvm_channel_protocol,
8e76e695
BR
409 efi_spar_ind), &(efi_spar_indication),
410 sizeof(struct efi_spar_indication));
66e24b76
BR
411
412 if (ret)
413 return ret;
e22a4a0f 414 return count;
54b31229 415}
422af17c
BR
416
417static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 418 char *buf)
422af17c
BR
419{
420 u32 error;
421
8e76e695
BR
422 visorchannel_read(controlvm_channel,
423 offsetof(struct spar_controlvm_channel_protocol,
424 installation_error),
425 &error, sizeof(u32));
422af17c
BR
426 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
427}
428
429static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 430 const char *buf, size_t count)
422af17c
BR
431{
432 u32 error;
66e24b76 433 int ret;
422af17c 434
ebec8967 435 if (kstrtou32(buf, 10, &error))
66e24b76
BR
436 return -EINVAL;
437
c3d9a224 438 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
439 offsetof(struct spar_controlvm_channel_protocol,
440 installation_error),
441 &error, sizeof(u32));
66e24b76
BR
442 if (ret)
443 return ret;
e22a4a0f 444 return count;
422af17c
BR
445}
446
447static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 448 char *buf)
422af17c 449{
10dbf0e3 450 u32 text_id;
422af17c 451
8e76e695
BR
452 visorchannel_read(controlvm_channel,
453 offsetof(struct spar_controlvm_channel_protocol,
454 installation_text_id),
455 &text_id, sizeof(u32));
10dbf0e3 456 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
457}
458
459static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 460 const char *buf, size_t count)
422af17c 461{
10dbf0e3 462 u32 text_id;
66e24b76 463 int ret;
422af17c 464
ebec8967 465 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
466 return -EINVAL;
467
c3d9a224 468 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
469 offsetof(struct spar_controlvm_channel_protocol,
470 installation_text_id),
471 &text_id, sizeof(u32));
66e24b76
BR
472 if (ret)
473 return ret;
e22a4a0f 474 return count;
422af17c
BR
475}
476
422af17c 477static ssize_t remaining_steps_show(struct device *dev,
8e76e695 478 struct device_attribute *attr, char *buf)
422af17c 479{
ee8da290 480 u16 remaining_steps;
422af17c 481
c3d9a224 482 visorchannel_read(controlvm_channel,
8e76e695
BR
483 offsetof(struct spar_controlvm_channel_protocol,
484 installation_remaining_steps),
485 &remaining_steps, sizeof(u16));
ee8da290 486 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
487}
488
489static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
490 struct device_attribute *attr,
491 const char *buf, size_t count)
422af17c 492{
ee8da290 493 u16 remaining_steps;
66e24b76 494 int ret;
422af17c 495
ebec8967 496 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
497 return -EINVAL;
498
c3d9a224 499 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
500 offsetof(struct spar_controlvm_channel_protocol,
501 installation_remaining_steps),
502 &remaining_steps, sizeof(u16));
66e24b76
BR
503 if (ret)
504 return ret;
e22a4a0f 505 return count;
422af17c
BR
506}
507
12e364b9 508static void
9b989a98 509bus_info_clear(void *v)
12e364b9 510{
bbd4be30 511 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;
12e364b9 512
12e364b9 513 kfree(p->name);
12e364b9 514 kfree(p->description);
33192fa1 515 memset(p, 0, sizeof(struct visorchipset_bus_info));
12e364b9
KC
516}
517
518static void
9b989a98 519dev_info_clear(void *v)
12e364b9 520{
246e0cd0 521 struct visorchipset_device_info *p =
bbd4be30 522 (struct visorchipset_device_info *) v;
26eb2c0c 523
246e0cd0 524 memset(p, 0, sizeof(struct visorchipset_device_info));
12e364b9
KC
525}
526
4f66520b
JS
527static struct visorchipset_bus_info *
528bus_find(struct list_head *list, u32 bus_no)
529{
530 struct visorchipset_bus_info *p;
531
532 list_for_each_entry(p, list, entry) {
533 if (p->bus_no == bus_no)
534 return p;
535 }
536
537 return NULL;
538}
539
d480f6a2
JS
540static struct visorchipset_device_info *
541device_find(struct list_head *list, u32 bus_no, u32 dev_no)
542{
543 struct visorchipset_device_info *p;
544
545 list_for_each_entry(p, list, entry) {
546 if (p->bus_no == bus_no && p->dev_no == dev_no)
547 return p;
548 }
549
550 return NULL;
551}
552
28723521
JS
553static void busdevices_del(struct list_head *list, u32 bus_no)
554{
555 struct visorchipset_device_info *p, *tmp;
556
557 list_for_each_entry_safe(p, tmp, list, entry) {
558 if (p->bus_no == bus_no) {
559 list_del(&p->entry);
560 kfree(p);
561 }
562 }
563}
564
c242233e 565static u8
12e364b9
KC
566check_chipset_events(void)
567{
568 int i;
c242233e 569 u8 send_msg = 1;
12e364b9
KC
570 /* Check events to determine if response should be sent */
571 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
572 send_msg &= chipset_events[i];
573 return send_msg;
574}
575
576static void
577clear_chipset_events(void)
578{
579 int i;
580 /* Clear chipset_events */
581 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
582 chipset_events[i] = 0;
583}
584
585void
fe90d892
BR
586visorchipset_register_busdev_server(
587 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 588 struct visorchipset_busdev_responders *responders,
1e7a59c1 589 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 590{
8f1947ac 591 down(&notifier_lock);
38f736e9 592 if (!notifiers) {
6fe345af
BR
593 memset(&busdev_server_notifiers, 0,
594 sizeof(busdev_server_notifiers));
12e364b9
KC
595 serverregistered = 0; /* clear flag */
596 } else {
6fe345af 597 busdev_server_notifiers = *notifiers;
12e364b9
KC
598 serverregistered = 1; /* set flag */
599 }
600 if (responders)
8e3fedd6 601 *responders = busdev_responders;
1e7a59c1
BR
602 if (driver_info)
603 bus_device_info_init(driver_info, "chipset", "visorchipset",
8e76e695 604 VERSION, NULL);
12e364b9 605
8f1947ac 606 up(&notifier_lock);
12e364b9
KC
607}
608EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
609
610void
fe90d892
BR
611visorchipset_register_busdev_client(
612 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 613 struct visorchipset_busdev_responders *responders,
43fce019 614 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 615{
8f1947ac 616 down(&notifier_lock);
38f736e9 617 if (!notifiers) {
6fe345af
BR
618 memset(&busdev_client_notifiers, 0,
619 sizeof(busdev_client_notifiers));
12e364b9
KC
620 clientregistered = 0; /* clear flag */
621 } else {
6fe345af 622 busdev_client_notifiers = *notifiers;
12e364b9
KC
623 clientregistered = 1; /* set flag */
624 }
625 if (responders)
8e3fedd6 626 *responders = busdev_responders;
43fce019
BR
627 if (driver_info)
628 bus_device_info_init(driver_info, "chipset(bolts)",
629 "visorchipset", VERSION, NULL);
8f1947ac 630 up(&notifier_lock);
12e364b9
KC
631}
632EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
633
634static void
635cleanup_controlvm_structures(void)
636{
33192fa1 637 struct visorchipset_bus_info *bi, *tmp_bi;
246e0cd0 638 struct visorchipset_device_info *di, *tmp_di;
12e364b9 639
1390b88c 640 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
9b989a98 641 bus_info_clear(bi);
12e364b9
KC
642 list_del(&bi->entry);
643 kfree(bi);
644 }
645
1390b88c 646 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
9b989a98 647 dev_info_clear(di);
12e364b9
KC
648 list_del(&di->entry);
649 kfree(di);
650 }
651}
652
653static void
3ab47701 654chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
655{
656 static int chipset_inited;
b9b141e8 657 enum ultra_chipset_feature features = 0;
12e364b9
KC
658 int rc = CONTROLVM_RESP_SUCCESS;
659
660 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
661 if (chipset_inited) {
22ad57ba 662 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 663 goto cleanup;
12e364b9
KC
664 }
665 chipset_inited = 1;
666 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
667
668 /* Set features to indicate we support parahotplug (if Command
669 * also supports it). */
670 features =
2ea5117b 671 inmsg->cmd.init_chipset.
12e364b9
KC
672 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
673
674 /* Set the "reply" bit so Command knows this is a
675 * features-aware driver. */
676 features |= ULTRA_CHIPSET_FEATURE_REPLY;
677
e3199b2e 678cleanup:
12e364b9
KC
679 if (rc < 0)
680 cleanup_controlvm_structures();
98d7b594 681 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
682 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
683}
684
685static void
3ab47701 686controlvm_init_response(struct controlvm_message *msg,
b3168c70 687 struct controlvm_message_header *msg_hdr, int response)
12e364b9 688{
3ab47701 689 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 690 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
691 msg->hdr.payload_bytes = 0;
692 msg->hdr.payload_vm_offset = 0;
693 msg->hdr.payload_max_bytes = 0;
12e364b9 694 if (response < 0) {
98d7b594
BR
695 msg->hdr.flags.failed = 1;
696 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
697 }
698}
699
700static void
b3168c70 701controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 702{
3ab47701 703 struct controlvm_message outmsg;
26eb2c0c 704
b3168c70 705 controlvm_init_response(&outmsg, msg_hdr, response);
12e364b9
KC
706 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
707 * back the deviceChangeState structure in the packet. */
b3168c70 708 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
0639ba67
BR
709 g_devicechangestate_packet.device_change_state.bus_no ==
710 g_diagpool_bus_no &&
711 g_devicechangestate_packet.device_change_state.dev_no ==
83d48905 712 g_diagpool_dev_no)
4f44b72d 713 outmsg.cmd = g_devicechangestate_packet;
2098dbd1 714 if (outmsg.hdr.flags.test_message == 1)
12e364b9 715 return;
2098dbd1 716
c3d9a224 717 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 718 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
719 return;
720 }
721}
722
723static void
b3168c70 724controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 725 int response,
b9b141e8 726 enum ultra_chipset_feature features)
12e364b9 727{
3ab47701 728 struct controlvm_message outmsg;
26eb2c0c 729
b3168c70 730 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 731 outmsg.cmd.init_chipset.features = features;
c3d9a224 732 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 733 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
734 return;
735 }
736}
737
98d7b594 738static void controlvm_respond_physdev_changestate(
b3168c70 739 struct controlvm_message_header *msg_hdr, int response,
98d7b594 740 struct spar_segment_state state)
12e364b9 741{
3ab47701 742 struct controlvm_message outmsg;
26eb2c0c 743
b3168c70 744 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
745 outmsg.cmd.device_change_state.state = state;
746 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 747 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 748 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
749 return;
750 }
751}
752
753void
2c683cde
BR
754visorchipset_save_message(struct controlvm_message *msg,
755 enum crash_obj_type type)
12e364b9 756{
4577225d
BR
757 u32 crash_msg_offset;
758 u16 crash_msg_count;
12e364b9
KC
759
760 /* get saved message count */
c3d9a224 761 if (visorchannel_read(controlvm_channel,
d19642f6
BR
762 offsetof(struct spar_controlvm_channel_protocol,
763 saved_crash_message_count),
4577225d 764 &crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
765 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
766 POSTCODE_SEVERITY_ERR);
767 return;
768 }
769
4577225d 770 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 771 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
4577225d 772 crash_msg_count,
12e364b9
KC
773 POSTCODE_SEVERITY_ERR);
774 return;
775 }
776
777 /* get saved crash message offset */
c3d9a224 778 if (visorchannel_read(controlvm_channel,
d19642f6
BR
779 offsetof(struct spar_controlvm_channel_protocol,
780 saved_crash_message_offset),
4577225d 781 &crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
782 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
783 POSTCODE_SEVERITY_ERR);
784 return;
785 }
786
2c683cde 787 if (type == CRASH_BUS) {
c3d9a224 788 if (visorchannel_write(controlvm_channel,
4577225d 789 crash_msg_offset,
3ab47701
BR
790 msg,
791 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
792 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
793 POSTCODE_SEVERITY_ERR);
794 return;
795 }
796 } else {
c3d9a224 797 if (visorchannel_write(controlvm_channel,
4577225d 798 crash_msg_offset +
3ab47701
BR
799 sizeof(struct controlvm_message), msg,
800 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
801 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
802 POSTCODE_SEVERITY_ERR);
803 return;
804 }
805 }
806}
807EXPORT_SYMBOL_GPL(visorchipset_save_message);
808
809static void
52063eca 810bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
12e364b9 811{
e82ba62e 812 struct visorchipset_bus_info *p;
f4c11551 813 bool need_clear = false;
12e364b9 814
4f66520b 815 p = bus_find(&bus_info_list, bus_no);
0aca7844 816 if (!p)
12e364b9 817 return;
0aca7844 818
12e364b9 819 if (response < 0) {
fbb31f48 820 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
12e364b9
KC
821 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
822 /* undo the row we just created... */
28723521 823 busdevices_del(&dev_info_list, bus_no);
12e364b9 824 } else {
fbb31f48 825 if (cmd_id == CONTROLVM_BUS_CREATE)
12e364b9 826 p->state.created = 1;
fbb31f48 827 if (cmd_id == CONTROLVM_BUS_DESTROY)
f4c11551 828 need_clear = true;
12e364b9
KC
829 }
830
0aca7844 831 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 832 return; /* no controlvm response needed */
6b59b31d 833 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 834 return;
33192fa1
BR
835 controlvm_respond(&p->pending_msg_hdr, response);
836 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 837 if (need_clear) {
9b989a98 838 bus_info_clear(p);
28723521 839 busdevices_del(&dev_info_list, bus_no);
12e364b9
KC
840 }
841}
842
843static void
fbb31f48 844device_changestate_responder(enum controlvm_id cmd_id,
52063eca 845 u32 bus_no, u32 dev_no, int response,
fbb31f48 846 struct spar_segment_state response_state)
12e364b9 847{
e82ba62e 848 struct visorchipset_device_info *p;
3ab47701 849 struct controlvm_message outmsg;
12e364b9 850
d480f6a2 851 p = device_find(&dev_info_list, bus_no, dev_no);
0aca7844 852 if (!p)
12e364b9 853 return;
0aca7844 854 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 855 return; /* no controlvm response needed */
fbb31f48 856 if (p->pending_msg_hdr.id != cmd_id)
12e364b9 857 return;
12e364b9 858
246e0cd0 859 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
12e364b9 860
fbb31f48
BR
861 outmsg.cmd.device_change_state.bus_no = bus_no;
862 outmsg.cmd.device_change_state.dev_no = dev_no;
863 outmsg.cmd.device_change_state.state = response_state;
12e364b9 864
c3d9a224 865 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 866 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 867 return;
12e364b9 868
246e0cd0 869 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9
KC
870}
871
872static void
52063eca 873device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
12e364b9 874{
e82ba62e 875 struct visorchipset_device_info *p;
f4c11551 876 bool need_clear = false;
12e364b9 877
d480f6a2 878 p = device_find(&dev_info_list, bus_no, dev_no);
0aca7844 879 if (!p)
12e364b9 880 return;
12e364b9 881 if (response >= 0) {
fbb31f48 882 if (cmd_id == CONTROLVM_DEVICE_CREATE)
12e364b9 883 p->state.created = 1;
fbb31f48 884 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
f4c11551 885 need_clear = true;
12e364b9
KC
886 }
887
0aca7844 888 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 889 return; /* no controlvm response needed */
0aca7844 890
6b59b31d 891 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 892 return;
0aca7844 893
246e0cd0
BR
894 controlvm_respond(&p->pending_msg_hdr, response);
895 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 896 if (need_clear)
9b989a98 897 dev_info_clear(p);
12e364b9
KC
898}
899
900static void
2836c6a8
BR
901bus_epilog(u32 bus_no,
902 u32 cmd, struct controlvm_message_header *msg_hdr,
f4c11551 903 int response, bool need_response)
12e364b9 904{
4f66520b 905 struct visorchipset_bus_info *bus_info;
f4c11551 906 bool notified = false;
12e364b9 907
4f66520b 908 bus_info = bus_find(&bus_info_list, bus_no);
12e364b9 909
2836c6a8 910 if (!bus_info)
12e364b9 911 return;
0aca7844 912
2836c6a8
BR
913 if (need_response) {
914 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
98d7b594 915 sizeof(struct controlvm_message_header));
75c1f8b7 916 } else {
2836c6a8 917 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 918 }
12e364b9 919
8f1947ac 920 down(&notifier_lock);
12e364b9
KC
921 if (response == CONTROLVM_RESP_SUCCESS) {
922 switch (cmd) {
923 case CONTROLVM_BUS_CREATE:
924 /* We can't tell from the bus_create
925 * information which of our 2 bus flavors the
926 * devices on this bus will ultimately end up.
927 * FORTUNATELY, it turns out it is harmless to
928 * send the bus_create to both of them. We can
929 * narrow things down a little bit, though,
930 * because we know: - BusDev_Server can handle
931 * either server or client devices
932 * - BusDev_Client can handle ONLY client
933 * devices */
6fe345af
BR
934 if (busdev_server_notifiers.bus_create) {
935 (*busdev_server_notifiers.bus_create) (bus_no);
f4c11551 936 notified = true;
12e364b9 937 }
2836c6a8 938 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
939 busdev_client_notifiers.bus_create) {
940 (*busdev_client_notifiers.bus_create) (bus_no);
f4c11551 941 notified = true;
12e364b9
KC
942 }
943 break;
944 case CONTROLVM_BUS_DESTROY:
6fe345af
BR
945 if (busdev_server_notifiers.bus_destroy) {
946 (*busdev_server_notifiers.bus_destroy) (bus_no);
f4c11551 947 notified = true;
12e364b9 948 }
2836c6a8 949 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
950 busdev_client_notifiers.bus_destroy) {
951 (*busdev_client_notifiers.bus_destroy) (bus_no);
f4c11551 952 notified = true;
12e364b9
KC
953 }
954 break;
955 }
956 }
957 if (notified)
958 /* The callback function just called above is responsible
929aa8ae 959 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
960 * function, which will call bus_responder()
961 */
962 ;
963 else
2836c6a8 964 bus_responder(cmd, bus_no, response);
8f1947ac 965 up(&notifier_lock);
12e364b9
KC
966}
967
968static void
2836c6a8
BR
969device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
970 struct controlvm_message_header *msg_hdr, int response,
f4c11551 971 bool need_response, bool for_visorbus)
12e364b9 972{
e82ba62e 973 struct visorchipset_busdev_notifiers *notifiers;
f4c11551 974 bool notified = false;
12e364b9 975
2836c6a8 976 struct visorchipset_device_info *dev_info =
d480f6a2 977 device_find(&dev_info_list, bus_no, dev_no);
12e364b9
KC
978 char *envp[] = {
979 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
980 NULL
981 };
982
2836c6a8 983 if (!dev_info)
12e364b9 984 return;
0aca7844 985
12e364b9 986 if (for_visorbus)
6fe345af 987 notifiers = &busdev_server_notifiers;
12e364b9 988 else
6fe345af 989 notifiers = &busdev_client_notifiers;
2836c6a8
BR
990 if (need_response) {
991 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
98d7b594 992 sizeof(struct controlvm_message_header));
75c1f8b7 993 } else {
2836c6a8 994 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 995 }
12e364b9 996
8f1947ac 997 down(&notifier_lock);
12e364b9
KC
998 if (response >= 0) {
999 switch (cmd) {
1000 case CONTROLVM_DEVICE_CREATE:
1001 if (notifiers->device_create) {
2836c6a8 1002 (*notifiers->device_create) (bus_no, dev_no);
f4c11551 1003 notified = true;
12e364b9
KC
1004 }
1005 break;
1006 case CONTROLVM_DEVICE_CHANGESTATE:
1007 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
1008 if (state.alive == segment_state_running.alive &&
1009 state.operating ==
1010 segment_state_running.operating) {
12e364b9 1011 if (notifiers->device_resume) {
2836c6a8
BR
1012 (*notifiers->device_resume) (bus_no,
1013 dev_no);
f4c11551 1014 notified = true;
12e364b9
KC
1015 }
1016 }
1017 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 1018 else if (state.alive == segment_state_standby.alive &&
3f833b54 1019 state.operating ==
bd0d2dcc 1020 segment_state_standby.operating) {
12e364b9
KC
1021 /* technically this is standby case
1022 * where server is lost
1023 */
1024 if (notifiers->device_pause) {
2836c6a8
BR
1025 (*notifiers->device_pause) (bus_no,
1026 dev_no);
f4c11551 1027 notified = true;
12e364b9 1028 }
bd0d2dcc 1029 } else if (state.alive == segment_state_paused.alive &&
3f833b54 1030 state.operating ==
bd0d2dcc 1031 segment_state_paused.operating) {
12e364b9
KC
1032 /* this is lite pause where channel is
1033 * still valid just 'pause' of it
1034 */
2836c6a8
BR
1035 if (bus_no == g_diagpool_bus_no &&
1036 dev_no == g_diagpool_dev_no) {
12e364b9
KC
1037 /* this will trigger the
1038 * diag_shutdown.sh script in
1039 * the visorchipset hotplug */
1040 kobject_uevent_env
eb34e877 1041 (&visorchipset_platform_device.dev.
12e364b9
KC
1042 kobj, KOBJ_ONLINE, envp);
1043 }
1044 }
1045 break;
1046 case CONTROLVM_DEVICE_DESTROY:
1047 if (notifiers->device_destroy) {
2836c6a8 1048 (*notifiers->device_destroy) (bus_no, dev_no);
f4c11551 1049 notified = true;
12e364b9
KC
1050 }
1051 break;
1052 }
1053 }
1054 if (notified)
1055 /* The callback function just called above is responsible
929aa8ae 1056 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1057 * function, which will call device_responder()
1058 */
1059 ;
1060 else
2836c6a8 1061 device_responder(cmd, bus_no, dev_no, response);
8f1947ac 1062 up(&notifier_lock);
12e364b9
KC
1063}
1064
1065static void
3ab47701 1066bus_create(struct controlvm_message *inmsg)
12e364b9 1067{
2ea5117b 1068 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1069 u32 bus_no = cmd->create_bus.bus_no;
12e364b9 1070 int rc = CONTROLVM_RESP_SUCCESS;
e82ba62e 1071 struct visorchipset_bus_info *bus_info;
12e364b9 1072
4f66520b 1073 bus_info = bus_find(&bus_info_list, bus_no);
6c5fed35
BR
1074 if (bus_info && (bus_info->state.created == 1)) {
1075 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1076 POSTCODE_SEVERITY_ERR);
22ad57ba 1077 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
6c5fed35 1078 goto cleanup;
12e364b9 1079 }
6c5fed35
BR
1080 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1081 if (!bus_info) {
1082 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1083 POSTCODE_SEVERITY_ERR);
22ad57ba 1084 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
6c5fed35 1085 goto cleanup;
12e364b9
KC
1086 }
1087
6c5fed35
BR
1088 INIT_LIST_HEAD(&bus_info->entry);
1089 bus_info->bus_no = bus_no;
12e364b9 1090
6c5fed35 1091 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1092
98d7b594 1093 if (inmsg->hdr.flags.test_message == 1)
6c5fed35 1094 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1095 else
6c5fed35 1096 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
12e364b9 1097
6c5fed35
BR
1098 bus_info->flags.server = inmsg->hdr.flags.server;
1099 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1100 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1101 bus_info->chan_info.channel_type_uuid =
9b1caee7 1102 cmd->create_bus.bus_data_type_uuid;
6c5fed35 1103 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
12e364b9 1104
6c5fed35 1105 list_add(&bus_info->entry, &bus_info_list);
12e364b9 1106
6c5fed35 1107 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1108
6c5fed35
BR
1109cleanup:
1110 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1111 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1112}
1113
1114static void
3ab47701 1115bus_destroy(struct controlvm_message *inmsg)
12e364b9 1116{
2ea5117b 1117 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1118 u32 bus_no = cmd->destroy_bus.bus_no;
dff54cd6 1119 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1120 int rc = CONTROLVM_RESP_SUCCESS;
1121
4f66520b 1122 bus_info = bus_find(&bus_info_list, bus_no);
dff54cd6 1123 if (!bus_info)
22ad57ba 1124 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1125 else if (bus_info->state.created == 0)
22ad57ba 1126 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1127
dff54cd6 1128 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1129 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1130}
1131
1132static void
317d9614
BR
1133bus_configure(struct controlvm_message *inmsg,
1134 struct parser_context *parser_ctx)
12e364b9 1135{
2ea5117b 1136 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e
JS
1137 u32 bus_no;
1138 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1139 int rc = CONTROLVM_RESP_SUCCESS;
1140 char s[99];
1141
654bada0
BR
1142 bus_no = cmd->configure_bus.bus_no;
1143 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1144 POSTCODE_SEVERITY_INFO);
12e364b9 1145
4f66520b 1146 bus_info = bus_find(&bus_info_list, bus_no);
654bada0
BR
1147 if (!bus_info) {
1148 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1149 POSTCODE_SEVERITY_ERR);
22ad57ba 1150 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1151 } else if (bus_info->state.created == 0) {
1152 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1153 POSTCODE_SEVERITY_ERR);
22ad57ba 1154 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1155 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1156 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1157 POSTCODE_SEVERITY_ERR);
22ad57ba 1158 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0
BR
1159 } else {
1160 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1161 bus_info->partition_uuid = parser_id_get(parser_ctx);
1162 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1163 bus_info->name = parser_string_get(parser_ctx);
1164
1165 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1166 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1167 POSTCODE_SEVERITY_INFO);
12e364b9 1168 }
654bada0 1169 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1170 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1171}
1172
1173static void
3ab47701 1174my_device_create(struct controlvm_message *inmsg)
12e364b9 1175{
2ea5117b 1176 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1177 u32 bus_no = cmd->create_device.bus_no;
1178 u32 dev_no = cmd->create_device.dev_no;
e82ba62e
JS
1179 struct visorchipset_device_info *dev_info;
1180 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1181 int rc = CONTROLVM_RESP_SUCCESS;
1182
d480f6a2 1183 dev_info = device_find(&dev_info_list, bus_no, dev_no);
c60c8e26
BR
1184 if (dev_info && (dev_info->state.created == 1)) {
1185 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1186 POSTCODE_SEVERITY_ERR);
22ad57ba 1187 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
c60c8e26 1188 goto cleanup;
12e364b9 1189 }
4f66520b 1190 bus_info = bus_find(&bus_info_list, bus_no);
c60c8e26
BR
1191 if (!bus_info) {
1192 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1193 POSTCODE_SEVERITY_ERR);
22ad57ba 1194 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1195 goto cleanup;
12e364b9 1196 }
c60c8e26
BR
1197 if (bus_info->state.created == 0) {
1198 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1199 POSTCODE_SEVERITY_ERR);
22ad57ba 1200 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1201 goto cleanup;
12e364b9 1202 }
c60c8e26
BR
1203 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1204 if (!dev_info) {
1205 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1206 POSTCODE_SEVERITY_ERR);
22ad57ba 1207 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
c60c8e26 1208 goto cleanup;
12e364b9 1209 }
97a84f12 1210
c60c8e26
BR
1211 INIT_LIST_HEAD(&dev_info->entry);
1212 dev_info->bus_no = bus_no;
1213 dev_info->dev_no = dev_no;
1214 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1215 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
12e364b9
KC
1216 POSTCODE_SEVERITY_INFO);
1217
98d7b594 1218 if (inmsg->hdr.flags.test_message == 1)
c60c8e26 1219 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1220 else
c60c8e26
BR
1221 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1222 dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1223 dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1224 dev_info->chan_info.channel_type_uuid =
9b1caee7 1225 cmd->create_device.data_type_uuid;
c60c8e26
BR
1226 dev_info->chan_info.intr = cmd->create_device.intr;
1227 list_add(&dev_info->entry, &dev_info_list);
1228 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
12e364b9 1229 POSTCODE_SEVERITY_INFO);
c60c8e26 1230cleanup:
12e364b9 1231 /* get the bus and devNo for DiagPool channel */
c60c8e26
BR
1232 if (dev_info &&
1233 is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1234 g_diagpool_bus_no = bus_no;
1235 g_diagpool_dev_no = dev_no;
12e364b9 1236 }
c60c8e26 1237 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1238 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
98d7b594 1239 inmsg->hdr.flags.response_expected == 1,
c60c8e26 1240 FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1241}
1242
1243static void
3ab47701 1244my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1245{
2ea5117b 1246 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1247 u32 bus_no = cmd->device_change_state.bus_no;
1248 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1249 struct spar_segment_state state = cmd->device_change_state.state;
e82ba62e 1250 struct visorchipset_device_info *dev_info;
12e364b9
KC
1251 int rc = CONTROLVM_RESP_SUCCESS;
1252
d480f6a2 1253 dev_info = device_find(&dev_info_list, bus_no, dev_no);
0278a905
BR
1254 if (!dev_info) {
1255 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1256 POSTCODE_SEVERITY_ERR);
22ad57ba 1257 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1258 } else if (dev_info->state.created == 0) {
1259 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1260 POSTCODE_SEVERITY_ERR);
22ad57ba 1261 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1262 }
0278a905
BR
1263 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1264 device_epilog(bus_no, dev_no, state,
1265 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
98d7b594 1266 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1267 FOR_VISORBUS(
0278a905 1268 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1269}
1270
1271static void
3ab47701 1272my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1273{
2ea5117b 1274 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1275 u32 bus_no = cmd->destroy_device.bus_no;
1276 u32 dev_no = cmd->destroy_device.dev_no;
e82ba62e 1277 struct visorchipset_device_info *dev_info;
12e364b9
KC
1278 int rc = CONTROLVM_RESP_SUCCESS;
1279
d480f6a2 1280 dev_info = device_find(&dev_info_list, bus_no, dev_no);
61715c8b 1281 if (!dev_info)
22ad57ba 1282 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1283 else if (dev_info->state.created == 0)
22ad57ba 1284 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1285
61715c8b
BR
1286 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1287 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1288 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
98d7b594 1289 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1290 FOR_VISORBUS(
61715c8b 1291 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1292}
1293
1294/* When provided with the physical address of the controlvm channel
1295 * (phys_addr), the offset to the payload area we need to manage
1296 * (offset), and the size of this payload area (bytes), fills in the
f4c11551 1297 * controlvm_payload_info struct. Returns true for success or false
12e364b9
KC
1298 * for failure.
1299 */
1300static int
5fc0229a 1301initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
c1f834eb 1302 struct visor_controlvm_payload_info *info)
12e364b9 1303{
c242233e 1304 u8 __iomem *payload = NULL;
12e364b9
KC
1305 int rc = CONTROLVM_RESP_SUCCESS;
1306
38f736e9 1307 if (!info) {
22ad57ba 1308 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1309 goto cleanup;
12e364b9 1310 }
c1f834eb 1311 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9 1312 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1313 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1314 goto cleanup;
12e364b9
KC
1315 }
1316 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1317 if (!payload) {
22ad57ba 1318 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1319 goto cleanup;
12e364b9
KC
1320 }
1321
1322 info->offset = offset;
1323 info->bytes = bytes;
1324 info->ptr = payload;
12e364b9 1325
f118a39b 1326cleanup:
12e364b9 1327 if (rc < 0) {
f118a39b 1328 if (payload) {
12e364b9
KC
1329 iounmap(payload);
1330 payload = NULL;
1331 }
1332 }
1333 return rc;
1334}
1335
1336static void
c1f834eb 1337destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1338{
597c338f 1339 if (info->ptr) {
12e364b9
KC
1340 iounmap(info->ptr);
1341 info->ptr = NULL;
1342 }
c1f834eb 1343 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1344}
1345
1346static void
1347initialize_controlvm_payload(void)
1348{
c3d9a224 1349 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
cafefc0c
BR
1350 u64 payload_offset = 0;
1351 u32 payload_bytes = 0;
26eb2c0c 1352
c3d9a224 1353 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1354 offsetof(struct spar_controlvm_channel_protocol,
1355 request_payload_offset),
cafefc0c 1356 &payload_offset, sizeof(payload_offset)) < 0) {
12e364b9
KC
1357 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1358 POSTCODE_SEVERITY_ERR);
1359 return;
1360 }
c3d9a224 1361 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1362 offsetof(struct spar_controlvm_channel_protocol,
1363 request_payload_bytes),
cafefc0c 1364 &payload_bytes, sizeof(payload_bytes)) < 0) {
12e364b9
KC
1365 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1366 POSTCODE_SEVERITY_ERR);
1367 return;
1368 }
1369 initialize_controlvm_payload_info(phys_addr,
cafefc0c 1370 payload_offset, payload_bytes,
84982fbf 1371 &controlvm_payload_info);
12e364b9
KC
1372}
1373
1374/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1375 * Returns CONTROLVM_RESP_xxx code.
1376 */
1377int
1378visorchipset_chipset_ready(void)
1379{
eb34e877 1380 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1381 return CONTROLVM_RESP_SUCCESS;
1382}
1383EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1384
1385int
1386visorchipset_chipset_selftest(void)
1387{
1388 char env_selftest[20];
1389 char *envp[] = { env_selftest, NULL };
26eb2c0c 1390
12e364b9 1391 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1392 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1393 envp);
1394 return CONTROLVM_RESP_SUCCESS;
1395}
1396EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1397
1398/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1399 * Returns CONTROLVM_RESP_xxx code.
1400 */
1401int
1402visorchipset_chipset_notready(void)
1403{
eb34e877 1404 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1405 return CONTROLVM_RESP_SUCCESS;
1406}
1407EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1408
1409static void
77a0449d 1410chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1411{
1412 int rc = visorchipset_chipset_ready();
26eb2c0c 1413
12e364b9
KC
1414 if (rc != CONTROLVM_RESP_SUCCESS)
1415 rc = -rc;
77a0449d
BR
1416 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1417 controlvm_respond(msg_hdr, rc);
1418 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1419 /* Send CHIPSET_READY response when all modules have been loaded
1420 * and disks mounted for the partition
1421 */
77a0449d 1422 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1423 }
1424}
1425
1426static void
77a0449d 1427chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1428{
1429 int rc = visorchipset_chipset_selftest();
26eb2c0c 1430
12e364b9
KC
1431 if (rc != CONTROLVM_RESP_SUCCESS)
1432 rc = -rc;
77a0449d
BR
1433 if (msg_hdr->flags.response_expected)
1434 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1435}
1436
1437static void
77a0449d 1438chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1439{
1440 int rc = visorchipset_chipset_notready();
26eb2c0c 1441
12e364b9
KC
1442 if (rc != CONTROLVM_RESP_SUCCESS)
1443 rc = -rc;
77a0449d
BR
1444 if (msg_hdr->flags.response_expected)
1445 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1446}
1447
1448/* This is your "one-stop" shop for grabbing the next message from the
1449 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1450 */
f4c11551 1451static bool
3ab47701 1452read_controlvm_event(struct controlvm_message *msg)
12e364b9 1453{
c3d9a224 1454 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1455 CONTROLVM_QUEUE_EVENT, msg)) {
1456 /* got a message */
0aca7844 1457 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1458 return false;
1459 return true;
12e364b9 1460 }
f4c11551 1461 return false;
12e364b9
KC
1462}
1463
1464/*
1465 * The general parahotplug flow works as follows. The visorchipset
1466 * driver receives a DEVICE_CHANGESTATE message from Command
1467 * specifying a physical device to enable or disable. The CONTROLVM
1468 * message handler calls parahotplug_process_message, which then adds
1469 * the message to a global list and kicks off a udev event which
1470 * causes a user level script to enable or disable the specified
1471 * device. The udev script then writes to
1472 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1473 * to get called, at which point the appropriate CONTROLVM message is
1474 * retrieved from the list and responded to.
1475 */
1476
1477#define PARAHOTPLUG_TIMEOUT_MS 2000
1478
1479/*
1480 * Generate unique int to match an outstanding CONTROLVM message with a
1481 * udev script /proc response
1482 */
1483static int
1484parahotplug_next_id(void)
1485{
1486 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1487
12e364b9
KC
1488 return atomic_inc_return(&id);
1489}
1490
1491/*
1492 * Returns the time (in jiffies) when a CONTROLVM message on the list
1493 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1494 */
1495static unsigned long
1496parahotplug_next_expiration(void)
1497{
2cc1a1b3 1498 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1499}
1500
1501/*
1502 * Create a parahotplug_request, which is basically a wrapper for a
1503 * CONTROLVM_MESSAGE that we can stick on a list
1504 */
1505static struct parahotplug_request *
3ab47701 1506parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1507{
ea0dcfcf
QL
1508 struct parahotplug_request *req;
1509
6a55e3c3 1510 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1511 if (!req)
12e364b9
KC
1512 return NULL;
1513
1514 req->id = parahotplug_next_id();
1515 req->expiration = parahotplug_next_expiration();
1516 req->msg = *msg;
1517
1518 return req;
1519}
1520
1521/*
1522 * Free a parahotplug_request.
1523 */
1524static void
1525parahotplug_request_destroy(struct parahotplug_request *req)
1526{
1527 kfree(req);
1528}
1529
1530/*
1531 * Cause uevent to run the user level script to do the disable/enable
1532 * specified in (the CONTROLVM message in) the specified
1533 * parahotplug_request
1534 */
1535static void
1536parahotplug_request_kickoff(struct parahotplug_request *req)
1537{
2ea5117b 1538 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1539 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1540 env_func[40];
1541 char *envp[] = {
1542 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1543 };
1544
1545 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1546 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1547 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1548 cmd->device_change_state.state.active);
12e364b9 1549 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1550 cmd->device_change_state.bus_no);
12e364b9 1551 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1552 cmd->device_change_state.dev_no >> 3);
12e364b9 1553 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1554 cmd->device_change_state.dev_no & 0x7);
12e364b9 1555
eb34e877 1556 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1557 envp);
1558}
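
The dev_no shifts above pack a PCI slot and function into one field. A small sketch of that packing (the helper names are illustrative, assuming the low three bits carry the function number, as the sprintf calls imply):

#include <stdint.h>

/* Illustrative helpers mirroring the dev_no encoding used above. */
static inline uint32_t spar_devno_pack(uint32_t pci_dev, uint32_t pci_fn)
{
	return (pci_dev << 3) | (pci_fn & 0x7);
}

static inline void spar_devno_unpack(uint32_t dev_no,
				     uint32_t *pci_dev, uint32_t *pci_fn)
{
	*pci_dev = dev_no >> 3;		/* SPAR_PARAHOTPLUG_DEVICE */
	*pci_fn  = dev_no & 0x7;	/* SPAR_PARAHOTPLUG_FUNCTION */
}
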
1559
1560/*
1561 * Remove any request from the list that's been on there too long and
1562 * respond with an error.
1563 */
1564static void
1565parahotplug_process_list(void)
1566{
e82ba62e
JS
1567 struct list_head *pos;
1568 struct list_head *tmp;
12e364b9 1569
ddf5de53 1570 spin_lock(&parahotplug_request_list_lock);
12e364b9 1571
ddf5de53 1572 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1573 struct parahotplug_request *req =
1574 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1575
1576 if (!time_after_eq(jiffies, req->expiration))
1577 continue;
1578
1579 list_del(pos);
1580 if (req->msg.hdr.flags.response_expected)
1581 controlvm_respond_physdev_changestate(
1582 &req->msg.hdr,
1583 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1584 req->msg.cmd.device_change_state.state);
1585 parahotplug_request_destroy(req);
12e364b9
KC
1586 }
1587
ddf5de53 1588 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1589}
1590
1591/*
 1592 * Called from the devicedisabled/deviceenabled sysfs store handlers,
 1593 * which means the user script has finished the enable/disable. Find
 1594 * the matching identifier, and respond to the CONTROLVM message with success.
1595 */
1596static int
b06bdf7d 1597parahotplug_request_complete(int id, u16 active)
12e364b9 1598{
e82ba62e
JS
1599 struct list_head *pos;
1600 struct list_head *tmp;
12e364b9 1601
ddf5de53 1602 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1603
1604 /* Look for a request matching "id". */
ddf5de53 1605 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1606 struct parahotplug_request *req =
1607 list_entry(pos, struct parahotplug_request, list);
1608 if (req->id == id) {
1609 /* Found a match. Remove it from the list and
1610 * respond.
1611 */
1612 list_del(pos);
ddf5de53 1613 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1614 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1615 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1616 controlvm_respond_physdev_changestate(
1617 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1618 req->msg.cmd.device_change_state.state);
12e364b9
KC
1619 parahotplug_request_destroy(req);
1620 return 0;
1621 }
1622 }
1623
ddf5de53 1624 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1625 return -1;
1626}
1627
1628/*
1629 * Enables or disables a PCI device by kicking off a udev script
1630 */
bd5b9b32 1631static void
3ab47701 1632parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1633{
1634 struct parahotplug_request *req;
1635
1636 req = parahotplug_request_create(inmsg);
1637
38f736e9 1638 if (!req)
12e364b9 1639 return;
12e364b9 1640
2ea5117b 1641 if (inmsg->cmd.device_change_state.state.active) {
12e364b9
KC
1642 /* For enable messages, just respond with success
1643 * right away. This is a bit of a hack, but there are
1644 * issues with the early enable messages we get (with
1645 * either the udev script not detecting that the device
1646 * is up, or not getting called at all). Fortunately
1647 * the messages that get lost don't matter anyway, as
1648 * devices are automatically enabled at
1649 * initialization.
1650 */
1651 parahotplug_request_kickoff(req);
1652 controlvm_respond_physdev_changestate(&inmsg->hdr,
8e76e695
BR
1653 CONTROLVM_RESP_SUCCESS,
1654 inmsg->cmd.device_change_state.state);
12e364b9
KC
1655 parahotplug_request_destroy(req);
1656 } else {
1657 /* For disable messages, add the request to the
1658 * request list before kicking off the udev script. It
1659 * won't get responded to until the script has
1660 * indicated it's done.
1661 */
ddf5de53
BR
1662 spin_lock(&parahotplug_request_list_lock);
1663 list_add_tail(&req->list, &parahotplug_request_list);
1664 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1665
1666 parahotplug_request_kickoff(req);
1667 }
1668}
1669
12e364b9
KC
1670/* Process a controlvm message.
1671 * Return result:
f4c11551 1672 * false - this function will return FALSE only in the case where the
12e364b9
KC
1673 * controlvm message was NOT processed, but processing must be
1674 * retried before reading the next controlvm message; a
1675 * scenario where this can occur is when we need to throttle
1676 * the allocation of memory in which to copy out controlvm
1677 * payload data
f4c11551 1678 * true - processing of the controlvm message completed,
12e364b9
KC
1679 * either successfully or with an error.
1680 */
f4c11551 1681static bool
3ab47701 1682handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
12e364b9 1683{
2ea5117b 1684 struct controlvm_message_packet *cmd = &inmsg.cmd;
e82ba62e
JS
1685 u64 parm_addr;
1686 u32 parm_bytes;
317d9614 1687 struct parser_context *parser_ctx = NULL;
e82ba62e 1688 bool local_addr;
3ab47701 1689 struct controlvm_message ackmsg;
12e364b9
KC
1690
1691 /* create parsing context if necessary */
818352a8 1692 local_addr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1693 if (channel_addr == 0)
f4c11551 1694 return true;
818352a8
BR
1695 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1696 parm_bytes = inmsg.hdr.payload_bytes;
12e364b9
KC
1697
1698 /* Parameter and channel addresses within test messages actually lie
1699 * within our OS-controlled memory. We need to know that, because it
1700 * makes a difference in how we compute the virtual address.
1701 */
ebec8967 1702 if (parm_addr && parm_bytes) {
f4c11551 1703 bool retry = false;
26eb2c0c 1704
12e364b9 1705 parser_ctx =
818352a8
BR
1706 parser_init_byte_stream(parm_addr, parm_bytes,
1707 local_addr, &retry);
1b08872e 1708 if (!parser_ctx && retry)
f4c11551 1709 return false;
12e364b9
KC
1710 }
1711
818352a8 1712 if (!local_addr) {
12e364b9
KC
1713 controlvm_init_response(&ackmsg, &inmsg.hdr,
1714 CONTROLVM_RESP_SUCCESS);
c3d9a224
BR
1715 if (controlvm_channel)
1716 visorchannel_signalinsert(controlvm_channel,
1b08872e
BR
1717 CONTROLVM_QUEUE_ACK,
1718 &ackmsg);
12e364b9 1719 }
98d7b594 1720 switch (inmsg.hdr.id) {
12e364b9 1721 case CONTROLVM_CHIPSET_INIT:
12e364b9
KC
1722 chipset_init(&inmsg);
1723 break;
1724 case CONTROLVM_BUS_CREATE:
12e364b9
KC
1725 bus_create(&inmsg);
1726 break;
1727 case CONTROLVM_BUS_DESTROY:
12e364b9
KC
1728 bus_destroy(&inmsg);
1729 break;
1730 case CONTROLVM_BUS_CONFIGURE:
12e364b9
KC
1731 bus_configure(&inmsg, parser_ctx);
1732 break;
1733 case CONTROLVM_DEVICE_CREATE:
12e364b9
KC
1734 my_device_create(&inmsg);
1735 break;
1736 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1737 if (cmd->device_change_state.flags.phys_device) {
12e364b9
KC
1738 parahotplug_process_message(&inmsg);
1739 } else {
12e364b9
KC
1740 /* save the hdr and cmd structures for later use */
1741 /* when sending back the response to Command */
1742 my_device_changestate(&inmsg);
4f44b72d 1743 g_devicechangestate_packet = inmsg.cmd;
12e364b9
KC
1744 break;
1745 }
1746 break;
1747 case CONTROLVM_DEVICE_DESTROY:
12e364b9
KC
1748 my_device_destroy(&inmsg);
1749 break;
1750 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1751 /* no op for now, just send a response indicating we passed */
98d7b594 1752 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1753 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1754 break;
1755 case CONTROLVM_CHIPSET_READY:
12e364b9
KC
1756 chipset_ready(&inmsg.hdr);
1757 break;
1758 case CONTROLVM_CHIPSET_SELFTEST:
12e364b9
KC
1759 chipset_selftest(&inmsg.hdr);
1760 break;
1761 case CONTROLVM_CHIPSET_STOP:
12e364b9
KC
1762 chipset_notready(&inmsg.hdr);
1763 break;
1764 default:
98d7b594 1765 if (inmsg.hdr.flags.response_expected)
12e364b9 1766 controlvm_respond(&inmsg.hdr,
818352a8 1767 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
12e364b9
KC
1768 break;
1769 }
1770
38f736e9 1771 if (parser_ctx) {
12e364b9
KC
1772 parser_done(parser_ctx);
1773 parser_ctx = NULL;
1774 }
f4c11551 1775 return true;
12e364b9
KC
1776}
1777
d746cb55 1778static HOSTADDRESS controlvm_get_channel_address(void)
524b0b63 1779{
5fc0229a 1780 u64 addr = 0;
b3c55b13 1781 u32 size = 0;
524b0b63 1782
0aca7844 1783 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1784 return 0;
0aca7844 1785
524b0b63
BR
1786 return addr;
1787}
1788
12e364b9
KC
1789static void
1790controlvm_periodic_work(struct work_struct *work)
1791{
3ab47701 1792 struct controlvm_message inmsg;
f4c11551
JS
1793 bool got_command = false;
1794 bool handle_command_failed = false;
1c1ed292 1795 static u64 poll_count;
12e364b9
KC
1796
1797 /* make sure visorbus server is registered for controlvm callbacks */
1798 if (visorchipset_serverregwait && !serverregistered)
1c1ed292 1799 goto cleanup;
12e364b9
KC
 1800 /* make sure visorclientbus server is registered for controlvm
1801 * callbacks
1802 */
1803 if (visorchipset_clientregwait && !clientregistered)
1c1ed292 1804 goto cleanup;
12e364b9 1805
1c1ed292
BR
1806 poll_count++;
 1807 if (poll_count < 250)
1c1ed292 1810 goto cleanup;
12e364b9
KC
1811
1812 /* Check events to determine if response to CHIPSET_READY
1813 * should be sent
1814 */
0639ba67
BR
1815 if (visorchipset_holdchipsetready &&
1816 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1817 if (check_chipset_events() == 1) {
da021f02 1818 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1819 clear_chipset_events();
da021f02 1820 memset(&g_chipset_msg_hdr, 0,
98d7b594 1821 sizeof(struct controlvm_message_header));
12e364b9
KC
1822 }
1823 }
1824
c3d9a224 1825 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1826 CONTROLVM_QUEUE_RESPONSE,
c3d9a224
BR
1827 &inmsg))
1828 ;
1c1ed292 1829 if (!got_command) {
7166ed19 1830 if (controlvm_pending_msg_valid) {
8a1182eb
BR
1831 /* we throttled processing of a prior
1832 * msg, so try to process it again
1833 * rather than reading a new one
1834 */
7166ed19 1835 inmsg = controlvm_pending_msg;
f4c11551 1836 controlvm_pending_msg_valid = false;
1c1ed292 1837 got_command = true;
75c1f8b7 1838 } else {
1c1ed292 1839 got_command = read_controlvm_event(&inmsg);
75c1f8b7 1840 }
8a1182eb 1841 }
12e364b9 1842
f4c11551 1843 handle_command_failed = false;
1c1ed292 1844 while (got_command && (!handle_command_failed)) {
b53e0e93 1845 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1846 if (handle_command(inmsg,
1847 visorchannel_get_physaddr
c3d9a224 1848 (controlvm_channel)))
1c1ed292 1849 got_command = read_controlvm_event(&inmsg);
8a1182eb
BR
1850 else {
1851 /* this is a scenario where throttling
1852 * is required, but probably NOT an
1853 * error...; we stash the current
1854 * controlvm msg so we will attempt to
1855 * reprocess it on our next loop
1856 */
f4c11551 1857 handle_command_failed = true;
7166ed19 1858 controlvm_pending_msg = inmsg;
f4c11551 1859 controlvm_pending_msg_valid = true;
12e364b9
KC
1860 }
1861 }
1862
1863 /* parahotplug_worker */
1864 parahotplug_process_list();
1865
1c1ed292 1866cleanup:
12e364b9
KC
1867
1868 if (time_after(jiffies,
b53e0e93 1869 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1870 /* it's been longer than MIN_IDLE_SECONDS since we
1871 * processed our last controlvm message; slow down the
1872 * polling
1873 */
911e213e
BR
1874 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1875 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1876 } else {
911e213e
BR
1877 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1878 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1879 }
1880
9232d2d6
BR
1881 queue_delayed_work(periodic_controlvm_workqueue,
1882 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1883}
1884
1885static void
1886setup_crash_devices_work_queue(struct work_struct *work)
1887{
e6bdb904
BR
1888 struct controlvm_message local_crash_bus_msg;
1889 struct controlvm_message local_crash_dev_msg;
3ab47701 1890 struct controlvm_message msg;
e6bdb904
BR
1891 u32 local_crash_msg_offset;
1892 u16 local_crash_msg_count;
12e364b9
KC
1893
1894 /* make sure visorbus server is registered for controlvm callbacks */
1895 if (visorchipset_serverregwait && !serverregistered)
e6bdb904 1896 goto cleanup;
12e364b9
KC
1897
 1898 /* make sure visorclientbus server is registered for controlvm
1899 * callbacks
1900 */
1901 if (visorchipset_clientregwait && !clientregistered)
e6bdb904 1902 goto cleanup;
12e364b9
KC
1903
1904 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1905
1906 /* send init chipset msg */
98d7b594 1907 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1908 msg.cmd.init_chipset.bus_count = 23;
1909 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1910
1911 chipset_init(&msg);
1912
12e364b9 1913 /* get saved message count */
c3d9a224 1914 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1915 offsetof(struct spar_controlvm_channel_protocol,
1916 saved_crash_message_count),
e6bdb904 1917 &local_crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
1918 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1919 POSTCODE_SEVERITY_ERR);
1920 return;
1921 }
1922
e6bdb904 1923 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 1924 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 1925 local_crash_msg_count,
12e364b9
KC
1926 POSTCODE_SEVERITY_ERR);
1927 return;
1928 }
1929
1930 /* get saved crash message offset */
c3d9a224 1931 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1932 offsetof(struct spar_controlvm_channel_protocol,
1933 saved_crash_message_offset),
e6bdb904 1934 &local_crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
1935 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1936 POSTCODE_SEVERITY_ERR);
1937 return;
1938 }
1939
1940 /* read create device message for storage bus offset */
c3d9a224 1941 if (visorchannel_read(controlvm_channel,
e6bdb904
BR
1942 local_crash_msg_offset,
1943 &local_crash_bus_msg,
3ab47701 1944 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1945 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1946 POSTCODE_SEVERITY_ERR);
1947 return;
1948 }
1949
1950 /* read create device message for storage device */
c3d9a224 1951 if (visorchannel_read(controlvm_channel,
e6bdb904 1952 local_crash_msg_offset +
3ab47701 1953 sizeof(struct controlvm_message),
e6bdb904 1954 &local_crash_dev_msg,
3ab47701 1955 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1956 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1957 POSTCODE_SEVERITY_ERR);
1958 return;
1959 }
1960
1961 /* reuse IOVM create bus message */
ebec8967 1962 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
e6bdb904 1963 bus_create(&local_crash_bus_msg);
75c1f8b7 1964 } else {
12e364b9
KC
1965 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1966 POSTCODE_SEVERITY_ERR);
1967 return;
1968 }
1969
1970 /* reuse create device message for storage device */
ebec8967 1971 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
e6bdb904 1972 my_device_create(&local_crash_dev_msg);
75c1f8b7 1973 } else {
12e364b9
KC
1974 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1975 POSTCODE_SEVERITY_ERR);
1976 return;
1977 }
12e364b9
KC
1978 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1979 return;
1980
e6bdb904 1981cleanup:
12e364b9 1982
911e213e 1983 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1984
9232d2d6
BR
1985 queue_delayed_work(periodic_controlvm_workqueue,
1986 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1987}
1988
1989static void
52063eca 1990bus_create_response(u32 bus_no, int response)
12e364b9 1991{
8e3fedd6 1992 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
12e364b9
KC
1993}
1994
1995static void
52063eca 1996bus_destroy_response(u32 bus_no, int response)
12e364b9 1997{
8e3fedd6 1998 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
12e364b9
KC
1999}
2000
2001static void
52063eca 2002device_create_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2003{
8e3fedd6 2004 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
12e364b9
KC
2005}
2006
2007static void
52063eca 2008device_destroy_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2009{
8e3fedd6 2010 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
12e364b9
KC
2011}
2012
2013void
52063eca 2014visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2015{
12e364b9 2016 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 2017 bus_no, dev_no, response,
bd0d2dcc 2018 segment_state_standby);
12e364b9 2019}
927c7927 2020EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
12e364b9
KC
2021
2022static void
52063eca 2023device_resume_response(u32 bus_no, u32 dev_no, int response)
12e364b9
KC
2024{
2025 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8e3fedd6 2026 bus_no, dev_no, response,
bd0d2dcc 2027 segment_state_running);
12e364b9
KC
2028}
2029
f4c11551 2030bool
52063eca 2031visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2032{
4f66520b 2033 void *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2034
0aca7844 2035 if (!p)
f4c11551 2036 return false;
77db7127 2037 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
f4c11551 2038 return true;
12e364b9
KC
2039}
2040EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2041
f4c11551 2042bool
52063eca 2043visorchipset_set_bus_context(u32 bus_no, void *context)
12e364b9 2044{
4f66520b 2045 struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2046
0aca7844 2047 if (!p)
f4c11551 2048 return false;
12e364b9 2049 p->bus_driver_context = context;
f4c11551 2050 return true;
12e364b9
KC
2051}
2052EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
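
A hedged caller-side sketch of how a bus driver might use these two exported helpers; the private structure and function names are hypothetical, and struct visorchipset_bus_info plus the prototypes are assumed to come from visorchipset.h:

#include <linux/slab.h>
#include <linux/errno.h>
#include "visorchipset.h"

struct mybus_private {			/* hypothetical per-bus driver state */
	u32 bus_no;
};

/* Look up the chipset's record of a bus and attach driver context to it. */
static int mybus_attach(u32 bus_no)
{
	struct visorchipset_bus_info bus_info;
	struct mybus_private *priv;

	if (!visorchipset_get_bus_info(bus_no, &bus_info))
		return -ENODEV;		/* chipset has no record of this bus */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->bus_no = bus_no;

	if (!visorchipset_set_bus_context(bus_no, priv)) {
		kfree(priv);
		return -ENODEV;
	}
	return 0;
}
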
2053
f4c11551 2054bool
52063eca 2055visorchipset_get_device_info(u32 bus_no, u32 dev_no,
b486df19 2056 struct visorchipset_device_info *dev_info)
12e364b9 2057{
d480f6a2 2058 void *p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2059
0aca7844 2060 if (!p)
f4c11551 2061 return false;
b486df19 2062 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
f4c11551 2063 return true;
12e364b9
KC
2064}
2065EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2066
f4c11551 2067bool
52063eca 2068visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
12e364b9 2069{
d480f6a2
JS
2070 struct visorchipset_device_info *p;
2071
2072 p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2073
0aca7844 2074 if (!p)
f4c11551 2075 return false;
12e364b9 2076 p->bus_driver_context = context;
f4c11551 2077 return true;
12e364b9
KC
2078}
2079EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2080
2081/* Generic wrapper function for allocating memory from a kmem_cache pool.
2082 */
2083void *
f4c11551 2084visorchipset_cache_alloc(struct kmem_cache *pool, bool ok_to_block,
12e364b9
KC
2085 char *fn, int ln)
2086{
2087 gfp_t gfp;
2088 void *p;
2089
2090 if (ok_to_block)
2091 gfp = GFP_KERNEL;
2092 else
2093 gfp = GFP_ATOMIC;
2094 /* __GFP_NORETRY means "ok to fail", meaning
2095 * kmem_cache_alloc() can return NULL, implying the caller CAN
2096 * cope with failure. If you do NOT specify __GFP_NORETRY,
2097 * Linux will go to extreme measures to get memory for you
2098 * (like, invoke oom killer), which will probably cripple the
2099 * system.
2100 */
2101 gfp |= __GFP_NORETRY;
2102 p = kmem_cache_alloc(pool, gfp);
0aca7844 2103 if (!p)
12e364b9 2104 return NULL;
0aca7844 2105
12e364b9
KC
2106 return p;
2107}
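
A minimal sketch of the intended call pattern for these wrappers; the pool and payload struct are illustrative rather than taken from this driver, and callers must tolerate NULL because of __GFP_NORETRY:

#include <linux/slab.h>
#include <linux/errno.h>
#include "visorchipset.h"	/* assumed to declare the cache wrappers */

struct demo_payload {		/* hypothetical pooled object */
	int value;
};

static struct kmem_cache *demo_pool;	/* created elsewhere via kmem_cache_create() */

static int demo_use_pool(bool can_block)
{
	struct demo_payload *p;

	p = visorchipset_cache_alloc(demo_pool, can_block,
				     (char *)__FILE__, __LINE__);
	if (!p)
		return -ENOMEM;	/* allocation failure is expected and survivable */

	p->value = 42;
	/* ... use the object ... */
	visorchipset_cache_free(demo_pool, p, (char *)__FILE__, __LINE__);
	return 0;
}
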
2108
2109/* Generic wrapper function for freeing memory from a kmem_cache pool.
2110 */
2111void
2112visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2113{
0aca7844 2114 if (!p)
12e364b9 2115 return;
0aca7844 2116
12e364b9
KC
2117 kmem_cache_free(pool, p);
2118}
2119
18b87ed1 2120static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2121 struct device_attribute *attr,
2122 const char *buf, size_t count)
12e364b9 2123{
18b87ed1 2124 char msgtype[64];
12e364b9 2125
66e24b76
BR
2126 if (sscanf(buf, "%63s", msgtype) != 1)
2127 return -EINVAL;
2128
ebec8967 2129 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2130 chipset_events[0] = 1;
2131 return count;
ebec8967 2132 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2133 chipset_events[1] = 1;
2134 return count;
e22a4a0f
BR
2135 }
2136 return -EINVAL;
12e364b9
KC
2137}
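
When holdchipsetready is set, the periodic worker above only answers the held CHIPSET_READY message once check_chipset_events() reports that the expected events have arrived, and this attribute is how user space reports them. A small user-space sketch (the attribute path is an assumption):

#include <stdio.h>

/* Report a readiness event; valid strings are the ones parsed by
 * chipsetready_store(): "CALLHOMEDISK_MOUNTED" or "MODULES_LOADED".
 */
static int report_chipset_event(const char *msgtype)
{
	FILE *f = fopen("/sys/devices/platform/visorchipset/chipsetready", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", msgtype);
	return fclose(f);
}
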
2138
e56fa7cd
BR
2139/* The parahotplug/devicedisabled interface gets called by our support script
2140 * when an SR-IOV device has been shut down. The ID is passed to the script
2141 * and then passed back when the device has been removed.
2142 */
2143static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2144 struct device_attribute *attr,
2145 const char *buf, size_t count)
e56fa7cd 2146{
94217363 2147 unsigned int id;
e56fa7cd 2148
ebec8967 2149 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2150 return -EINVAL;
2151
2152 parahotplug_request_complete(id, 0);
2153 return count;
2154}
2155
2156/* The parahotplug/deviceenabled interface gets called by our support script
2157 * when an SR-IOV device has been recovered. The ID is passed to the script
2158 * and then passed back when the device has been brought back up.
2159 */
2160static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2161 struct device_attribute *attr,
2162 const char *buf, size_t count)
e56fa7cd 2163{
94217363 2164 unsigned int id;
e56fa7cd 2165
ebec8967 2166 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2167 return -EINVAL;
2168
2169 parahotplug_request_complete(id, 1);
2170 return count;
2171}
2172
12e364b9
KC
2173static int __init
2174visorchipset_init(void)
2175{
2176 int rc = 0, x = 0;
8a1182eb 2177 HOSTADDRESS addr;
12e364b9 2178
fcd0157e
KC
2179 if (!unisys_spar_platform)
2180 return -ENODEV;
2181
6fe345af
BR
2182 memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
2183 memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
84982fbf 2184 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
ea33b4ee
BR
2185 memset(&livedump_info, 0, sizeof(livedump_info));
2186 atomic_set(&livedump_info.buffers_in_use, 0);
12e364b9 2187
9f8d0e8b 2188 if (visorchipset_testvnic) {
9f8d0e8b
KC
2189 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2190 rc = x;
a6a3989b 2191 goto cleanup;
9f8d0e8b 2192 }
12e364b9 2193
8a1182eb 2194 addr = controlvm_get_channel_address();
ebec8967 2195 if (addr) {
c3d9a224 2196 controlvm_channel =
8a1182eb
BR
2197 visorchannel_create_with_lock
2198 (addr,
d19642f6 2199 sizeof(struct spar_controlvm_channel_protocol),
5fbaa4b3 2200 spar_controlvm_channel_protocol_uuid);
93a84565 2201 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
c3d9a224 2202 visorchannel_get_header(controlvm_channel))) {
8a1182eb
BR
2203 initialize_controlvm_payload();
2204 } else {
c3d9a224
BR
2205 visorchannel_destroy(controlvm_channel);
2206 controlvm_channel = NULL;
8a1182eb
BR
2207 return -ENODEV;
2208 }
2209 } else {
8a1182eb
BR
2210 return -ENODEV;
2211 }
2212
5aa8ae57
BR
2213 major_dev = MKDEV(visorchipset_major, 0);
2214 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2215 if (rc < 0) {
4cb005a9 2216 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2217 goto cleanup;
4cb005a9 2218 }
9f8d0e8b 2219
da021f02 2220 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2221
2098dbd1 2222 if (!visorchipset_disable_controlvm) {
12e364b9 2223 /* if booting in a crash kernel */
1ba00980 2224 if (is_kdump_kernel())
9232d2d6 2225 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9
KC
2226 setup_crash_devices_work_queue);
2227 else
9232d2d6 2228 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9 2229 controlvm_periodic_work);
9232d2d6 2230 periodic_controlvm_workqueue =
12e364b9
KC
2231 create_singlethread_workqueue("visorchipset_controlvm");
2232
38f736e9 2233 if (!periodic_controlvm_workqueue) {
4cb005a9
KC
2234 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2235 DIAG_SEVERITY_ERR);
2236 rc = -ENOMEM;
a6a3989b 2237 goto cleanup;
4cb005a9 2238 }
b53e0e93 2239 most_recent_message_jiffies = jiffies;
911e213e 2240 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
9232d2d6
BR
2241 rc = queue_delayed_work(periodic_controlvm_workqueue,
2242 &periodic_controlvm_work, poll_jiffies);
4cb005a9 2243 if (rc < 0) {
4cb005a9
KC
2244 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2245 DIAG_SEVERITY_ERR);
a6a3989b 2246 goto cleanup;
4cb005a9 2247 }
12e364b9
KC
2248 }
2249
eb34e877
BR
2250 visorchipset_platform_device.dev.devt = major_dev;
2251 if (platform_device_register(&visorchipset_platform_device) < 0) {
4cb005a9
KC
2252 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2253 rc = -1;
a6a3989b 2254 goto cleanup;
4cb005a9 2255 }
12e364b9 2256 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
22ad57ba 2257 rc = 0;
a6a3989b 2258cleanup:
12e364b9 2259 if (rc) {
12e364b9
KC
2260 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2261 POSTCODE_SEVERITY_ERR);
2262 }
2263 return rc;
2264}
2265
2266static void
2267visorchipset_exit(void)
2268{
12e364b9
KC
2269 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2270
 2271 if (!visorchipset_disable_controlvm) {
9232d2d6
BR
2274 cancel_delayed_work(&periodic_controlvm_work);
2275 flush_workqueue(periodic_controlvm_workqueue);
2276 destroy_workqueue(periodic_controlvm_workqueue);
2277 periodic_controlvm_workqueue = NULL;
84982fbf 2278 destroy_controlvm_payload_info(&controlvm_payload_info);
12e364b9 2279 }
1783319f 2280
12e364b9
KC
2281 cleanup_controlvm_structures();
2282
da021f02 2283 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2284
c3d9a224 2285 visorchannel_destroy(controlvm_channel);
8a1182eb 2286
addceb12 2287 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
12e364b9 2288 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
12e364b9
KC
2289}
2290
2291module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2292MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
12e364b9
KC
2293module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2294MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
12e364b9
KC
2295module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2296MODULE_PARM_DESC(visorchipset_testmsg,
2297 "1 to manufacture the chipset, bus, and switch messages");
12e364b9 2298module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2299MODULE_PARM_DESC(visorchipset_major,
2300 "major device number to use for the device node");
12e364b9
KC
2301module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
 2302 MODULE_PARM_DESC(visorchipset_serverregwait,
2303 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2304module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2305MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
12e364b9
KC
2306module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2307MODULE_PARM_DESC(visorchipset_testteardown,
2308 "1 to test teardown of the chipset, bus, and switch");
12e364b9
KC
2309module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2310 S_IRUGO);
2311MODULE_PARM_DESC(visorchipset_disable_controlvm,
2312 "1 to disable polling of controlVm channel");
12e364b9
KC
2313module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2314 int, S_IRUGO);
2315MODULE_PARM_DESC(visorchipset_holdchipsetready,
2316 "1 to hold response to CHIPSET_READY");
b615d628 2317
12e364b9
KC
2318module_init(visorchipset_init);
2319module_exit(visorchipset_exit);
2320
2321MODULE_AUTHOR("Unisys");
2322MODULE_LICENSE("GPL");
2323MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2324 VERSION);
2325MODULE_VERSION(VERSION);