staging: unisys: remove file.c and pass functionality to visorchipset
drivers/staging/unisys/visorchipset/visorchipset_main.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include "version.h"
#include "procobjecttree.h"
#include "visorbus.h"
#include "periodic_work.h"
#include "parser.h"
#include "uisutils.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "visorchipset.h"

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
#define TEST_VNIC_PHYSITF "eth0"	/* physical network itf for
					 * vnic loopback test */
#define TEST_VNIC_SWITCHNO 1
#define TEST_VNIC_BUSNO 9

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

/*
 * Module parameters
 */
static int visorchipset_testvnic;
static int visorchipset_testvnicclient;
static int visorchipset_testmsg;
static int visorchipset_major;
static int visorchipset_serverregwait;
static int visorchipset_clientregwait = 1;	/* default is on */
static int visorchipset_testteardown;
static int visorchipset_disable_controlvm;
static int visorchipset_holdchipsetready;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;	/* when we got our last
							 * controlvm message */
static int serverregistered;
static int clientregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

/* Only VNIC and VHBA channels are sent to visorclientbus (aka
 * "visorhackbus")
 */
#define FOR_VISORHACKBUS(channel_type_guid) \
	(((uuid_le_cmp(channel_type_guid,\
		       spar_vnic_channel_protocol_uuid) == 0) ||\
	(uuid_le_cmp(channel_type_guid,\
		     spar_vhba_channel_protocol_uuid) == 0)))
#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload * pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
struct visor_livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	bool gettextdump_outstanding;
	u32 crc32;
	unsigned long length;
	atomic_t buffers_in_use;
	unsigned long destination;
};

static struct visor_livedump_info livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid = false;

/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 * file data
	 * - this list is removed from via the hotplug program that is actually
	 * consuming these buffers to write as file data */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_server_notifiers;
static struct visorchipset_busdev_notifiers busdev_client_notifiers;

static void bus_create_response(u32 bus_no, int response);
static void bus_destroy_response(u32 bus_no, int response);
static void device_create_response(u32 bus_no, u32 dev_no, int response);
static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
static void device_resume_response(u32 bus_no, u32 dev_no, int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
		char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
		char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
		struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
		int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static ssize_t toolaction_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 tool_action),
		&tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 efi_spar_ind), &efi_spar_indication,
		sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 efi_spar_ind), &(efi_spar_indication),
		sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_error),
		&error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_error),
		&error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_text_id),
		&text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_text_id),
		&text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_remaining_steps),
		&remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
		offsetof(struct spar_controlvm_channel_protocol,
			 installation_remaining_steps),
		&remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;

	kfree(p->name);
	kfree(p->description);
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *) v;

	memset(p, 0, sizeof(struct visorchipset_device_info));
}

static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
	struct visorchipset_bus_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no)
			return p;
	}

	return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
	struct visorchipset_device_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no && p->dev_no == dev_no)
			return p;
	}

	return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
	struct visorchipset_device_info *p, *tmp;

	list_for_each_entry_safe(p, tmp, list, entry) {
		if (p->bus_no == bus_no) {
			list_del(&p->entry);
			kfree(p);
		}
	}
}

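/* check_chipset_events() returns nonzero only when every entry in
 * chipset_events[] has been set, i.e. all expected chipset events have been
 * seen; clear_chipset_events() resets the tracking array.
 */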
static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;
	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;
	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

void
visorchipset_register_busdev_server(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_server_notifiers, 0,
		       sizeof(busdev_server_notifiers));
		serverregistered = 0;	/* clear flag */
	} else {
		busdev_server_notifiers = *notifiers;
		serverregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);

void
visorchipset_register_busdev_client(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_client_notifiers, 0,
		       sizeof(busdev_client_notifiers));
		clientregistered = 0;	/* clear flag */
	} else {
		busdev_client_notifiers = *notifiers;
		clientregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset(bolts)",
				     "visorchipset", VERSION, NULL);
	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it). */
	features =
	    inmsg->cmd.init_chipset.
	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver. */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

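/* Initialize a response message: copy the header from the original request,
 * zero the payload fields, and, for a negative response code, mark the
 * message as failed and record the completion status.
 */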
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet. */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 crash_msg_offset;
	u16 crash_msg_count;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);

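/* Complete a pending bus-related controlvm request: update the cached
 * visorchipset_bus_info state for bus_no, send the controlvm response if
 * one is still pending for cmd_id, and clear out the bus (and its devices)
 * when a successful CONTROLVM_BUS_DESTROY is being acknowledged.
 */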
static void
bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
{
	struct visorchipset_bus_info *p;
	bool need_clear = false;

	p = bus_find(&bus_info_list, bus_no);
	if (!p)
		return;

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			busdevices_del(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		busdevices_del(&dev_info_list, bus_no);
	}
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     u32 bus_no, u32 dev_no, int response,
			     struct spar_segment_state response_state)
{
	struct visorchipset_device_info *p;
	struct controlvm_message outmsg;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
{
	struct visorchipset_device_info *p;
	bool need_clear = false;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

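/* Common tail for bus create/destroy handling: save (or invalidate) the
 * pending response header for the bus, then on success invoke the registered
 * server/client bus_create or bus_destroy notifiers.  If no notifier was
 * called, respond to the controlvm message directly via bus_responder().
 */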
static void
bus_epilog(u32 bus_no,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	struct visorchipset_bus_info *bus_info;
	bool notified = false;

	bus_info = bus_find(&bus_info_list, bus_no);

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			/* We can't tell from the bus_create
			 * information which of our 2 bus flavors the
			 * devices on this bus will ultimately end up.
			 * FORTUNATELY, it turns out it is harmless to
			 * send the bus_create to both of them.  We can
			 * narrow things down a little bit, though,
			 * because we know: - BusDev_Server can handle
			 * either server or client devices
			 * - BusDev_Client can handle ONLY client
			 * devices */
			if (busdev_server_notifiers.bus_create) {
				(*busdev_server_notifiers.bus_create) (bus_no);
				notified = true;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    busdev_client_notifiers.bus_create) {
				(*busdev_client_notifiers.bus_create) (bus_no);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_server_notifiers.bus_destroy) {
				(*busdev_server_notifiers.bus_destroy) (bus_no);
				notified = true;
			}
			if ((!bus_info->flags.server) /*client */ &&
			    busdev_client_notifiers.bus_destroy) {
				(*busdev_client_notifiers.bus_destroy) (bus_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_no, response);
	up(&notifier_lock);
}

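/* Common tail for device create/destroy/changestate handling: save the
 * pending response header, then route the event to the server or client
 * notifier set.  CHANGESTATE requests map onto device_resume (running),
 * device_pause (standby), or a uevent for the diagpool "lite pause" case.
 * If no notifier was called, respond directly via device_responder().
 */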
static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;

	struct visorchipset_device_info *dev_info =
		device_find(&dev_info_list, bus_no, dev_no);
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;

	if (for_visorbus)
		notifiers = &busdev_server_notifiers;
	else
		notifiers = &busdev_client_notifiers;
	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (bus_no, dev_no);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (bus_no,
								     dev_no);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (bus_no,
								    dev_no);
					notified = true;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (bus_no, dev_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, bus_no, dev_no, response);
	up(&notifier_lock);
}

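/* Handle a CONTROLVM_BUS_CREATE message: allocate and fill in a
 * visorchipset_bus_info entry for the new bus, add it to bus_info_list, and
 * finish (including the controlvm response) through bus_epilog().
 */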
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	dev_info->chan_info.intr = cmd->create_device.intr;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(bus_no, dev_no, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1,
		      FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					dev_info->chan_info.channel_type_uuid));
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1,
			      FOR_VISORBUS(
					dev_info->chan_info.channel_type_uuid));
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success, or a negative CONTROLVM_RESP_ERROR code on failure.
 */
static int
initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

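/* Read the request payload offset and size advertised in the controlvm
 * channel header and map that region via initialize_controlvm_payload_info().
 */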
static void
initialize_controlvm_payload(void)
{
	HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);

int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);

1429static void
77a0449d 1430chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1431{
1432 int rc = visorchipset_chipset_ready();
26eb2c0c 1433
12e364b9
KC
1434 if (rc != CONTROLVM_RESP_SUCCESS)
1435 rc = -rc;
77a0449d
BR
1436 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1437 controlvm_respond(msg_hdr, rc);
1438 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1439 /* Send CHIPSET_READY response when all modules have been loaded
1440 * and disks mounted for the partition
1441 */
77a0449d 1442 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1443 }
1444}
1445
1446static void
77a0449d 1447chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1448{
1449 int rc = visorchipset_chipset_selftest();
26eb2c0c 1450
12e364b9
KC
1451 if (rc != CONTROLVM_RESP_SUCCESS)
1452 rc = -rc;
77a0449d
BR
1453 if (msg_hdr->flags.response_expected)
1454 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1455}
1456
1457static void
77a0449d 1458chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1459{
1460 int rc = visorchipset_chipset_notready();
26eb2c0c 1461
12e364b9
KC
1462 if (rc != CONTROLVM_RESP_SUCCESS)
1463 rc = -rc;
77a0449d
BR
1464 if (msg_hdr->flags.response_expected)
1465 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1466}
1467
1468/* This is your "one-stop" shop for grabbing the next message from the
1469 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1470 */
f4c11551 1471static bool
3ab47701 1472read_controlvm_event(struct controlvm_message *msg)
12e364b9 1473{
c3d9a224 1474 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1475 CONTROLVM_QUEUE_EVENT, msg)) {
1476 /* got a message */
0aca7844 1477 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1478 return false;
1479 return true;
12e364b9 1480 }
f4c11551 1481 return false;
12e364b9
KC
1482}
1483
1484/*
1485 * The general parahotplug flow works as follows. The visorchipset
1486 * driver receives a DEVICE_CHANGESTATE message from Command
1487 * specifying a physical device to enable or disable. The CONTROLVM
1488 * message handler calls parahotplug_process_message, which then adds
1489 * the message to a global list and kicks off a udev event which
1490 * causes a user level script to enable or disable the specified
1491 * device. The udev script then writes to
1492 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1493 * to get called, at which point the appropriate CONTROLVM message is
1494 * retrieved from the list and responded to.
1495 */
1496
1497#define PARAHOTPLUG_TIMEOUT_MS 2000
1498
1499/*
1500 * Generate unique int to match an outstanding CONTROLVM message with a
1501 * udev script /proc response
1502 */
1503static int
1504parahotplug_next_id(void)
1505{
1506 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1507
12e364b9
KC
1508 return atomic_inc_return(&id);
1509}
1510
1511/*
1512 * Returns the time (in jiffies) when a CONTROLVM message on the list
1513 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1514 */
1515static unsigned long
1516parahotplug_next_expiration(void)
1517{
2cc1a1b3 1518 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1519}
1520
1521/*
1522 * Create a parahotplug_request, which is basically a wrapper for a
1523 * CONTROLVM_MESSAGE that we can stick on a list
1524 */
1525static struct parahotplug_request *
3ab47701 1526parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1527{
ea0dcfcf
QL
1528 struct parahotplug_request *req;
1529
6a55e3c3 1530 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1531 if (!req)
12e364b9
KC
1532 return NULL;
1533
1534 req->id = parahotplug_next_id();
1535 req->expiration = parahotplug_next_expiration();
1536 req->msg = *msg;
1537
1538 return req;
1539}
1540
1541/*
1542 * Free a parahotplug_request.
1543 */
1544static void
1545parahotplug_request_destroy(struct parahotplug_request *req)
1546{
1547 kfree(req);
1548}
1549
1550/*
1551 * Cause uevent to run the user level script to do the disable/enable
1552 * specified in (the CONTROLVM message in) the specified
1553 * parahotplug_request
1554 */
1555static void
1556parahotplug_request_kickoff(struct parahotplug_request *req)
1557{
2ea5117b 1558 struct controlvm_message_packet *cmd = &req->msg.cmd;
12e364b9
KC
1559 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1560 env_func[40];
1561 char *envp[] = {
1562 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1563 };
1564
1565 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1566 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1567 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
2ea5117b 1568 cmd->device_change_state.state.active);
12e364b9 1569 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
2ea5117b 1570 cmd->device_change_state.bus_no);
12e364b9 1571 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
2ea5117b 1572 cmd->device_change_state.dev_no >> 3);
12e364b9 1573 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
2ea5117b 1574 cmd->device_change_state.dev_no & 0x7);
12e364b9 1575
eb34e877 1576 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1577 envp);
1578}
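/*
 * Worked example of the dev_no encoding above: the low three bits carry the
 * PCI function and the remaining bits the PCI device, so dev_no = 26 (0x1a)
 * is reported as SPAR_PARAHOTPLUG_DEVICE=3 and SPAR_PARAHOTPLUG_FUNCTION=2,
 * while SPAR_PARAHOTPLUG_BUS passes bus_no through unchanged.
 */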
1579
1580/*
1581 * Remove any request from the list that's been on there too long and
1582 * respond with an error.
1583 */
1584static void
1585parahotplug_process_list(void)
1586{
e82ba62e
JS
1587 struct list_head *pos;
1588 struct list_head *tmp;
12e364b9 1589
ddf5de53 1590 spin_lock(&parahotplug_request_list_lock);
12e364b9 1591
ddf5de53 1592 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1593 struct parahotplug_request *req =
1594 list_entry(pos, struct parahotplug_request, list);
55b33413
BR
1595
1596 if (!time_after_eq(jiffies, req->expiration))
1597 continue;
1598
1599 list_del(pos);
1600 if (req->msg.hdr.flags.response_expected)
1601 controlvm_respond_physdev_changestate(
1602 &req->msg.hdr,
1603 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1604 req->msg.cmd.device_change_state.state);
1605 parahotplug_request_destroy(req);
12e364b9
KC
1606 }
1607
ddf5de53 1608 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1609}
1610
1611/*
1612 * Called from the /proc handler, which means the user script has
1613 * finished the enable/disable. Find the matching identifier, and
1614 * respond to the CONTROLVM message with success.
1615 */
1616static int
b06bdf7d 1617parahotplug_request_complete(int id, u16 active)
12e364b9 1618{
e82ba62e
JS
1619 struct list_head *pos;
1620 struct list_head *tmp;
12e364b9 1621
ddf5de53 1622 spin_lock(&parahotplug_request_list_lock);
12e364b9
KC
1623
1624 /* Look for a request matching "id". */
ddf5de53 1625 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
12e364b9
KC
1626 struct parahotplug_request *req =
1627 list_entry(pos, struct parahotplug_request, list);
1628 if (req->id == id) {
1629 /* Found a match. Remove it from the list and
1630 * respond.
1631 */
1632 list_del(pos);
ddf5de53 1633 spin_unlock(&parahotplug_request_list_lock);
2ea5117b 1634 req->msg.cmd.device_change_state.state.active = active;
98d7b594 1635 if (req->msg.hdr.flags.response_expected)
12e364b9
KC
1636 controlvm_respond_physdev_changestate(
1637 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
2ea5117b 1638 req->msg.cmd.device_change_state.state);
12e364b9
KC
1639 parahotplug_request_destroy(req);
1640 return 0;
1641 }
1642 }
1643
ddf5de53 1644 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1645 return -1;
1646}
1647
1648/*
1649 * Enables or disables a PCI device by kicking off a udev script
1650 */
bd5b9b32 1651static void
3ab47701 1652parahotplug_process_message(struct controlvm_message *inmsg)
12e364b9
KC
1653{
1654 struct parahotplug_request *req;
1655
1656 req = parahotplug_request_create(inmsg);
1657
38f736e9 1658 if (!req)
12e364b9 1659 return;
12e364b9 1660
2ea5117b 1661 if (inmsg->cmd.device_change_state.state.active) {
12e364b9
KC
1662 /* For enable messages, just respond with success
1663 * right away. This is a bit of a hack, but there are
1664 * issues with the early enable messages we get (with
1665 * either the udev script not detecting that the device
1666 * is up, or not getting called at all). Fortunately
1667 * the messages that get lost don't matter anyway, as
1668 * devices are automatically enabled at
1669 * initialization.
1670 */
1671 parahotplug_request_kickoff(req);
1672 controlvm_respond_physdev_changestate(&inmsg->hdr,
8e76e695
BR
1673 CONTROLVM_RESP_SUCCESS,
1674 inmsg->cmd.device_change_state.state);
12e364b9
KC
1675 parahotplug_request_destroy(req);
1676 } else {
1677 /* For disable messages, add the request to the
1678 * request list before kicking off the udev script. It
1679 * won't get responded to until the script has
1680 * indicated it's done.
1681 */
ddf5de53
BR
1682 spin_lock(&parahotplug_request_list_lock);
1683 list_add_tail(&req->list, &parahotplug_request_list);
1684 spin_unlock(&parahotplug_request_list_lock);
12e364b9
KC
1685
1686 parahotplug_request_kickoff(req);
1687 }
1688}
1689
12e364b9
KC
1690/* Process a controlvm message.
1691 * Return result:
f4c11551 1692 * false - this function will return FALSE only in the case where the
12e364b9
KC
1693 * controlvm message was NOT processed, but processing must be
1694 * retried before reading the next controlvm message; a
1695 * scenario where this can occur is when we need to throttle
1696 * the allocation of memory in which to copy out controlvm
1697 * payload data
f4c11551 1698 * true - processing of the controlvm message completed,
12e364b9
KC
1699 * either successfully or with an error.
1700 */
f4c11551 1701static bool
3ab47701 1702handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
12e364b9 1703{
2ea5117b 1704 struct controlvm_message_packet *cmd = &inmsg.cmd;
e82ba62e
JS
1705 u64 parm_addr;
1706 u32 parm_bytes;
317d9614 1707 struct parser_context *parser_ctx = NULL;
e82ba62e 1708 bool local_addr;
3ab47701 1709 struct controlvm_message ackmsg;
12e364b9
KC
1710
1711 /* create parsing context if necessary */
818352a8 1712 local_addr = (inmsg.hdr.flags.test_message == 1);
0aca7844 1713 if (channel_addr == 0)
f4c11551 1714 return true;
818352a8
BR
1715 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1716 parm_bytes = inmsg.hdr.payload_bytes;
12e364b9
KC
1717
1718 /* Parameter and channel addresses within test messages actually lie
1719 * within our OS-controlled memory. We need to know that, because it
1720 * makes a difference in how we compute the virtual address.
1721 */
ebec8967 1722 if (parm_addr && parm_bytes) {
f4c11551 1723 bool retry = false;
26eb2c0c 1724
12e364b9 1725 parser_ctx =
818352a8
BR
1726 parser_init_byte_stream(parm_addr, parm_bytes,
1727 local_addr, &retry);
1b08872e 1728 if (!parser_ctx && retry)
f4c11551 1729 return false;
12e364b9
KC
1730 }
1731
818352a8 1732 if (!local_addr) {
12e364b9
KC
1733 controlvm_init_response(&ackmsg, &inmsg.hdr,
1734 CONTROLVM_RESP_SUCCESS);
c3d9a224
BR
1735 if (controlvm_channel)
1736 visorchannel_signalinsert(controlvm_channel,
1b08872e
BR
1737 CONTROLVM_QUEUE_ACK,
1738 &ackmsg);
12e364b9 1739 }
98d7b594 1740 switch (inmsg.hdr.id) {
12e364b9 1741 case CONTROLVM_CHIPSET_INIT:
12e364b9
KC
1742 chipset_init(&inmsg);
1743 break;
1744 case CONTROLVM_BUS_CREATE:
12e364b9
KC
1745 bus_create(&inmsg);
1746 break;
1747 case CONTROLVM_BUS_DESTROY:
12e364b9
KC
1748 bus_destroy(&inmsg);
1749 break;
1750 case CONTROLVM_BUS_CONFIGURE:
12e364b9
KC
1751 bus_configure(&inmsg, parser_ctx);
1752 break;
1753 case CONTROLVM_DEVICE_CREATE:
12e364b9
KC
1754 my_device_create(&inmsg);
1755 break;
1756 case CONTROLVM_DEVICE_CHANGESTATE:
2ea5117b 1757 if (cmd->device_change_state.flags.phys_device) {
12e364b9
KC
1758 parahotplug_process_message(&inmsg);
1759 } else {
12e364b9
KC
1760 /* save the hdr and cmd structures for later use */
1761 /* when sending back the response to Command */
1762 my_device_changestate(&inmsg);
4f44b72d 1763 g_devicechangestate_packet = inmsg.cmd;
12e364b9
KC
1764 break;
1765 }
1766 break;
1767 case CONTROLVM_DEVICE_DESTROY:
12e364b9
KC
1768 my_device_destroy(&inmsg);
1769 break;
1770 case CONTROLVM_DEVICE_CONFIGURE:
12e364b9 1771 /* no op for now; just send a response indicating success */
98d7b594 1772 if (inmsg.hdr.flags.response_expected)
12e364b9
KC
1773 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1774 break;
1775 case CONTROLVM_CHIPSET_READY:
12e364b9
KC
1776 chipset_ready(&inmsg.hdr);
1777 break;
1778 case CONTROLVM_CHIPSET_SELFTEST:
12e364b9
KC
1779 chipset_selftest(&inmsg.hdr);
1780 break;
1781 case CONTROLVM_CHIPSET_STOP:
12e364b9
KC
1782 chipset_notready(&inmsg.hdr);
1783 break;
1784 default:
98d7b594 1785 if (inmsg.hdr.flags.response_expected)
12e364b9 1786 controlvm_respond(&inmsg.hdr,
818352a8 1787 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
12e364b9
KC
1788 break;
1789 }
1790
38f736e9 1791 if (parser_ctx) {
12e364b9
KC
1792 parser_done(parser_ctx);
1793 parser_ctx = NULL;
1794 }
f4c11551 1795 return true;
12e364b9
KC
1796}
1797
d746cb55 1798static HOSTADDRESS controlvm_get_channel_address(void)
524b0b63 1799{
5fc0229a 1800 u64 addr = 0;
b3c55b13 1801 u32 size = 0;
524b0b63 1802
0aca7844 1803 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
524b0b63 1804 return 0;
0aca7844 1805
524b0b63
BR
1806 return addr;
1807}
1808
12e364b9
KC
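/*
 * Periodic work function that polls the controlvm channel. After an initial
 * warm-up period it drains the RESPONSE queue, processes EVENT messages
 * until the queue is empty or a message has to be throttled (a throttled
 * message is stashed and retried on the next pass), releases a held
 * CHIPSET_READY response once both chipset events have been reported,
 * expires stale parahotplug requests, and requeues itself. Polling slows
 * from POLLJIFFIES_CONTROLVMCHANNEL_FAST to the SLOW value after
 * MIN_IDLE_SECONDS without a message; with HZ=250, for example, that is
 * roughly 4ms versus 400ms, though the wall-clock values depend on HZ.
 */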
1809static void
1810controlvm_periodic_work(struct work_struct *work)
1811{
3ab47701 1812 struct controlvm_message inmsg;
f4c11551
JS
1813 bool got_command = false;
1814 bool handle_command_failed = false;
1c1ed292 1815 static u64 poll_count;
12e364b9
KC
1816
1817 /* make sure visorbus server is registered for controlvm callbacks */
1818 if (visorchipset_serverregwait && !serverregistered)
1c1ed292 1819 goto cleanup;
12e364b9
KC
 1820 /* make sure visorclientbus server is registered for controlvm
1821 * callbacks
1822 */
1823 if (visorchipset_clientregwait && !clientregistered)
1c1ed292 1824 goto cleanup;
12e364b9 1825
1c1ed292
BR
1826 poll_count++;
1827 if (poll_count >= 250)
12e364b9
KC
1828 ; /* keep going */
1829 else
1c1ed292 1830 goto cleanup;
12e364b9
KC
1831
1832 /* Check events to determine if response to CHIPSET_READY
1833 * should be sent
1834 */
0639ba67
BR
1835 if (visorchipset_holdchipsetready &&
1836 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
12e364b9 1837 if (check_chipset_events() == 1) {
da021f02 1838 controlvm_respond(&g_chipset_msg_hdr, 0);
12e364b9 1839 clear_chipset_events();
da021f02 1840 memset(&g_chipset_msg_hdr, 0,
98d7b594 1841 sizeof(struct controlvm_message_header));
12e364b9
KC
1842 }
1843 }
1844
c3d9a224 1845 while (visorchannel_signalremove(controlvm_channel,
8a1182eb 1846 CONTROLVM_QUEUE_RESPONSE,
c3d9a224
BR
1847 &inmsg))
1848 ;
1c1ed292 1849 if (!got_command) {
7166ed19 1850 if (controlvm_pending_msg_valid) {
8a1182eb
BR
1851 /* we throttled processing of a prior
1852 * msg, so try to process it again
1853 * rather than reading a new one
1854 */
7166ed19 1855 inmsg = controlvm_pending_msg;
f4c11551 1856 controlvm_pending_msg_valid = false;
1c1ed292 1857 got_command = true;
75c1f8b7 1858 } else {
1c1ed292 1859 got_command = read_controlvm_event(&inmsg);
75c1f8b7 1860 }
8a1182eb 1861 }
12e364b9 1862
f4c11551 1863 handle_command_failed = false;
1c1ed292 1864 while (got_command && (!handle_command_failed)) {
b53e0e93 1865 most_recent_message_jiffies = jiffies;
8a1182eb
BR
1866 if (handle_command(inmsg,
1867 visorchannel_get_physaddr
c3d9a224 1868 (controlvm_channel)))
1c1ed292 1869 got_command = read_controlvm_event(&inmsg);
8a1182eb
BR
1870 else {
1871 /* this is a scenario where throttling
1872 * is required, but probably NOT an
1873 * error...; we stash the current
1874 * controlvm msg so we will attempt to
1875 * reprocess it on our next loop
1876 */
f4c11551 1877 handle_command_failed = true;
7166ed19 1878 controlvm_pending_msg = inmsg;
f4c11551 1879 controlvm_pending_msg_valid = true;
12e364b9
KC
1880 }
1881 }
1882
1883 /* parahotplug_worker */
1884 parahotplug_process_list();
1885
1c1ed292 1886cleanup:
12e364b9
KC
1887
1888 if (time_after(jiffies,
b53e0e93 1889 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
12e364b9
KC
1890 /* it's been longer than MIN_IDLE_SECONDS since we
1891 * processed our last controlvm message; slow down the
1892 * polling
1893 */
911e213e
BR
1894 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1895 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 1896 } else {
911e213e
BR
1897 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1898 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
12e364b9
KC
1899 }
1900
9232d2d6
BR
1901 queue_delayed_work(periodic_controlvm_workqueue,
1902 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
1903}
1904
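/*
 * Work function used when booting a kdump/crash kernel: it replays the
 * CHIPSET_INIT sequence, reads the saved bus-create and device-create
 * CONTROLVM messages from the offsets recorded in the controlvm channel
 * header, and re-issues them so the storage bus and device are recreated
 * for the crash kernel.
 */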
1905static void
1906setup_crash_devices_work_queue(struct work_struct *work)
1907{
e6bdb904
BR
1908 struct controlvm_message local_crash_bus_msg;
1909 struct controlvm_message local_crash_dev_msg;
3ab47701 1910 struct controlvm_message msg;
e6bdb904
BR
1911 u32 local_crash_msg_offset;
1912 u16 local_crash_msg_count;
12e364b9
KC
1913
1914 /* make sure visorbus server is registered for controlvm callbacks */
1915 if (visorchipset_serverregwait && !serverregistered)
e6bdb904 1916 goto cleanup;
12e364b9
KC
1917
 1918 /* make sure visorclientbus server is registered for controlvm
1919 * callbacks
1920 */
1921 if (visorchipset_clientregwait && !clientregistered)
e6bdb904 1922 goto cleanup;
12e364b9
KC
1923
1924 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1925
1926 /* send init chipset msg */
98d7b594 1927 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2ea5117b
BR
1928 msg.cmd.init_chipset.bus_count = 23;
1929 msg.cmd.init_chipset.switch_count = 0;
12e364b9
KC
1930
1931 chipset_init(&msg);
1932
12e364b9 1933 /* get saved message count */
c3d9a224 1934 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1935 offsetof(struct spar_controlvm_channel_protocol,
1936 saved_crash_message_count),
e6bdb904 1937 &local_crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
1938 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1939 POSTCODE_SEVERITY_ERR);
1940 return;
1941 }
1942
e6bdb904 1943 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 1944 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
e6bdb904 1945 local_crash_msg_count,
12e364b9
KC
1946 POSTCODE_SEVERITY_ERR);
1947 return;
1948 }
1949
1950 /* get saved crash message offset */
c3d9a224 1951 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1952 offsetof(struct spar_controlvm_channel_protocol,
1953 saved_crash_message_offset),
e6bdb904 1954 &local_crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
1955 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1956 POSTCODE_SEVERITY_ERR);
1957 return;
1958 }
1959
1960 /* read create device message for storage bus offset */
c3d9a224 1961 if (visorchannel_read(controlvm_channel,
e6bdb904
BR
1962 local_crash_msg_offset,
1963 &local_crash_bus_msg,
3ab47701 1964 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1965 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1966 POSTCODE_SEVERITY_ERR);
1967 return;
1968 }
1969
1970 /* read create device message for storage device */
c3d9a224 1971 if (visorchannel_read(controlvm_channel,
e6bdb904 1972 local_crash_msg_offset +
3ab47701 1973 sizeof(struct controlvm_message),
e6bdb904 1974 &local_crash_dev_msg,
3ab47701 1975 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
1976 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1977 POSTCODE_SEVERITY_ERR);
1978 return;
1979 }
1980
1981 /* reuse IOVM create bus message */
ebec8967 1982 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
e6bdb904 1983 bus_create(&local_crash_bus_msg);
75c1f8b7 1984 } else {
12e364b9
KC
1985 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1986 POSTCODE_SEVERITY_ERR);
1987 return;
1988 }
1989
1990 /* reuse create device message for storage device */
ebec8967 1991 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
e6bdb904 1992 my_device_create(&local_crash_dev_msg);
75c1f8b7 1993 } else {
12e364b9
KC
1994 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1995 POSTCODE_SEVERITY_ERR);
1996 return;
1997 }
12e364b9
KC
1998 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1999 return;
2000
e6bdb904 2001cleanup:
12e364b9 2002
911e213e 2003 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
12e364b9 2004
9232d2d6
BR
2005 queue_delayed_work(periodic_controlvm_workqueue,
2006 &periodic_controlvm_work, poll_jiffies);
12e364b9
KC
2007}
2008
2009static void
52063eca 2010bus_create_response(u32 bus_no, int response)
12e364b9 2011{
8e3fedd6 2012 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
12e364b9
KC
2013}
2014
2015static void
52063eca 2016bus_destroy_response(u32 bus_no, int response)
12e364b9 2017{
8e3fedd6 2018 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
12e364b9
KC
2019}
2020
2021static void
52063eca 2022device_create_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2023{
8e3fedd6 2024 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
12e364b9
KC
2025}
2026
2027static void
52063eca 2028device_destroy_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2029{
8e3fedd6 2030 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
12e364b9
KC
2031}
2032
2033void
52063eca 2034visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
12e364b9 2035{
12e364b9 2036 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8420f417 2037 bus_no, dev_no, response,
bd0d2dcc 2038 segment_state_standby);
12e364b9 2039}
927c7927 2040EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
12e364b9
KC
2041
2042static void
52063eca 2043device_resume_response(u32 bus_no, u32 dev_no, int response)
12e364b9
KC
2044{
2045 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
8e3fedd6 2046 bus_no, dev_no, response,
bd0d2dcc 2047 segment_state_running);
12e364b9
KC
2048}
2049
f4c11551 2050bool
52063eca 2051visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
12e364b9 2052{
4f66520b 2053 void *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2054
0aca7844 2055 if (!p)
f4c11551 2056 return false;
77db7127 2057 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
f4c11551 2058 return true;
12e364b9
KC
2059}
2060EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2061
f4c11551 2062bool
52063eca 2063visorchipset_set_bus_context(u32 bus_no, void *context)
12e364b9 2064{
4f66520b 2065 struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
26eb2c0c 2066
0aca7844 2067 if (!p)
f4c11551 2068 return false;
12e364b9 2069 p->bus_driver_context = context;
f4c11551 2070 return true;
12e364b9
KC
2071}
2072EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2073
f4c11551 2074bool
52063eca 2075visorchipset_get_device_info(u32 bus_no, u32 dev_no,
b486df19 2076 struct visorchipset_device_info *dev_info)
12e364b9 2077{
d480f6a2 2078 void *p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2079
0aca7844 2080 if (!p)
f4c11551 2081 return false;
b486df19 2082 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
f4c11551 2083 return true;
12e364b9
KC
2084}
2085EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2086
f4c11551 2087bool
52063eca 2088visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
12e364b9 2089{
d480f6a2
JS
2090 struct visorchipset_device_info *p;
2091
2092 p = device_find(&dev_info_list, bus_no, dev_no);
26eb2c0c 2093
0aca7844 2094 if (!p)
f4c11551 2095 return false;
12e364b9 2096 p->bus_driver_context = context;
f4c11551 2097 return true;
12e364b9
KC
2098}
2099EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2100
2101/* Generic wrapper function for allocating memory from a kmem_cache pool.
2102 */
2103void *
f4c11551 2104visorchipset_cache_alloc(struct kmem_cache *pool, bool ok_to_block,
12e364b9
KC
2105 char *fn, int ln)
2106{
2107 gfp_t gfp;
2108 void *p;
2109
2110 if (ok_to_block)
2111 gfp = GFP_KERNEL;
2112 else
2113 gfp = GFP_ATOMIC;
2114 /* __GFP_NORETRY means "ok to fail", meaning
2115 * kmem_cache_alloc() can return NULL, implying the caller CAN
2116 * cope with failure. If you do NOT specify __GFP_NORETRY,
2117 * Linux will go to extreme measures to get memory for you
2118 * (like, invoke oom killer), which will probably cripple the
2119 * system.
2120 */
2121 gfp |= __GFP_NORETRY;
2122 p = kmem_cache_alloc(pool, gfp);
0aca7844 2123 if (!p)
12e364b9 2124 return NULL;
0aca7844 2125
12e364b9
KC
2126 return p;
2127}
2128
2129/* Generic wrapper function for freeing memory from a kmem_cache pool.
2130 */
2131void
2132visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2133{
0aca7844 2134 if (!p)
12e364b9 2135 return;
0aca7844 2136
12e364b9
KC
2137 kmem_cache_free(pool, p);
2138}
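/*
 * Editorial sketch: how a caller might use the two wrappers above with its
 * own pool. The cache name, payload struct, and error handling are
 * hypothetical (nothing in this driver defines them), and <linux/slab.h>
 * is assumed to be available for kmem_cache_create()/destroy().
 */
struct example_payload {
	int value;		/* hypothetical payload */
};

static struct kmem_cache *example_pool;

static int example_use_pool(bool can_sleep)
{
	struct example_payload *p;

	example_pool = kmem_cache_create("example_pool",
					 sizeof(struct example_payload),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!example_pool)
		return -ENOMEM;

	/* may return NULL because of __GFP_NORETRY; callers must cope */
	p = visorchipset_cache_alloc(example_pool, can_sleep,
				     __FILE__, __LINE__);
	if (!p) {
		kmem_cache_destroy(example_pool);
		return -ENOMEM;
	}

	/* ... use p ... */

	visorchipset_cache_free(example_pool, p, __FILE__, __LINE__);
	kmem_cache_destroy(example_pool);
	example_pool = NULL;
	return 0;
}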
2139
18b87ed1 2140static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
2141 struct device_attribute *attr,
2142 const char *buf, size_t count)
12e364b9 2143{
18b87ed1 2144 char msgtype[64];
12e364b9 2145
66e24b76
BR
2146 if (sscanf(buf, "%63s", msgtype) != 1)
2147 return -EINVAL;
2148
ebec8967 2149 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
66e24b76
BR
2150 chipset_events[0] = 1;
2151 return count;
ebec8967 2152 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
66e24b76
BR
2153 chipset_events[1] = 1;
2154 return count;
e22a4a0f
BR
2155 }
2156 return -EINVAL;
12e364b9
KC
2157}
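/*
 * Usage note (editorial): user space reports these two milestones by
 * writing the literal strings above to the chipsetready attribute,
 * presumably exposed on the visorchipset platform device, e.g.
 *
 *	echo MODULES_LOADED > /sys/devices/platform/visorchipset/chipsetready
 *
 * (the exact sysfs path is an assumption). Any other string gets -EINVAL,
 * and the recorded events feed check_chipset_events() when the
 * holdchipsetready module parameter is set.
 */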
2158
e56fa7cd
BR
2159/* The parahotplug/devicedisabled interface gets called by our support script
2160 * when an SR-IOV device has been shut down. The ID is passed to the script
2161 * and then passed back when the device has been removed.
2162 */
2163static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
2164 struct device_attribute *attr,
2165 const char *buf, size_t count)
e56fa7cd 2166{
94217363 2167 unsigned int id;
e56fa7cd 2168
ebec8967 2169 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2170 return -EINVAL;
2171
2172 parahotplug_request_complete(id, 0);
2173 return count;
2174}
2175
2176/* The parahotplug/deviceenabled interface gets called by our support script
2177 * when an SR-IOV device has been recovered. The ID is passed to the script
2178 * and then passed back when the device has been brought back up.
2179 */
2180static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
2181 struct device_attribute *attr,
2182 const char *buf, size_t count)
e56fa7cd 2183{
94217363 2184 unsigned int id;
e56fa7cd 2185
ebec8967 2186 if (kstrtouint(buf, 10, &id))
e56fa7cd
BR
2187 return -EINVAL;
2188
2189 parahotplug_request_complete(id, 1);
2190 return count;
2191}
2192
e3420ed6
EA
2193static int
2194visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2195{
2196 unsigned long physaddr = 0;
2197 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2198 GUEST_PHYSICAL_ADDRESS addr = 0;
2199
2200 /* sv_enable_dfp(); */
2201 if (offset & (PAGE_SIZE - 1))
2202 return -ENXIO; /* need aligned offsets */
2203
2204 switch (offset) {
2205 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2206 vma->vm_flags |= VM_IO;
2207 if (!*file_controlvm_channel)
2208 return -ENXIO;
2209
2210 visorchannel_read(*file_controlvm_channel,
2211 offsetof(struct spar_controlvm_channel_protocol,
2212 gp_control_channel),
2213 &addr, sizeof(addr));
2214 if (!addr)
2215 return -ENXIO;
2216
2217 physaddr = (unsigned long)addr;
2218 if (remap_pfn_range(vma, vma->vm_start,
2219 physaddr >> PAGE_SHIFT,
2220 vma->vm_end - vma->vm_start,
2221 /*pgprot_noncached */
2222 (vma->vm_page_prot))) {
2223 return -EAGAIN;
2224 }
2225 break;
2226 default:
2227 return -ENXIO;
2228 }
2229 return 0;
2230}
2231
2232static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2233 unsigned long arg)
2234{
2235 s64 adjustment;
2236 s64 vrtc_offset;
2237
2238 switch (cmd) {
2239 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2240 /* get the physical rtc offset */
2241 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2242 if (copy_to_user((void __user *)arg, &vrtc_offset,
2243 sizeof(vrtc_offset))) {
2244 return -EFAULT;
2245 }
2246 return SUCCESS;
2247 case VMCALL_UPDATE_PHYSICAL_TIME:
2248 if (copy_from_user(&adjustment, (void __user *)arg,
2249 sizeof(adjustment))) {
2250 return -EFAULT;
2251 }
2252 return issue_vmcall_update_physical_time(adjustment);
2253 default:
2254 return -EFAULT;
2255 }
2256}
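/*
 * Editorial sketch (user space, not part of this file): querying the
 * guest's virtual RTC offset through the ioctl above. The device node
 * name and the header that exports the VMCALL_* ioctl codes to user
 * space are assumptions; only the kernel side is shown here.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "visorchipset.h"	/* assumed to provide the VMCALL_* codes */

int main(void)
{
	int64_t vrtc_offset = 0;
	int fd = open("/dev/visorchipset", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET,
		  &vrtc_offset) == 0)
		printf("vrtc offset: %lld\n", (long long)vrtc_offset);
	close(fd);
	return 0;
}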
2257
2258static const struct file_operations visorchipset_fops = {
2259 .owner = THIS_MODULE,
2260 .open = visorchipset_open,
2261 .read = NULL,
2262 .write = NULL,
2263 .unlocked_ioctl = visorchipset_ioctl,
2264 .release = visorchipset_release,
2265 .mmap = visorchipset_mmap,
2266};
2267
2268int
2269visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2270{
2271 int rc = 0;
2272
2273 file_controlvm_channel = controlvm_channel;
2274 cdev_init(&file_cdev, &visorchipset_fops);
2275 file_cdev.owner = THIS_MODULE;
2276 if (MAJOR(major_dev) == 0) {
2277 rc = alloc_chrdev_region(&major_dev, 0, 1, MYDRVNAME);
2278 /* dynamic major device number registration required */
2279 if (rc < 0)
2280 return rc;
2281 } else {
2282 /* static major device number registration required */
2283 rc = register_chrdev_region(major_dev, 1, MYDRVNAME);
2284 if (rc < 0)
2285 return rc;
2286 }
2287 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2288 if (rc < 0) {
2289 unregister_chrdev_region(major_dev, 1);
2290 return rc;
2291 }
2292 return 0;
2293}
2294
2295
2296
12e364b9
KC
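/*
 * Module initialization: locates the controlvm channel via the
 * io_controlvm_addr VMCALL, creates and validates the channel, registers
 * the character device through visorchipset_file_init(), then sets up
 * either the crash-device work queue (when running in a kdump kernel) or
 * the normal periodic controlvm polling work queue, and finally registers
 * the visorchipset platform device.
 */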
2297static int __init
2298visorchipset_init(void)
2299{
2300 int rc = 0, x = 0;
8a1182eb 2301 HOSTADDRESS addr;
12e364b9 2302
fcd0157e
KC
2303 if (!unisys_spar_platform)
2304 return -ENODEV;
2305
6fe345af
BR
2306 memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
2307 memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
84982fbf 2308 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
ea33b4ee
BR
2309 memset(&livedump_info, 0, sizeof(livedump_info));
2310 atomic_set(&livedump_info.buffers_in_use, 0);
12e364b9 2311
9f8d0e8b 2312 if (visorchipset_testvnic) {
9f8d0e8b
KC
2313 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2314 rc = x;
a6a3989b 2315 goto cleanup;
9f8d0e8b 2316 }
12e364b9 2317
8a1182eb 2318 addr = controlvm_get_channel_address();
ebec8967 2319 if (addr) {
c3d9a224 2320 controlvm_channel =
8a1182eb
BR
2321 visorchannel_create_with_lock
2322 (addr,
d19642f6 2323 sizeof(struct spar_controlvm_channel_protocol),
5fbaa4b3 2324 spar_controlvm_channel_protocol_uuid);
93a84565 2325 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
c3d9a224 2326 visorchannel_get_header(controlvm_channel))) {
8a1182eb
BR
2327 initialize_controlvm_payload();
2328 } else {
c3d9a224
BR
2329 visorchannel_destroy(controlvm_channel);
2330 controlvm_channel = NULL;
8a1182eb
BR
2331 return -ENODEV;
2332 }
2333 } else {
8a1182eb
BR
2334 return -ENODEV;
2335 }
2336
5aa8ae57
BR
2337 major_dev = MKDEV(visorchipset_major, 0);
2338 rc = visorchipset_file_init(major_dev, &controlvm_channel);
4cb005a9 2339 if (rc < 0) {
4cb005a9 2340 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
a6a3989b 2341 goto cleanup;
4cb005a9 2342 }
9f8d0e8b 2343
da021f02 2344 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2345
2098dbd1 2346 if (!visorchipset_disable_controlvm) {
12e364b9 2347 /* if booting in a crash kernel */
1ba00980 2348 if (is_kdump_kernel())
9232d2d6 2349 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9
KC
2350 setup_crash_devices_work_queue);
2351 else
9232d2d6 2352 INIT_DELAYED_WORK(&periodic_controlvm_work,
12e364b9 2353 controlvm_periodic_work);
9232d2d6 2354 periodic_controlvm_workqueue =
12e364b9
KC
2355 create_singlethread_workqueue("visorchipset_controlvm");
2356
38f736e9 2357 if (!periodic_controlvm_workqueue) {
4cb005a9
KC
2358 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2359 DIAG_SEVERITY_ERR);
2360 rc = -ENOMEM;
a6a3989b 2361 goto cleanup;
4cb005a9 2362 }
b53e0e93 2363 most_recent_message_jiffies = jiffies;
911e213e 2364 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
9232d2d6
BR
2365 rc = queue_delayed_work(periodic_controlvm_workqueue,
2366 &periodic_controlvm_work, poll_jiffies);
4cb005a9 2367 if (rc < 0) {
4cb005a9
KC
2368 POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2369 DIAG_SEVERITY_ERR);
a6a3989b 2370 goto cleanup;
4cb005a9 2371 }
12e364b9
KC
2372 }
2373
eb34e877
BR
2374 visorchipset_platform_device.dev.devt = major_dev;
2375 if (platform_device_register(&visorchipset_platform_device) < 0) {
4cb005a9
KC
2376 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2377 rc = -1;
a6a3989b 2378 goto cleanup;
4cb005a9 2379 }
12e364b9 2380 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
22ad57ba 2381 rc = 0;
a6a3989b 2382cleanup:
12e364b9 2383 if (rc) {
12e364b9
KC
2384 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2385 POSTCODE_SEVERITY_ERR);
2386 }
2387 return rc;
2388}
2389
e3420ed6
EA
2390void
2391visorchipset_file_cleanup(dev_t major_dev)
2392{
2393 if (file_cdev.ops)
2394 cdev_del(&file_cdev);
2395 file_cdev.ops = NULL;
2396 unregister_chrdev_region(major_dev, 1);
2397}
2398
12e364b9
KC
2399static void
2400visorchipset_exit(void)
2401{
12e364b9
KC
2402 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2403
2404 if (visorchipset_disable_controlvm) {
2405 ;
2406 } else {
9232d2d6
BR
2407 cancel_delayed_work(&periodic_controlvm_work);
2408 flush_workqueue(periodic_controlvm_workqueue);
2409 destroy_workqueue(periodic_controlvm_workqueue);
2410 periodic_controlvm_workqueue = NULL;
84982fbf 2411 destroy_controlvm_payload_info(&controlvm_payload_info);
12e364b9 2412 }
1783319f 2413
12e364b9
KC
2414 cleanup_controlvm_structures();
2415
da021f02 2416 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
12e364b9 2417
c3d9a224 2418 visorchannel_destroy(controlvm_channel);
8a1182eb 2419
addceb12 2420 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
12e364b9 2421 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
12e364b9
KC
2422}
2423
2424module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2425MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
12e364b9
KC
2426module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2427MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
12e364b9
KC
2428module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2429MODULE_PARM_DESC(visorchipset_testmsg,
2430 "1 to manufacture the chipset, bus, and switch messages");
12e364b9 2431module_param_named(major, visorchipset_major, int, S_IRUGO);
b615d628
JS
2432MODULE_PARM_DESC(visorchipset_major,
2433 "major device number to use for the device node");
12e364b9
KC
2434module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2435MODULE_PARM_DESC(visorchipset_serverregwait,
2436 "1 to have the module wait for the visor bus to register");
12e364b9
KC
2437module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2438MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
12e364b9
KC
2439module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2440MODULE_PARM_DESC(visorchipset_testteardown,
2441 "1 to test teardown of the chipset, bus, and switch");
12e364b9
KC
2442module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2443 S_IRUGO);
2444MODULE_PARM_DESC(visorchipset_disable_controlvm,
2445 "1 to disable polling of controlVm channel");
12e364b9
KC
2446module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2447 int, S_IRUGO);
2448MODULE_PARM_DESC(visorchipset_holdchipsetready,
2449 "1 to hold response to CHIPSET_READY");
b615d628 2450
12e364b9
KC
2451module_init(visorchipset_init);
2452module_exit(visorchipset_exit);
2453
2454MODULE_AUTHOR("Unisys");
2455MODULE_LICENSE("GPL");
2456MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2457 VERSION);
2458MODULE_VERSION(VERSION);