staging: unisys: moving file.h functionality to visorchipset.h
[deliverable/linux.git] / drivers / staging / unisys / visorchipset / visorchipset_main.c
CommitLineData
12e364b9
KC
1/* visorchipset_main.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
12e364b9
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
7023638c 18#include "version.h"
12e364b9
KC
19#include "visorchipset.h"
20#include "procobjecttree.h"
f6439218 21#include "visorbus.h"
12e364b9 22#include "periodic_work.h"
12e364b9 23#include "parser.h"
12e364b9 24#include "uisutils.h"
12e364b9
KC
25#include "controlvmcompletionstatus.h"
26#include "guestlinuxdebug.h"
12e364b9
KC
27
28#include <linux/nls.h>
29#include <linux/netdevice.h>
30#include <linux/platform_device.h>
90addb02 31#include <linux/uuid.h>
1ba00980 32#include <linux/crash_dump.h>
12e364b9
KC
33
34#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
35#define TEST_VNIC_PHYSITF "eth0" /* physical network itf for
36 * vnic loopback test */
37#define TEST_VNIC_SWITCHNO 1
38#define TEST_VNIC_BUSNO 9
39
40#define MAX_NAME_SIZE 128
41#define MAX_IP_SIZE 50
42#define MAXOUTSTANDINGCHANNELCOMMAND 256
43#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
44#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
45
b615d628
JS
46/*
47 * Module parameters
48 */
49static int visorchipset_testvnic;
50static int visorchipset_testvnicclient;
51static int visorchipset_testmsg;
52static int visorchipset_major;
53static int visorchipset_serverregwait;
54static int visorchipset_clientregwait = 1; /* default is on */
55static int visorchipset_testteardown;
56static int visorchipset_disable_controlvm;
57static int visorchipset_holdchipsetready;
58
12e364b9
KC
59/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
60* we switch to slow polling mode. As soon as we get a controlvm
61* message, we switch back to fast polling mode.
62*/
63#define MIN_IDLE_SECONDS 10
52063eca
JS
64static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
65static unsigned long most_recent_message_jiffies; /* when we got our last
bd5b9b32 66 * controlvm message */
12e364b9
KC
67static int serverregistered;
68static int clientregistered;
69
70#define MAX_CHIPSET_EVENTS 2
c242233e 71static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
12e364b9 72
9232d2d6
BR
73static struct delayed_work periodic_controlvm_work;
74static struct workqueue_struct *periodic_controlvm_workqueue;
8f1947ac 75static DEFINE_SEMAPHORE(notifier_lock);
12e364b9 76
da021f02 77static struct controlvm_message_header g_chipset_msg_hdr;
59827f00 78static const uuid_le spar_diag_pool_channel_protocol_uuid =
9eee5d1f 79 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
12e364b9 80/* 0xffffff is an invalid Bus/Device number */
52063eca
JS
81static u32 g_diagpool_bus_no = 0xffffff;
82static u32 g_diagpool_dev_no = 0xffffff;
4f44b72d 83static struct controlvm_message_packet g_devicechangestate_packet;
12e364b9
KC
84
85/* Only VNIC and VHBA channels are sent to visorclientbus (aka
86 * "visorhackbus")
87 */
88#define FOR_VISORHACKBUS(channel_type_guid) \
9eee5d1f 89 (((uuid_le_cmp(channel_type_guid,\
0639ba67
BR
90 spar_vnic_channel_protocol_uuid) == 0) ||\
91 (uuid_le_cmp(channel_type_guid,\
9eee5d1f 92 spar_vhba_channel_protocol_uuid) == 0)))
12e364b9
KC
93#define FOR_VISORBUS(channel_type_guid) (!(FOR_VISORHACKBUS(channel_type_guid)))
94
95#define is_diagpool_channel(channel_type_guid) \
59827f00
BR
96 (uuid_le_cmp(channel_type_guid,\
97 spar_diag_pool_channel_protocol_uuid) == 0)
12e364b9 98
1390b88c
BR
99static LIST_HEAD(bus_info_list);
100static LIST_HEAD(dev_info_list);
12e364b9 101
c3d9a224 102static struct visorchannel *controlvm_channel;
12e364b9 103
84982fbf 104/* Manages the request payload in the controlvm channel */
c1f834eb 105struct visor_controlvm_payload_info {
c242233e 106 u8 __iomem *ptr; /* pointer to base address of payload pool */
5fc0229a 107 u64 offset; /* offset from beginning of controlvm
12e364b9 108 * channel to beginning of payload * pool */
b3c55b13 109 u32 bytes; /* number of bytes in payload pool */
c1f834eb
JS
110};
111
112static struct visor_controlvm_payload_info controlvm_payload_info;
12e364b9 113
ea33b4ee
BR
114/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
115 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
116 */
c1f834eb 117struct visor_livedump_info {
ea33b4ee
BR
118 struct controlvm_message_header dumpcapture_header;
119 struct controlvm_message_header gettextdump_header;
120 struct controlvm_message_header dumpcomplete_header;
f4c11551 121 bool gettextdump_outstanding;
12e364b9 122 u32 crc32;
52063eca 123 unsigned long length;
12e364b9 124 atomic_t buffers_in_use;
52063eca 125 unsigned long destination;
c1f834eb
JS
126};
127
128static struct visor_livedump_info livedump_info;
12e364b9
KC
129
130/* The following globals are used to handle the scenario where we are unable to
131 * offload the payload from a controlvm message due to memory requirements. In
132 * this scenario, we simply stash the controlvm message, then attempt to
133 * process it again the next time controlvm_periodic_work() runs.
134 */
7166ed19 135static struct controlvm_message controlvm_pending_msg;
f4c11551 136static bool controlvm_pending_msg_valid = false;
12e364b9 137
12e364b9
KC
138/* This identifies a data buffer that has been received via a controlvm messages
139 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
140 */
141struct putfile_buffer_entry {
142 struct list_head next; /* putfile_buffer_entry list */
317d9614 143 struct parser_context *parser_ctx; /* points to input data buffer */
12e364b9
KC
144};
145
146/* List of struct putfile_request *, via next_putfile_request member.
147 * Each entry in this list identifies an outstanding TRANSMIT_FILE
148 * conversation.
149 */
1eee0011 150static LIST_HEAD(putfile_request_list);
12e364b9
KC
151
152/* This describes a buffer and its current state of transfer (e.g., how many
153 * bytes have already been supplied as putfile data, and how many bytes are
154 * remaining) for a putfile_request.
155 */
156struct putfile_active_buffer {
157 /* a payload from a controlvm message, containing a file data buffer */
317d9614 158 struct parser_context *parser_ctx;
12e364b9
KC
159 /* points within data area of parser_ctx to next byte of data */
160 u8 *pnext;
161 /* # bytes left from <pnext> to the end of this data buffer */
162 size_t bytes_remaining;
163};
164
165#define PUTFILE_REQUEST_SIG 0x0906101302281211
166/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
167 * conversation. Structs of this type are dynamically linked into
168 * <Putfile_request_list>.
169 */
170struct putfile_request {
171 u64 sig; /* PUTFILE_REQUEST_SIG */
172
173 /* header from original TransmitFile request */
98d7b594 174 struct controlvm_message_header controlvm_header;
12e364b9
KC
175 u64 file_request_number; /* from original TransmitFile request */
176
177 /* link to next struct putfile_request */
178 struct list_head next_putfile_request;
179
180 /* most-recent sequence number supplied via a controlvm message */
181 u64 data_sequence_number;
182
183 /* head of putfile_buffer_entry list, which describes the data to be
184 * supplied as putfile data;
185 * - this list is added to when controlvm messages come in that supply
186 * file data
187 * - this list is removed from via the hotplug program that is actually
188 * consuming these buffers to write as file data */
189 struct list_head input_buffer_list;
190 spinlock_t req_list_lock; /* lock for input_buffer_list */
191
192 /* waiters for input_buffer_list to go non-empty */
193 wait_queue_head_t input_buffer_wq;
194
195 /* data not yet read within current putfile_buffer_entry */
196 struct putfile_active_buffer active_buf;
197
198 /* <0 = failed, 0 = in-progress, >0 = successful; */
199 /* note that this must be set with req_list_lock, and if you set <0, */
200 /* it is your responsibility to also free up all of the other objects */
201 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
202 /* before releasing the lock */
203 int completion_status;
204};
205
12e364b9
KC
206struct parahotplug_request {
207 struct list_head list;
208 int id;
209 unsigned long expiration;
3ab47701 210 struct controlvm_message msg;
12e364b9
KC
211};
212
ddf5de53
BR
213static LIST_HEAD(parahotplug_request_list);
214static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
12e364b9
KC
215static void parahotplug_process_list(void);
216
217/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
218 * CONTROLVM_REPORTEVENT.
219 */
6fe345af
BR
220static struct visorchipset_busdev_notifiers busdev_server_notifiers;
221static struct visorchipset_busdev_notifiers busdev_client_notifiers;
12e364b9 222
52063eca
JS
223static void bus_create_response(u32 bus_no, int response);
224static void bus_destroy_response(u32 bus_no, int response);
225static void device_create_response(u32 bus_no, u32 dev_no, int response);
226static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
227static void device_resume_response(u32 bus_no, u32 dev_no, int response);
12e364b9 228
8e3fedd6 229static struct visorchipset_busdev_responders busdev_responders = {
12e364b9
KC
230 .bus_create = bus_create_response,
231 .bus_destroy = bus_destroy_response,
232 .device_create = device_create_response,
233 .device_destroy = device_destroy_response,
927c7927 234 .device_pause = visorchipset_device_pause_response,
12e364b9
KC
235 .device_resume = device_resume_response,
236};
237
238/* info for /dev/visorchipset */
5aa8ae57 239static dev_t major_dev = -1; /**< indicates major num for device */
12e364b9 240
19f6634f
BR
241/* prototypes for attributes */
242static ssize_t toolaction_show(struct device *dev,
8e76e695 243 struct device_attribute *attr, char *buf);
19f6634f 244static ssize_t toolaction_store(struct device *dev,
8e76e695
BR
245 struct device_attribute *attr,
246 const char *buf, size_t count);
19f6634f
BR
247static DEVICE_ATTR_RW(toolaction);
248
54b31229 249static ssize_t boottotool_show(struct device *dev,
8e76e695 250 struct device_attribute *attr, char *buf);
54b31229 251static ssize_t boottotool_store(struct device *dev,
8e76e695
BR
252 struct device_attribute *attr, const char *buf,
253 size_t count);
54b31229
BR
254static DEVICE_ATTR_RW(boottotool);
255
422af17c 256static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 257 char *buf);
422af17c 258static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 259 const char *buf, size_t count);
422af17c
BR
260static DEVICE_ATTR_RW(error);
261
262static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 263 char *buf);
422af17c 264static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 265 const char *buf, size_t count);
422af17c
BR
266static DEVICE_ATTR_RW(textid);
267
268static ssize_t remaining_steps_show(struct device *dev,
8e76e695 269 struct device_attribute *attr, char *buf);
422af17c 270static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
271 struct device_attribute *attr,
272 const char *buf, size_t count);
422af17c
BR
273static DEVICE_ATTR_RW(remaining_steps);
274
18b87ed1 275static ssize_t chipsetready_store(struct device *dev,
8e76e695
BR
276 struct device_attribute *attr,
277 const char *buf, size_t count);
18b87ed1
BR
278static DEVICE_ATTR_WO(chipsetready);
279
e56fa7cd 280static ssize_t devicedisabled_store(struct device *dev,
8e76e695
BR
281 struct device_attribute *attr,
282 const char *buf, size_t count);
e56fa7cd
BR
283static DEVICE_ATTR_WO(devicedisabled);
284
285static ssize_t deviceenabled_store(struct device *dev,
8e76e695
BR
286 struct device_attribute *attr,
287 const char *buf, size_t count);
e56fa7cd
BR
288static DEVICE_ATTR_WO(deviceenabled);
289
19f6634f
BR
290static struct attribute *visorchipset_install_attrs[] = {
291 &dev_attr_toolaction.attr,
54b31229 292 &dev_attr_boottotool.attr,
422af17c
BR
293 &dev_attr_error.attr,
294 &dev_attr_textid.attr,
295 &dev_attr_remaining_steps.attr,
19f6634f
BR
296 NULL
297};
298
299static struct attribute_group visorchipset_install_group = {
300 .name = "install",
301 .attrs = visorchipset_install_attrs
302};
303
18b87ed1
BR
304static struct attribute *visorchipset_guest_attrs[] = {
305 &dev_attr_chipsetready.attr,
306 NULL
307};
308
309static struct attribute_group visorchipset_guest_group = {
310 .name = "guest",
311 .attrs = visorchipset_guest_attrs
312};
313
e56fa7cd
BR
314static struct attribute *visorchipset_parahotplug_attrs[] = {
315 &dev_attr_devicedisabled.attr,
316 &dev_attr_deviceenabled.attr,
317 NULL
318};
319
320static struct attribute_group visorchipset_parahotplug_group = {
321 .name = "parahotplug",
322 .attrs = visorchipset_parahotplug_attrs
323};
324
19f6634f
BR
325static const struct attribute_group *visorchipset_dev_groups[] = {
326 &visorchipset_install_group,
18b87ed1 327 &visorchipset_guest_group,
e56fa7cd 328 &visorchipset_parahotplug_group,
19f6634f
BR
329 NULL
330};
331
12e364b9 332/* /sys/devices/platform/visorchipset */
eb34e877 333static struct platform_device visorchipset_platform_device = {
12e364b9
KC
334 .name = "visorchipset",
335 .id = -1,
19f6634f 336 .dev.groups = visorchipset_dev_groups,
12e364b9
KC
337};
338
339/* Function prototypes */
b3168c70 340static void controlvm_respond(struct controlvm_message_header *msg_hdr,
98d7b594
BR
341 int response);
342static void controlvm_respond_chipset_init(
b3168c70 343 struct controlvm_message_header *msg_hdr, int response,
98d7b594
BR
344 enum ultra_chipset_feature features);
345static void controlvm_respond_physdev_changestate(
b3168c70 346 struct controlvm_message_header *msg_hdr, int response,
98d7b594 347 struct spar_segment_state state);
12e364b9 348
d746cb55
VB
349static ssize_t toolaction_show(struct device *dev,
350 struct device_attribute *attr,
351 char *buf)
19f6634f 352{
01f4d85a 353 u8 tool_action;
19f6634f 354
c3d9a224 355 visorchannel_read(controlvm_channel,
d19642f6 356 offsetof(struct spar_controlvm_channel_protocol,
8e76e695 357 tool_action), &tool_action, sizeof(u8));
01f4d85a 358 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
19f6634f
BR
359}
360
d746cb55
VB
361static ssize_t toolaction_store(struct device *dev,
362 struct device_attribute *attr,
363 const char *buf, size_t count)
19f6634f 364{
01f4d85a 365 u8 tool_action;
66e24b76 366 int ret;
19f6634f 367
ebec8967 368 if (kstrtou8(buf, 10, &tool_action))
66e24b76
BR
369 return -EINVAL;
370
c3d9a224 371 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
372 offsetof(struct spar_controlvm_channel_protocol,
373 tool_action),
01f4d85a 374 &tool_action, sizeof(u8));
66e24b76
BR
375
376 if (ret)
377 return ret;
e22a4a0f 378 return count;
19f6634f
BR
379}
380
d746cb55
VB
381static ssize_t boottotool_show(struct device *dev,
382 struct device_attribute *attr,
383 char *buf)
54b31229 384{
365522d9 385 struct efi_spar_indication efi_spar_indication;
54b31229 386
c3d9a224 387 visorchannel_read(controlvm_channel,
8e76e695
BR
388 offsetof(struct spar_controlvm_channel_protocol,
389 efi_spar_ind), &efi_spar_indication,
390 sizeof(struct efi_spar_indication));
54b31229 391 return scnprintf(buf, PAGE_SIZE, "%u\n",
8e76e695 392 efi_spar_indication.boot_to_tool);
54b31229
BR
393}
394
d746cb55
VB
395static ssize_t boottotool_store(struct device *dev,
396 struct device_attribute *attr,
397 const char *buf, size_t count)
54b31229 398{
66e24b76 399 int val, ret;
365522d9 400 struct efi_spar_indication efi_spar_indication;
54b31229 401
ebec8967 402 if (kstrtoint(buf, 10, &val))
66e24b76
BR
403 return -EINVAL;
404
365522d9 405 efi_spar_indication.boot_to_tool = val;
c3d9a224 406 ret = visorchannel_write(controlvm_channel,
d19642f6 407 offsetof(struct spar_controlvm_channel_protocol,
8e76e695
BR
408 efi_spar_ind), &(efi_spar_indication),
409 sizeof(struct efi_spar_indication));
66e24b76
BR
410
411 if (ret)
412 return ret;
e22a4a0f 413 return count;
54b31229 414}
422af17c
BR
415
416static ssize_t error_show(struct device *dev, struct device_attribute *attr,
8e76e695 417 char *buf)
422af17c
BR
418{
419 u32 error;
420
8e76e695
BR
421 visorchannel_read(controlvm_channel,
422 offsetof(struct spar_controlvm_channel_protocol,
423 installation_error),
424 &error, sizeof(u32));
422af17c
BR
425 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
426}
427
428static ssize_t error_store(struct device *dev, struct device_attribute *attr,
8e76e695 429 const char *buf, size_t count)
422af17c
BR
430{
431 u32 error;
66e24b76 432 int ret;
422af17c 433
ebec8967 434 if (kstrtou32(buf, 10, &error))
66e24b76
BR
435 return -EINVAL;
436
c3d9a224 437 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
438 offsetof(struct spar_controlvm_channel_protocol,
439 installation_error),
440 &error, sizeof(u32));
66e24b76
BR
441 if (ret)
442 return ret;
e22a4a0f 443 return count;
422af17c
BR
444}
445
446static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
8e76e695 447 char *buf)
422af17c 448{
10dbf0e3 449 u32 text_id;
422af17c 450
8e76e695
BR
451 visorchannel_read(controlvm_channel,
452 offsetof(struct spar_controlvm_channel_protocol,
453 installation_text_id),
454 &text_id, sizeof(u32));
10dbf0e3 455 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
422af17c
BR
456}
457
458static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
8e76e695 459 const char *buf, size_t count)
422af17c 460{
10dbf0e3 461 u32 text_id;
66e24b76 462 int ret;
422af17c 463
ebec8967 464 if (kstrtou32(buf, 10, &text_id))
66e24b76
BR
465 return -EINVAL;
466
c3d9a224 467 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
468 offsetof(struct spar_controlvm_channel_protocol,
469 installation_text_id),
470 &text_id, sizeof(u32));
66e24b76
BR
471 if (ret)
472 return ret;
e22a4a0f 473 return count;
422af17c
BR
474}
475
422af17c 476static ssize_t remaining_steps_show(struct device *dev,
8e76e695 477 struct device_attribute *attr, char *buf)
422af17c 478{
ee8da290 479 u16 remaining_steps;
422af17c 480
c3d9a224 481 visorchannel_read(controlvm_channel,
8e76e695
BR
482 offsetof(struct spar_controlvm_channel_protocol,
483 installation_remaining_steps),
484 &remaining_steps, sizeof(u16));
ee8da290 485 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
422af17c
BR
486}
487
488static ssize_t remaining_steps_store(struct device *dev,
8e76e695
BR
489 struct device_attribute *attr,
490 const char *buf, size_t count)
422af17c 491{
ee8da290 492 u16 remaining_steps;
66e24b76 493 int ret;
422af17c 494
ebec8967 495 if (kstrtou16(buf, 10, &remaining_steps))
66e24b76
BR
496 return -EINVAL;
497
c3d9a224 498 ret = visorchannel_write(controlvm_channel,
8e76e695
BR
499 offsetof(struct spar_controlvm_channel_protocol,
500 installation_remaining_steps),
501 &remaining_steps, sizeof(u16));
66e24b76
BR
502 if (ret)
503 return ret;
e22a4a0f 504 return count;
422af17c
BR
505}
506
12e364b9 507static void
9b989a98 508bus_info_clear(void *v)
12e364b9 509{
bbd4be30 510 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;
12e364b9 511
12e364b9 512 kfree(p->name);
12e364b9 513 kfree(p->description);
33192fa1 514 memset(p, 0, sizeof(struct visorchipset_bus_info));
12e364b9
KC
515}
516
517static void
9b989a98 518dev_info_clear(void *v)
12e364b9 519{
246e0cd0 520 struct visorchipset_device_info *p =
bbd4be30 521 (struct visorchipset_device_info *) v;
26eb2c0c 522
246e0cd0 523 memset(p, 0, sizeof(struct visorchipset_device_info));
12e364b9
KC
524}
525
4f66520b
JS
526static struct visorchipset_bus_info *
527bus_find(struct list_head *list, u32 bus_no)
528{
529 struct visorchipset_bus_info *p;
530
531 list_for_each_entry(p, list, entry) {
532 if (p->bus_no == bus_no)
533 return p;
534 }
535
536 return NULL;
537}
538
d480f6a2
JS
539static struct visorchipset_device_info *
540device_find(struct list_head *list, u32 bus_no, u32 dev_no)
541{
542 struct visorchipset_device_info *p;
543
544 list_for_each_entry(p, list, entry) {
545 if (p->bus_no == bus_no && p->dev_no == dev_no)
546 return p;
547 }
548
549 return NULL;
550}
551
28723521
JS
552static void busdevices_del(struct list_head *list, u32 bus_no)
553{
554 struct visorchipset_device_info *p, *tmp;
555
556 list_for_each_entry_safe(p, tmp, list, entry) {
557 if (p->bus_no == bus_no) {
558 list_del(&p->entry);
559 kfree(p);
560 }
561 }
562}
563
c242233e 564static u8
12e364b9
KC
565check_chipset_events(void)
566{
567 int i;
c242233e 568 u8 send_msg = 1;
12e364b9
KC
569 /* Check events to determine if response should be sent */
570 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
571 send_msg &= chipset_events[i];
572 return send_msg;
573}
574
575static void
576clear_chipset_events(void)
577{
578 int i;
579 /* Clear chipset_events */
580 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
581 chipset_events[i] = 0;
582}
583
584void
fe90d892
BR
585visorchipset_register_busdev_server(
586 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 587 struct visorchipset_busdev_responders *responders,
1e7a59c1 588 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 589{
8f1947ac 590 down(&notifier_lock);
38f736e9 591 if (!notifiers) {
6fe345af
BR
592 memset(&busdev_server_notifiers, 0,
593 sizeof(busdev_server_notifiers));
12e364b9
KC
594 serverregistered = 0; /* clear flag */
595 } else {
6fe345af 596 busdev_server_notifiers = *notifiers;
12e364b9
KC
597 serverregistered = 1; /* set flag */
598 }
599 if (responders)
8e3fedd6 600 *responders = busdev_responders;
1e7a59c1
BR
601 if (driver_info)
602 bus_device_info_init(driver_info, "chipset", "visorchipset",
8e76e695 603 VERSION, NULL);
12e364b9 604
8f1947ac 605 up(&notifier_lock);
12e364b9
KC
606}
607EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
608
609void
fe90d892
BR
610visorchipset_register_busdev_client(
611 struct visorchipset_busdev_notifiers *notifiers,
929aa8ae 612 struct visorchipset_busdev_responders *responders,
43fce019 613 struct ultra_vbus_deviceinfo *driver_info)
12e364b9 614{
8f1947ac 615 down(&notifier_lock);
38f736e9 616 if (!notifiers) {
6fe345af
BR
617 memset(&busdev_client_notifiers, 0,
618 sizeof(busdev_client_notifiers));
12e364b9
KC
619 clientregistered = 0; /* clear flag */
620 } else {
6fe345af 621 busdev_client_notifiers = *notifiers;
12e364b9
KC
622 clientregistered = 1; /* set flag */
623 }
624 if (responders)
8e3fedd6 625 *responders = busdev_responders;
43fce019
BR
626 if (driver_info)
627 bus_device_info_init(driver_info, "chipset(bolts)",
628 "visorchipset", VERSION, NULL);
8f1947ac 629 up(&notifier_lock);
12e364b9
KC
630}
631EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
632
633static void
634cleanup_controlvm_structures(void)
635{
33192fa1 636 struct visorchipset_bus_info *bi, *tmp_bi;
246e0cd0 637 struct visorchipset_device_info *di, *tmp_di;
12e364b9 638
1390b88c 639 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
9b989a98 640 bus_info_clear(bi);
12e364b9
KC
641 list_del(&bi->entry);
642 kfree(bi);
643 }
644
1390b88c 645 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
9b989a98 646 dev_info_clear(di);
12e364b9
KC
647 list_del(&di->entry);
648 kfree(di);
649 }
650}
651
652static void
3ab47701 653chipset_init(struct controlvm_message *inmsg)
12e364b9
KC
654{
655 static int chipset_inited;
b9b141e8 656 enum ultra_chipset_feature features = 0;
12e364b9
KC
657 int rc = CONTROLVM_RESP_SUCCESS;
658
659 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
660 if (chipset_inited) {
22ad57ba 661 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
e3199b2e 662 goto cleanup;
12e364b9
KC
663 }
664 chipset_inited = 1;
665 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
666
667 /* Set features to indicate we support parahotplug (if Command
668 * also supports it). */
669 features =
2ea5117b 670 inmsg->cmd.init_chipset.
12e364b9
KC
671 features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
672
673 /* Set the "reply" bit so Command knows this is a
674 * features-aware driver. */
675 features |= ULTRA_CHIPSET_FEATURE_REPLY;
676
e3199b2e 677cleanup:
12e364b9
KC
678 if (rc < 0)
679 cleanup_controlvm_structures();
98d7b594 680 if (inmsg->hdr.flags.response_expected)
12e364b9
KC
681 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
682}
683
684static void
3ab47701 685controlvm_init_response(struct controlvm_message *msg,
b3168c70 686 struct controlvm_message_header *msg_hdr, int response)
12e364b9 687{
3ab47701 688 memset(msg, 0, sizeof(struct controlvm_message));
b3168c70 689 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
98d7b594
BR
690 msg->hdr.payload_bytes = 0;
691 msg->hdr.payload_vm_offset = 0;
692 msg->hdr.payload_max_bytes = 0;
12e364b9 693 if (response < 0) {
98d7b594
BR
694 msg->hdr.flags.failed = 1;
695 msg->hdr.completion_status = (u32) (-response);
12e364b9
KC
696 }
697}
698
699static void
b3168c70 700controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
12e364b9 701{
3ab47701 702 struct controlvm_message outmsg;
26eb2c0c 703
b3168c70 704 controlvm_init_response(&outmsg, msg_hdr, response);
12e364b9
KC
705 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
706 * back the deviceChangeState structure in the packet. */
b3168c70 707 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
0639ba67
BR
708 g_devicechangestate_packet.device_change_state.bus_no ==
709 g_diagpool_bus_no &&
710 g_devicechangestate_packet.device_change_state.dev_no ==
83d48905 711 g_diagpool_dev_no)
4f44b72d 712 outmsg.cmd = g_devicechangestate_packet;
2098dbd1 713 if (outmsg.hdr.flags.test_message == 1)
12e364b9 714 return;
2098dbd1 715
c3d9a224 716 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 717 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
718 return;
719 }
720}
721
722static void
b3168c70 723controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
98d7b594 724 int response,
b9b141e8 725 enum ultra_chipset_feature features)
12e364b9 726{
3ab47701 727 struct controlvm_message outmsg;
26eb2c0c 728
b3168c70 729 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b 730 outmsg.cmd.init_chipset.features = features;
c3d9a224 731 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 732 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
733 return;
734 }
735}
736
98d7b594 737static void controlvm_respond_physdev_changestate(
b3168c70 738 struct controlvm_message_header *msg_hdr, int response,
98d7b594 739 struct spar_segment_state state)
12e364b9 740{
3ab47701 741 struct controlvm_message outmsg;
26eb2c0c 742
b3168c70 743 controlvm_init_response(&outmsg, msg_hdr, response);
2ea5117b
BR
744 outmsg.cmd.device_change_state.state = state;
745 outmsg.cmd.device_change_state.flags.phys_device = 1;
c3d9a224 746 if (!visorchannel_signalinsert(controlvm_channel,
12e364b9 747 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
12e364b9
KC
748 return;
749 }
750}
751
752void
2c683cde
BR
753visorchipset_save_message(struct controlvm_message *msg,
754 enum crash_obj_type type)
12e364b9 755{
4577225d
BR
756 u32 crash_msg_offset;
757 u16 crash_msg_count;
12e364b9
KC
758
759 /* get saved message count */
c3d9a224 760 if (visorchannel_read(controlvm_channel,
d19642f6
BR
761 offsetof(struct spar_controlvm_channel_protocol,
762 saved_crash_message_count),
4577225d 763 &crash_msg_count, sizeof(u16)) < 0) {
12e364b9
KC
764 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
765 POSTCODE_SEVERITY_ERR);
766 return;
767 }
768
4577225d 769 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
12e364b9 770 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
4577225d 771 crash_msg_count,
12e364b9
KC
772 POSTCODE_SEVERITY_ERR);
773 return;
774 }
775
776 /* get saved crash message offset */
c3d9a224 777 if (visorchannel_read(controlvm_channel,
d19642f6
BR
778 offsetof(struct spar_controlvm_channel_protocol,
779 saved_crash_message_offset),
4577225d 780 &crash_msg_offset, sizeof(u32)) < 0) {
12e364b9
KC
781 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
782 POSTCODE_SEVERITY_ERR);
783 return;
784 }
785
2c683cde 786 if (type == CRASH_BUS) {
c3d9a224 787 if (visorchannel_write(controlvm_channel,
4577225d 788 crash_msg_offset,
3ab47701
BR
789 msg,
790 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
791 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
792 POSTCODE_SEVERITY_ERR);
793 return;
794 }
795 } else {
c3d9a224 796 if (visorchannel_write(controlvm_channel,
4577225d 797 crash_msg_offset +
3ab47701
BR
798 sizeof(struct controlvm_message), msg,
799 sizeof(struct controlvm_message)) < 0) {
12e364b9
KC
800 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
801 POSTCODE_SEVERITY_ERR);
802 return;
803 }
804 }
805}
806EXPORT_SYMBOL_GPL(visorchipset_save_message);
807
808static void
52063eca 809bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
12e364b9 810{
e82ba62e 811 struct visorchipset_bus_info *p;
f4c11551 812 bool need_clear = false;
12e364b9 813
4f66520b 814 p = bus_find(&bus_info_list, bus_no);
0aca7844 815 if (!p)
12e364b9 816 return;
0aca7844 817
12e364b9 818 if (response < 0) {
fbb31f48 819 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
12e364b9
KC
820 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
821 /* undo the row we just created... */
28723521 822 busdevices_del(&dev_info_list, bus_no);
12e364b9 823 } else {
fbb31f48 824 if (cmd_id == CONTROLVM_BUS_CREATE)
12e364b9 825 p->state.created = 1;
fbb31f48 826 if (cmd_id == CONTROLVM_BUS_DESTROY)
f4c11551 827 need_clear = true;
12e364b9
KC
828 }
829
0aca7844 830 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 831 return; /* no controlvm response needed */
6b59b31d 832 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 833 return;
33192fa1
BR
834 controlvm_respond(&p->pending_msg_hdr, response);
835 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 836 if (need_clear) {
9b989a98 837 bus_info_clear(p);
28723521 838 busdevices_del(&dev_info_list, bus_no);
12e364b9
KC
839 }
840}
841
842static void
fbb31f48 843device_changestate_responder(enum controlvm_id cmd_id,
52063eca 844 u32 bus_no, u32 dev_no, int response,
fbb31f48 845 struct spar_segment_state response_state)
12e364b9 846{
e82ba62e 847 struct visorchipset_device_info *p;
3ab47701 848 struct controlvm_message outmsg;
12e364b9 849
d480f6a2 850 p = device_find(&dev_info_list, bus_no, dev_no);
0aca7844 851 if (!p)
12e364b9 852 return;
0aca7844 853 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 854 return; /* no controlvm response needed */
fbb31f48 855 if (p->pending_msg_hdr.id != cmd_id)
12e364b9 856 return;
12e364b9 857
246e0cd0 858 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
12e364b9 859
fbb31f48
BR
860 outmsg.cmd.device_change_state.bus_no = bus_no;
861 outmsg.cmd.device_change_state.dev_no = dev_no;
862 outmsg.cmd.device_change_state.state = response_state;
12e364b9 863
c3d9a224 864 if (!visorchannel_signalinsert(controlvm_channel,
0aca7844 865 CONTROLVM_QUEUE_REQUEST, &outmsg))
12e364b9 866 return;
12e364b9 867
246e0cd0 868 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9
KC
869}
870
871static void
52063eca 872device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
12e364b9 873{
e82ba62e 874 struct visorchipset_device_info *p;
f4c11551 875 bool need_clear = false;
12e364b9 876
d480f6a2 877 p = device_find(&dev_info_list, bus_no, dev_no);
0aca7844 878 if (!p)
12e364b9 879 return;
12e364b9 880 if (response >= 0) {
fbb31f48 881 if (cmd_id == CONTROLVM_DEVICE_CREATE)
12e364b9 882 p->state.created = 1;
fbb31f48 883 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
f4c11551 884 need_clear = true;
12e364b9
KC
885 }
886
0aca7844 887 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
12e364b9 888 return; /* no controlvm response needed */
0aca7844 889
6b59b31d 890 if (p->pending_msg_hdr.id != (u32)cmd_id)
12e364b9 891 return;
0aca7844 892
246e0cd0
BR
893 controlvm_respond(&p->pending_msg_hdr, response);
894 p->pending_msg_hdr.id = CONTROLVM_INVALID;
12e364b9 895 if (need_clear)
9b989a98 896 dev_info_clear(p);
12e364b9
KC
897}
898
899static void
2836c6a8
BR
900bus_epilog(u32 bus_no,
901 u32 cmd, struct controlvm_message_header *msg_hdr,
f4c11551 902 int response, bool need_response)
12e364b9 903{
4f66520b 904 struct visorchipset_bus_info *bus_info;
f4c11551 905 bool notified = false;
12e364b9 906
4f66520b 907 bus_info = bus_find(&bus_info_list, bus_no);
12e364b9 908
2836c6a8 909 if (!bus_info)
12e364b9 910 return;
0aca7844 911
2836c6a8
BR
912 if (need_response) {
913 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
98d7b594 914 sizeof(struct controlvm_message_header));
75c1f8b7 915 } else {
2836c6a8 916 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 917 }
12e364b9 918
8f1947ac 919 down(&notifier_lock);
12e364b9
KC
920 if (response == CONTROLVM_RESP_SUCCESS) {
921 switch (cmd) {
922 case CONTROLVM_BUS_CREATE:
923 /* We can't tell from the bus_create
924 * information which of our 2 bus flavors the
925 * devices on this bus will ultimately end up.
926 * FORTUNATELY, it turns out it is harmless to
927 * send the bus_create to both of them. We can
928 * narrow things down a little bit, though,
929 * because we know: - BusDev_Server can handle
930 * either server or client devices
931 * - BusDev_Client can handle ONLY client
932 * devices */
6fe345af
BR
933 if (busdev_server_notifiers.bus_create) {
934 (*busdev_server_notifiers.bus_create) (bus_no);
f4c11551 935 notified = true;
12e364b9 936 }
2836c6a8 937 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
938 busdev_client_notifiers.bus_create) {
939 (*busdev_client_notifiers.bus_create) (bus_no);
f4c11551 940 notified = true;
12e364b9
KC
941 }
942 break;
943 case CONTROLVM_BUS_DESTROY:
6fe345af
BR
944 if (busdev_server_notifiers.bus_destroy) {
945 (*busdev_server_notifiers.bus_destroy) (bus_no);
f4c11551 946 notified = true;
12e364b9 947 }
2836c6a8 948 if ((!bus_info->flags.server) /*client */ &&
6fe345af
BR
949 busdev_client_notifiers.bus_destroy) {
950 (*busdev_client_notifiers.bus_destroy) (bus_no);
f4c11551 951 notified = true;
12e364b9
KC
952 }
953 break;
954 }
955 }
956 if (notified)
957 /* The callback function just called above is responsible
929aa8ae 958 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
959 * function, which will call bus_responder()
960 */
961 ;
962 else
2836c6a8 963 bus_responder(cmd, bus_no, response);
8f1947ac 964 up(&notifier_lock);
12e364b9
KC
965}
966
967static void
2836c6a8
BR
968device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
969 struct controlvm_message_header *msg_hdr, int response,
f4c11551 970 bool need_response, bool for_visorbus)
12e364b9 971{
e82ba62e 972 struct visorchipset_busdev_notifiers *notifiers;
f4c11551 973 bool notified = false;
12e364b9 974
2836c6a8 975 struct visorchipset_device_info *dev_info =
d480f6a2 976 device_find(&dev_info_list, bus_no, dev_no);
12e364b9
KC
977 char *envp[] = {
978 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
979 NULL
980 };
981
2836c6a8 982 if (!dev_info)
12e364b9 983 return;
0aca7844 984
12e364b9 985 if (for_visorbus)
6fe345af 986 notifiers = &busdev_server_notifiers;
12e364b9 987 else
6fe345af 988 notifiers = &busdev_client_notifiers;
2836c6a8
BR
989 if (need_response) {
990 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
98d7b594 991 sizeof(struct controlvm_message_header));
75c1f8b7 992 } else {
2836c6a8 993 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
75c1f8b7 994 }
12e364b9 995
8f1947ac 996 down(&notifier_lock);
12e364b9
KC
997 if (response >= 0) {
998 switch (cmd) {
999 case CONTROLVM_DEVICE_CREATE:
1000 if (notifiers->device_create) {
2836c6a8 1001 (*notifiers->device_create) (bus_no, dev_no);
f4c11551 1002 notified = true;
12e364b9
KC
1003 }
1004 break;
1005 case CONTROLVM_DEVICE_CHANGESTATE:
1006 /* ServerReady / ServerRunning / SegmentStateRunning */
bd0d2dcc
BR
1007 if (state.alive == segment_state_running.alive &&
1008 state.operating ==
1009 segment_state_running.operating) {
12e364b9 1010 if (notifiers->device_resume) {
2836c6a8
BR
1011 (*notifiers->device_resume) (bus_no,
1012 dev_no);
f4c11551 1013 notified = true;
12e364b9
KC
1014 }
1015 }
1016 /* ServerNotReady / ServerLost / SegmentStateStandby */
bd0d2dcc 1017 else if (state.alive == segment_state_standby.alive &&
3f833b54 1018 state.operating ==
bd0d2dcc 1019 segment_state_standby.operating) {
12e364b9
KC
1020 /* technically this is standby case
1021 * where server is lost
1022 */
1023 if (notifiers->device_pause) {
2836c6a8
BR
1024 (*notifiers->device_pause) (bus_no,
1025 dev_no);
f4c11551 1026 notified = true;
12e364b9 1027 }
bd0d2dcc 1028 } else if (state.alive == segment_state_paused.alive &&
3f833b54 1029 state.operating ==
bd0d2dcc 1030 segment_state_paused.operating) {
12e364b9
KC
1031 /* this is lite pause where channel is
1032 * still valid just 'pause' of it
1033 */
2836c6a8
BR
1034 if (bus_no == g_diagpool_bus_no &&
1035 dev_no == g_diagpool_dev_no) {
12e364b9
KC
1036 /* this will trigger the
1037 * diag_shutdown.sh script in
1038 * the visorchipset hotplug */
1039 kobject_uevent_env
eb34e877 1040 (&visorchipset_platform_device.dev.
12e364b9
KC
1041 kobj, KOBJ_ONLINE, envp);
1042 }
1043 }
1044 break;
1045 case CONTROLVM_DEVICE_DESTROY:
1046 if (notifiers->device_destroy) {
2836c6a8 1047 (*notifiers->device_destroy) (bus_no, dev_no);
f4c11551 1048 notified = true;
12e364b9
KC
1049 }
1050 break;
1051 }
1052 }
1053 if (notified)
1054 /* The callback function just called above is responsible
929aa8ae 1055 * for calling the appropriate visorchipset_busdev_responders
12e364b9
KC
1056 * function, which will call device_responder()
1057 */
1058 ;
1059 else
2836c6a8 1060 device_responder(cmd, bus_no, dev_no, response);
8f1947ac 1061 up(&notifier_lock);
12e364b9
KC
1062}
1063
1064static void
3ab47701 1065bus_create(struct controlvm_message *inmsg)
12e364b9 1066{
2ea5117b 1067 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1068 u32 bus_no = cmd->create_bus.bus_no;
12e364b9 1069 int rc = CONTROLVM_RESP_SUCCESS;
e82ba62e 1070 struct visorchipset_bus_info *bus_info;
12e364b9 1071
4f66520b 1072 bus_info = bus_find(&bus_info_list, bus_no);
6c5fed35
BR
1073 if (bus_info && (bus_info->state.created == 1)) {
1074 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1075 POSTCODE_SEVERITY_ERR);
22ad57ba 1076 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
6c5fed35 1077 goto cleanup;
12e364b9 1078 }
6c5fed35
BR
1079 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1080 if (!bus_info) {
1081 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
12e364b9 1082 POSTCODE_SEVERITY_ERR);
22ad57ba 1083 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
6c5fed35 1084 goto cleanup;
12e364b9
KC
1085 }
1086
6c5fed35
BR
1087 INIT_LIST_HEAD(&bus_info->entry);
1088 bus_info->bus_no = bus_no;
12e364b9 1089
6c5fed35 1090 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1091
98d7b594 1092 if (inmsg->hdr.flags.test_message == 1)
6c5fed35 1093 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1094 else
6c5fed35 1095 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
12e364b9 1096
6c5fed35
BR
1097 bus_info->flags.server = inmsg->hdr.flags.server;
1098 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1099 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1100 bus_info->chan_info.channel_type_uuid =
9b1caee7 1101 cmd->create_bus.bus_data_type_uuid;
6c5fed35 1102 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
12e364b9 1103
6c5fed35 1104 list_add(&bus_info->entry, &bus_info_list);
12e364b9 1105
6c5fed35 1106 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
12e364b9 1107
6c5fed35
BR
1108cleanup:
1109 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
98d7b594 1110 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1111}
1112
1113static void
3ab47701 1114bus_destroy(struct controlvm_message *inmsg)
12e364b9 1115{
2ea5117b 1116 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca 1117 u32 bus_no = cmd->destroy_bus.bus_no;
dff54cd6 1118 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1119 int rc = CONTROLVM_RESP_SUCCESS;
1120
4f66520b 1121 bus_info = bus_find(&bus_info_list, bus_no);
dff54cd6 1122 if (!bus_info)
22ad57ba 1123 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
dff54cd6 1124 else if (bus_info->state.created == 0)
22ad57ba 1125 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1126
dff54cd6 1127 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
98d7b594 1128 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1129}
1130
1131static void
317d9614
BR
1132bus_configure(struct controlvm_message *inmsg,
1133 struct parser_context *parser_ctx)
12e364b9 1134{
2ea5117b 1135 struct controlvm_message_packet *cmd = &inmsg->cmd;
e82ba62e
JS
1136 u32 bus_no;
1137 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1138 int rc = CONTROLVM_RESP_SUCCESS;
1139 char s[99];
1140
654bada0
BR
1141 bus_no = cmd->configure_bus.bus_no;
1142 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1143 POSTCODE_SEVERITY_INFO);
12e364b9 1144
4f66520b 1145 bus_info = bus_find(&bus_info_list, bus_no);
654bada0
BR
1146 if (!bus_info) {
1147 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1148 POSTCODE_SEVERITY_ERR);
22ad57ba 1149 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1150 } else if (bus_info->state.created == 0) {
1151 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1152 POSTCODE_SEVERITY_ERR);
22ad57ba 1153 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
654bada0
BR
1154 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1155 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
12e364b9 1156 POSTCODE_SEVERITY_ERR);
22ad57ba 1157 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
654bada0
BR
1158 } else {
1159 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1160 bus_info->partition_uuid = parser_id_get(parser_ctx);
1161 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1162 bus_info->name = parser_string_get(parser_ctx);
1163
1164 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1165 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1166 POSTCODE_SEVERITY_INFO);
12e364b9 1167 }
654bada0 1168 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
98d7b594 1169 rc, inmsg->hdr.flags.response_expected == 1);
12e364b9
KC
1170}
1171
1172static void
3ab47701 1173my_device_create(struct controlvm_message *inmsg)
12e364b9 1174{
2ea5117b 1175 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1176 u32 bus_no = cmd->create_device.bus_no;
1177 u32 dev_no = cmd->create_device.dev_no;
e82ba62e
JS
1178 struct visorchipset_device_info *dev_info;
1179 struct visorchipset_bus_info *bus_info;
12e364b9
KC
1180 int rc = CONTROLVM_RESP_SUCCESS;
1181
d480f6a2 1182 dev_info = device_find(&dev_info_list, bus_no, dev_no);
c60c8e26
BR
1183 if (dev_info && (dev_info->state.created == 1)) {
1184 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1185 POSTCODE_SEVERITY_ERR);
22ad57ba 1186 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
c60c8e26 1187 goto cleanup;
12e364b9 1188 }
4f66520b 1189 bus_info = bus_find(&bus_info_list, bus_no);
c60c8e26
BR
1190 if (!bus_info) {
1191 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1192 POSTCODE_SEVERITY_ERR);
22ad57ba 1193 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1194 goto cleanup;
12e364b9 1195 }
c60c8e26
BR
1196 if (bus_info->state.created == 0) {
1197 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1198 POSTCODE_SEVERITY_ERR);
22ad57ba 1199 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
c60c8e26 1200 goto cleanup;
12e364b9 1201 }
c60c8e26
BR
1202 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1203 if (!dev_info) {
1204 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1205 POSTCODE_SEVERITY_ERR);
22ad57ba 1206 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
c60c8e26 1207 goto cleanup;
12e364b9 1208 }
97a84f12 1209
c60c8e26
BR
1210 INIT_LIST_HEAD(&dev_info->entry);
1211 dev_info->bus_no = bus_no;
1212 dev_info->dev_no = dev_no;
1213 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1214 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
12e364b9
KC
1215 POSTCODE_SEVERITY_INFO);
1216
98d7b594 1217 if (inmsg->hdr.flags.test_message == 1)
c60c8e26 1218 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
12e364b9 1219 else
c60c8e26
BR
1220 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1221 dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1222 dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1223 dev_info->chan_info.channel_type_uuid =
9b1caee7 1224 cmd->create_device.data_type_uuid;
c60c8e26
BR
1225 dev_info->chan_info.intr = cmd->create_device.intr;
1226 list_add(&dev_info->entry, &dev_info_list);
1227 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
12e364b9 1228 POSTCODE_SEVERITY_INFO);
c60c8e26 1229cleanup:
12e364b9 1230 /* get the bus and devNo for DiagPool channel */
c60c8e26
BR
1231 if (dev_info &&
1232 is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1233 g_diagpool_bus_no = bus_no;
1234 g_diagpool_dev_no = dev_no;
12e364b9 1235 }
c60c8e26 1236 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1237 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
98d7b594 1238 inmsg->hdr.flags.response_expected == 1,
c60c8e26 1239 FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1240}
1241
1242static void
3ab47701 1243my_device_changestate(struct controlvm_message *inmsg)
12e364b9 1244{
2ea5117b 1245 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1246 u32 bus_no = cmd->device_change_state.bus_no;
1247 u32 dev_no = cmd->device_change_state.dev_no;
2ea5117b 1248 struct spar_segment_state state = cmd->device_change_state.state;
e82ba62e 1249 struct visorchipset_device_info *dev_info;
12e364b9
KC
1250 int rc = CONTROLVM_RESP_SUCCESS;
1251
d480f6a2 1252 dev_info = device_find(&dev_info_list, bus_no, dev_no);
0278a905
BR
1253 if (!dev_info) {
1254 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1255 POSTCODE_SEVERITY_ERR);
22ad57ba 1256 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
0278a905
BR
1257 } else if (dev_info->state.created == 0) {
1258 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
12e364b9 1259 POSTCODE_SEVERITY_ERR);
22ad57ba 1260 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
12e364b9 1261 }
0278a905
BR
1262 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1263 device_epilog(bus_no, dev_no, state,
1264 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
98d7b594 1265 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1266 FOR_VISORBUS(
0278a905 1267 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1268}
1269
1270static void
3ab47701 1271my_device_destroy(struct controlvm_message *inmsg)
12e364b9 1272{
2ea5117b 1273 struct controlvm_message_packet *cmd = &inmsg->cmd;
52063eca
JS
1274 u32 bus_no = cmd->destroy_device.bus_no;
1275 u32 dev_no = cmd->destroy_device.dev_no;
e82ba62e 1276 struct visorchipset_device_info *dev_info;
12e364b9
KC
1277 int rc = CONTROLVM_RESP_SUCCESS;
1278
d480f6a2 1279 dev_info = device_find(&dev_info_list, bus_no, dev_no);
61715c8b 1280 if (!dev_info)
22ad57ba 1281 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
61715c8b 1282 else if (dev_info->state.created == 0)
22ad57ba 1283 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
12e364b9 1284
61715c8b
BR
1285 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1286 device_epilog(bus_no, dev_no, segment_state_running,
12e364b9 1287 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
98d7b594 1288 inmsg->hdr.flags.response_expected == 1,
9b1caee7 1289 FOR_VISORBUS(
61715c8b 1290 dev_info->chan_info.channel_type_uuid));
12e364b9
KC
1291}
1292
1293/* When provided with the physical address of the controlvm channel
1294 * (phys_addr), the offset to the payload area we need to manage
1295 * (offset), and the size of this payload area (bytes), fills in the
f4c11551 1296 * controlvm_payload_info struct. Returns true for success or false
12e364b9
KC
1297 * for failure.
1298 */
1299static int
5fc0229a 1300initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
c1f834eb 1301 struct visor_controlvm_payload_info *info)
12e364b9 1302{
c242233e 1303 u8 __iomem *payload = NULL;
12e364b9
KC
1304 int rc = CONTROLVM_RESP_SUCCESS;
1305
38f736e9 1306 if (!info) {
22ad57ba 1307 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1308 goto cleanup;
12e364b9 1309 }
c1f834eb 1310 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9 1311 if ((offset == 0) || (bytes == 0)) {
22ad57ba 1312 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
f118a39b 1313 goto cleanup;
12e364b9
KC
1314 }
1315 payload = ioremap_cache(phys_addr + offset, bytes);
38f736e9 1316 if (!payload) {
22ad57ba 1317 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
f118a39b 1318 goto cleanup;
12e364b9
KC
1319 }
1320
1321 info->offset = offset;
1322 info->bytes = bytes;
1323 info->ptr = payload;
12e364b9 1324
f118a39b 1325cleanup:
12e364b9 1326 if (rc < 0) {
f118a39b 1327 if (payload) {
12e364b9
KC
1328 iounmap(payload);
1329 payload = NULL;
1330 }
1331 }
1332 return rc;
1333}
1334
1335static void
c1f834eb 1336destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
12e364b9 1337{
597c338f 1338 if (info->ptr) {
12e364b9
KC
1339 iounmap(info->ptr);
1340 info->ptr = NULL;
1341 }
c1f834eb 1342 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
12e364b9
KC
1343}
1344
1345static void
1346initialize_controlvm_payload(void)
1347{
c3d9a224 1348 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
cafefc0c
BR
1349 u64 payload_offset = 0;
1350 u32 payload_bytes = 0;
26eb2c0c 1351
c3d9a224 1352 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1353 offsetof(struct spar_controlvm_channel_protocol,
1354 request_payload_offset),
cafefc0c 1355 &payload_offset, sizeof(payload_offset)) < 0) {
12e364b9
KC
1356 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1357 POSTCODE_SEVERITY_ERR);
1358 return;
1359 }
c3d9a224 1360 if (visorchannel_read(controlvm_channel,
d19642f6
BR
1361 offsetof(struct spar_controlvm_channel_protocol,
1362 request_payload_bytes),
cafefc0c 1363 &payload_bytes, sizeof(payload_bytes)) < 0) {
12e364b9
KC
1364 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1365 POSTCODE_SEVERITY_ERR);
1366 return;
1367 }
1368 initialize_controlvm_payload_info(phys_addr,
cafefc0c 1369 payload_offset, payload_bytes,
84982fbf 1370 &controlvm_payload_info);
12e364b9
KC
1371}
1372
1373/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1374 * Returns CONTROLVM_RESP_xxx code.
1375 */
1376int
1377visorchipset_chipset_ready(void)
1378{
eb34e877 1379 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
12e364b9
KC
1380 return CONTROLVM_RESP_SUCCESS;
1381}
1382EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1383
1384int
1385visorchipset_chipset_selftest(void)
1386{
1387 char env_selftest[20];
1388 char *envp[] = { env_selftest, NULL };
26eb2c0c 1389
12e364b9 1390 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
eb34e877 1391 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
12e364b9
KC
1392 envp);
1393 return CONTROLVM_RESP_SUCCESS;
1394}
1395EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1396
1397/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1398 * Returns CONTROLVM_RESP_xxx code.
1399 */
1400int
1401visorchipset_chipset_notready(void)
1402{
eb34e877 1403 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
12e364b9
KC
1404 return CONTROLVM_RESP_SUCCESS;
1405}
1406EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1407
1408static void
77a0449d 1409chipset_ready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1410{
1411 int rc = visorchipset_chipset_ready();
26eb2c0c 1412
12e364b9
KC
1413 if (rc != CONTROLVM_RESP_SUCCESS)
1414 rc = -rc;
77a0449d
BR
1415 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1416 controlvm_respond(msg_hdr, rc);
1417 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
12e364b9
KC
1418 /* Send CHIPSET_READY response when all modules have been loaded
1419 * and disks mounted for the partition
1420 */
77a0449d 1421 g_chipset_msg_hdr = *msg_hdr;
12e364b9
KC
1422 }
1423}
1424
1425static void
77a0449d 1426chipset_selftest(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1427{
1428 int rc = visorchipset_chipset_selftest();
26eb2c0c 1429
12e364b9
KC
1430 if (rc != CONTROLVM_RESP_SUCCESS)
1431 rc = -rc;
77a0449d
BR
1432 if (msg_hdr->flags.response_expected)
1433 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1434}
1435
1436static void
77a0449d 1437chipset_notready(struct controlvm_message_header *msg_hdr)
12e364b9
KC
1438{
1439 int rc = visorchipset_chipset_notready();
26eb2c0c 1440
12e364b9
KC
1441 if (rc != CONTROLVM_RESP_SUCCESS)
1442 rc = -rc;
77a0449d
BR
1443 if (msg_hdr->flags.response_expected)
1444 controlvm_respond(msg_hdr, rc);
12e364b9
KC
1445}
1446
1447/* This is your "one-stop" shop for grabbing the next message from the
1448 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1449 */
f4c11551 1450static bool
3ab47701 1451read_controlvm_event(struct controlvm_message *msg)
12e364b9 1452{
c3d9a224 1453 if (visorchannel_signalremove(controlvm_channel,
12e364b9
KC
1454 CONTROLVM_QUEUE_EVENT, msg)) {
1455 /* got a message */
0aca7844 1456 if (msg->hdr.flags.test_message == 1)
f4c11551
JS
1457 return false;
1458 return true;
12e364b9 1459 }
f4c11551 1460 return false;
12e364b9
KC
1461}
1462
1463/*
1464 * The general parahotplug flow works as follows. The visorchipset
1465 * driver receives a DEVICE_CHANGESTATE message from Command
1466 * specifying a physical device to enable or disable. The CONTROLVM
1467 * message handler calls parahotplug_process_message, which then adds
1468 * the message to a global list and kicks off a udev event which
1469 * causes a user level script to enable or disable the specified
1470 * device. The udev script then writes to
1471 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1472 * to get called, at which point the appropriate CONTROLVM message is
1473 * retrieved from the list and responded to.
1474 */
1475
1476#define PARAHOTPLUG_TIMEOUT_MS 2000
1477
1478/*
1479 * Generate unique int to match an outstanding CONTROLVM message with a
1480 * udev script /proc response
1481 */
1482static int
1483parahotplug_next_id(void)
1484{
1485 static atomic_t id = ATOMIC_INIT(0);
26eb2c0c 1486
12e364b9
KC
1487 return atomic_inc_return(&id);
1488}
1489
1490/*
1491 * Returns the time (in jiffies) when a CONTROLVM message on the list
1492 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1493 */
1494static unsigned long
1495parahotplug_next_expiration(void)
1496{
2cc1a1b3 1497 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
12e364b9
KC
1498}
1499
1500/*
1501 * Create a parahotplug_request, which is basically a wrapper for a
1502 * CONTROLVM_MESSAGE that we can stick on a list
1503 */
1504static struct parahotplug_request *
3ab47701 1505parahotplug_request_create(struct controlvm_message *msg)
12e364b9 1506{
ea0dcfcf
QL
1507 struct parahotplug_request *req;
1508
6a55e3c3 1509 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
38f736e9 1510 if (!req)
12e364b9
KC
1511 return NULL;
1512
1513 req->id = parahotplug_next_id();
1514 req->expiration = parahotplug_next_expiration();
1515 req->msg = *msg;
1516
1517 return req;
1518}
1519
1520/*
1521 * Free a parahotplug_request.
1522 */
1523static void
1524parahotplug_request_destroy(struct parahotplug_request *req)
1525{
1526 kfree(req);
1527}
1528
/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}

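/*
 * Note on the encoding above: dev_no appears to pack a PCI device/function
 * pair, so the uevent reports the device number as dev_no >> 3 and the
 * function number as dev_no & 0x7.
 */
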
/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called back once the user level script has finished the enable/disable
 * (via the parahotplug devicedisabled/deviceenabled interface). Find the
 * matching identifier, and respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match. Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}

/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
					CONTROLVM_RESP_SUCCESS,
					inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}

/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use */
			/* when sending back the response to Command */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
			break;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}

static HOSTADDRESS controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}

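/* This delayed-work routine first drains (and discards) anything sitting on
 * the CONTROLVM_QUEUE_RESPONSE queue, then pulls events one at a time with
 * read_controlvm_event() and dispatches them through handle_command(). When
 * handle_command() returns false (payload-memory throttling), the message is
 * stashed in controlvm_pending_msg and retried on the next pass. The poll
 * interval switches between the fast and slow rates depending on whether a
 * controlvm message has been seen within the last MIN_IDLE_SECONDS.
 */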
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;
	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	poll_count++;
	if (poll_count >= 250)
		;	/* keep going */
	else
		goto cleanup;

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

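/* When booting in a kdump (crash) kernel, visorchipset_init() queues this
 * routine instead of controlvm_periodic_work(). It re-sends the CHIPSET_INIT
 * message and then replays the bus-create and device-create messages that
 * were saved in the controlvm channel, presumably so the storage device
 * needed to write out the crash dump can be brought back up without Command
 * having to resend those messages.
 */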
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_serverregwait && !serverregistered)
		goto cleanup;

	/* make sure visorclientbus server is registered for controlvm
	 * callbacks
	 */
	if (visorchipset_clientregwait && !clientregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

static void
bus_create_response(u32 bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
}

static void
bus_destroy_response(u32 bus_no, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
}

static void
device_create_response(u32 bus_no, u32 dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
}

static void
device_destroy_response(u32 bus_no, u32 dev_no, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
}

void
visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_standby);
}
EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);

static void
device_resume_response(u32 bus_no, u32 dev_no, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     bus_no, dev_no, response,
				     segment_state_running);
}

bool
visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
{
	void *p = bus_find(&bus_info_list, bus_no);

	if (!p)
		return false;
	memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);

bool
visorchipset_set_bus_context(u32 bus_no, void *context)
{
	struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);

	if (!p)
		return false;
	p->bus_driver_context = context;
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);

bool
visorchipset_get_device_info(u32 bus_no, u32 dev_no,
			     struct visorchipset_device_info *dev_info)
{
	void *p = device_find(&dev_info_list, bus_no, dev_no);

	if (!p)
		return false;
	memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_get_device_info);

bool
visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
{
	struct visorchipset_device_info *p;

	p = device_find(&dev_info_list, bus_no, dev_no);

	if (!p)
		return false;
	p->bus_driver_context = context;
	return true;
}
EXPORT_SYMBOL_GPL(visorchipset_set_device_context);

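/*
 * Illustrative sketch only (compiled out; not part of this driver): one way
 * a bus driver built on the exports above might pair
 * visorchipset_get_bus_info() with visorchipset_set_bus_context(). The
 * private structure and function names here are hypothetical.
 */
#if 0
struct example_bus_private {
	u32 bus_no;
};

static int example_attach_bus(u32 bus_no)
{
	struct visorchipset_bus_info bus_info;
	struct example_bus_private *priv;

	/* copy out the chipset's record of this bus, if it has one */
	if (!visorchipset_get_bus_info(bus_no, &bus_info))
		return -ENODEV;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->bus_no = bus_no;

	/* stash our private data so later callbacks can retrieve it */
	if (!visorchipset_set_bus_context(bus_no, priv)) {
		kfree(priv);
		return -ENODEV;
	}
	return 0;
}
#endif
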
/* Generic wrapper function for allocating memory from a kmem_cache pool.
 */
void *
visorchipset_cache_alloc(struct kmem_cache *pool, bool ok_to_block,
			 char *fn, int ln)
{
	gfp_t gfp;
	void *p;

	if (ok_to_block)
		gfp = GFP_KERNEL;
	else
		gfp = GFP_ATOMIC;
	/* __GFP_NORETRY means "ok to fail", meaning
	 * kmem_cache_alloc() can return NULL, implying the caller CAN
	 * cope with failure. If you do NOT specify __GFP_NORETRY,
	 * Linux will go to extreme measures to get memory for you
	 * (like, invoke oom killer), which will probably cripple the
	 * system.
	 */
	gfp |= __GFP_NORETRY;
	p = kmem_cache_alloc(pool, gfp);
	if (!p)
		return NULL;

	return p;
}

/* Generic wrapper function for freeing memory from a kmem_cache pool.
 */
void
visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
{
	if (!p)
		return;

	kmem_cache_free(pool, p);
}

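/*
 * Illustrative usage (an assumption, not taken from this file): the fn/ln
 * parameters suggest callers pass __FILE__ and __LINE__ for debugging, and
 * every caller must be prepared for a NULL return because of __GFP_NORETRY,
 * e.g.:
 *
 *	p = visorchipset_cache_alloc(pool, false, __FILE__, __LINE__);
 *	if (!p)
 *		return -ENOMEM;
 */
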
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
		chipset_events[0] = 1;
		return count;
	} else if (!strcmp(msgtype, "MODULES_LOADED")) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}

/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}

/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}

static int __init
visorchipset_init(void)
{
	int rc = 0, x = 0;
	HOSTADDRESS addr;

	if (!unisys_spar_platform)
		return -ENODEV;

	memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
	memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
	memset(&livedump_info, 0, sizeof(livedump_info));
	atomic_set(&livedump_info.buffers_in_use, 0);

	if (visorchipset_testvnic) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
		rc = x;
		goto cleanup;
	}

	addr = controlvm_get_channel_address();
	if (addr) {
		controlvm_channel =
		    visorchannel_create_with_lock
		    (addr,
		     sizeof(struct spar_controlvm_channel_protocol),
		     spar_controlvm_channel_protocol_uuid);
		if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
				visorchannel_get_header(controlvm_channel))) {
			initialize_controlvm_payload();
		} else {
			visorchannel_destroy(controlvm_channel);
			controlvm_channel = NULL;
			return -ENODEV;
		}
	} else {
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	if (!visorchipset_disable_controlvm) {
		/* if booting in a crash kernel */
		if (is_kdump_kernel())
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  setup_crash_devices_work_queue);
		else
			INIT_DELAYED_WORK(&periodic_controlvm_work,
					  controlvm_periodic_work);
		periodic_controlvm_workqueue =
		    create_singlethread_workqueue("visorchipset_controlvm");

		if (!periodic_controlvm_workqueue) {
			POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
					 DIAG_SEVERITY_ERR);
			rc = -ENOMEM;
			goto cleanup;
		}
		most_recent_message_jiffies = jiffies;
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
		rc = queue_delayed_work(periodic_controlvm_workqueue,
					&periodic_controlvm_work, poll_jiffies);
		if (rc < 0) {
			POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
					 DIAG_SEVERITY_ERR);
			goto cleanup;
		}
	}

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -1;
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
	rc = 0;
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}

static void
visorchipset_exit(void)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	if (!visorchipset_disable_controlvm) {
		cancel_delayed_work(&periodic_controlvm_work);
		flush_workqueue(periodic_controlvm_workqueue);
		destroy_workqueue(periodic_controlvm_workqueue);
		periodic_controlvm_workqueue = NULL;
		destroy_controlvm_payload_info(&controlvm_payload_info);
	}

	cleanup_controlvm_structures();

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
}

module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testmsg,
		 "1 to manufacture the chipset, bus, and switch messages");
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");
module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_serverregwait,
		 "1 to have the module wait for the visor bus to register");
module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_testteardown,
		 "1 to test teardown of the chipset, bus, and switch");
module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
		   S_IRUGO);
MODULE_PARM_DESC(visorchipset_disable_controlvm,
		 "1 to disable polling of controlVm channel");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
		 "1 to hold response to CHIPSET_READY");

module_init(visorchipset_init);
module_exit(visorchipset_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);