staging: unisys: Remove some unnecessary parentheses
[deliverable/linux.git] / drivers / staging / unisys / visorchipset / visorchipset_main.c
1/* visorchipset_main.c
2 *
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18#include "version.h"
19#include "visorchipset.h"
20#include "procobjecttree.h"
21#include "visorchannel.h"
22#include "periodic_work.h"
23#include "file.h"
24#include "parser.h"
25#include "uisutils.h"
26#include "controlvmcompletionstatus.h"
27#include "guestlinuxdebug.h"
28
29#include <linux/nls.h>
30#include <linux/netdevice.h>
31#include <linux/platform_device.h>
32#include <linux/uuid.h>
33#include <linux/crash_dump.h>
34
35#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
36#define TEST_VNIC_PHYSITF "eth0" /* physical network interface for
37 * vnic loopback test */
38#define TEST_VNIC_SWITCHNO 1
39#define TEST_VNIC_BUSNO 9
40
41#define MAX_NAME_SIZE 128
42#define MAX_IP_SIZE 50
43#define MAXOUTSTANDINGCHANNELCOMMAND 256
44#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
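/* For scale: these poll intervals are in jiffies, so with a typical
 * CONFIG_HZ of 250 the fast mode polls roughly every 4 ms and the slow
 * mode roughly every 400 ms (exact times depend on the kernel's HZ).
 */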
46
47/*
48 * Module parameters
49 */
50static int visorchipset_testvnic;
51static int visorchipset_testvnicclient;
52static int visorchipset_testmsg;
53static int visorchipset_major;
54static int visorchipset_serverregwait;
55static int visorchipset_clientregwait = 1; /* default is on */
56static int visorchipset_testteardown;
57static int visorchipset_disable_controlvm;
58static int visorchipset_holdchipsetready;
59
60/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
61 * we switch to slow polling mode. As soon as we get a controlvm
62 * message, we switch back to fast polling mode.
63 */
64#define MIN_IDLE_SECONDS 10
65static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
66static unsigned long most_recent_message_jiffies; /* when we got our last
67 * controlvm message */
68static int serverregistered;
69static int clientregistered;
70
71#define MAX_CHIPSET_EVENTS 2
72static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
73
74static struct delayed_work periodic_controlvm_work;
75static struct workqueue_struct *periodic_controlvm_workqueue;
76static DEFINE_SEMAPHORE(notifier_lock);
77
78static struct controlvm_message_header g_diag_msg_hdr;
79static struct controlvm_message_header g_chipset_msg_hdr;
80static struct controlvm_message_header g_del_dump_msg_hdr;
81static const uuid_le spar_diag_pool_channel_protocol_uuid =
82 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
83/* 0xffffff is an invalid Bus/Device number */
84static u32 g_diagpool_bus_no = 0xffffff;
85static u32 g_diagpool_dev_no = 0xffffff;
86static struct controlvm_message_packet g_devicechangestate_packet;
87
88/* Only VNIC and VHBA channels are sent to visorclientbus (aka
89 * "visorhackbus")
90 */
 91#define FOR_VISORHACKBUS(channel_type_guid) \
 92 ((uuid_le_cmp(channel_type_guid,\
 93 spar_vnic_channel_protocol_uuid) == 0) ||\
 94 (uuid_le_cmp(channel_type_guid,\
 95 spar_vhba_channel_protocol_uuid) == 0))
 96#define FOR_VISORBUS(channel_type_guid) (!FOR_VISORHACKBUS(channel_type_guid))
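/* Illustrative use of these selectors (taken from my_device_create()
 * below): the channel-type GUID of a new device picks the bus flavor
 * that gets notified, e.g.
 *
 *	device_epilog(bus_no, dev_no, segment_state_running,
 *		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
 *		      inmsg->hdr.flags.response_expected == 1,
 *		      FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
 */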
97
98#define is_diagpool_channel(channel_type_guid) \
99 (uuid_le_cmp(channel_type_guid,\
100 spar_diag_pool_channel_protocol_uuid) == 0)
101
102static LIST_HEAD(bus_info_list);
103static LIST_HEAD(dev_info_list);
104
105static struct visorchannel *controlvm_channel;
106
107/* Manages the request payload in the controlvm channel */
108struct visor_controlvm_payload_info {
109 u8 __iomem *ptr; /* pointer to base address of payload pool */
 110 u64 offset; /* offset from beginning of controlvm
 111 * channel to beginning of payload pool */
112 u32 bytes; /* number of bytes in payload pool */
113};
114
115static struct visor_controlvm_payload_info controlvm_payload_info;
116
117/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
118 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
119 */
120struct visor_livedump_info {
121 struct controlvm_message_header dumpcapture_header;
122 struct controlvm_message_header gettextdump_header;
123 struct controlvm_message_header dumpcomplete_header;
124 bool gettextdump_outstanding;
125 u32 crc32;
126 unsigned long length;
127 atomic_t buffers_in_use;
128 unsigned long destination;
129};
130
131static struct visor_livedump_info livedump_info;
132
133/* The following globals are used to handle the scenario where we are unable to
134 * offload the payload from a controlvm message due to memory requirements. In
135 * this scenario, we simply stash the controlvm message, then attempt to
136 * process it again the next time controlvm_periodic_work() runs.
137 */
138static struct controlvm_message controlvm_pending_msg;
139static bool controlvm_pending_msg_valid = false;
140
141/* Pool of struct putfile_buffer_entry, for keeping track of pending (incoming)
142 * TRANSMIT_FILE PutFile payloads.
143 */
144static struct kmem_cache *putfile_buffer_list_pool;
145static const char putfile_buffer_list_pool_name[] =
146 "controlvm_putfile_buffer_list_pool";
147
148/* This identifies a data buffer that has been received via a controlvm message
149 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
150 */
151struct putfile_buffer_entry {
152 struct list_head next; /* putfile_buffer_entry list */
153 struct parser_context *parser_ctx; /* points to input data buffer */
154};
155
156/* List of struct putfile_request *, via next_putfile_request member.
157 * Each entry in this list identifies an outstanding TRANSMIT_FILE
158 * conversation.
159 */
160static LIST_HEAD(putfile_request_list);
161
162/* This describes a buffer and its current state of transfer (e.g., how many
163 * bytes have already been supplied as putfile data, and how many bytes are
164 * remaining) for a putfile_request.
165 */
166struct putfile_active_buffer {
167 /* a payload from a controlvm message, containing a file data buffer */
168 struct parser_context *parser_ctx;
169 /* points within data area of parser_ctx to next byte of data */
170 u8 *pnext;
171 /* # bytes left from <pnext> to the end of this data buffer */
172 size_t bytes_remaining;
173};
174
175#define PUTFILE_REQUEST_SIG 0x0906101302281211
176/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
177 * conversation. Structs of this type are dynamically linked into
 178 * <putfile_request_list>.
179 */
180struct putfile_request {
181 u64 sig; /* PUTFILE_REQUEST_SIG */
182
183 /* header from original TransmitFile request */
184 struct controlvm_message_header controlvm_header;
185 u64 file_request_number; /* from original TransmitFile request */
186
187 /* link to next struct putfile_request */
188 struct list_head next_putfile_request;
189
190 /* most-recent sequence number supplied via a controlvm message */
191 u64 data_sequence_number;
192
193 /* head of putfile_buffer_entry list, which describes the data to be
194 * supplied as putfile data;
195 * - this list is added to when controlvm messages come in that supply
196 * file data
197 * - this list is removed from via the hotplug program that is actually
198 * consuming these buffers to write as file data */
199 struct list_head input_buffer_list;
200 spinlock_t req_list_lock; /* lock for input_buffer_list */
201
202 /* waiters for input_buffer_list to go non-empty */
203 wait_queue_head_t input_buffer_wq;
204
205 /* data not yet read within current putfile_buffer_entry */
206 struct putfile_active_buffer active_buf;
207
208 /* <0 = failed, 0 = in-progress, >0 = successful; */
209 /* note that this must be set with req_list_lock, and if you set <0, */
210 /* it is your responsibility to also free up all of the other objects */
211 /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
212 /* before releasing the lock */
213 int completion_status;
214};
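/* Putfile lifecycle sketch, pieced together from the fields above: a
 * TRANSMIT_FILE request allocates a putfile_request; incoming controlvm
 * data messages append putfile_buffer_entry items to input_buffer_list
 * (waking waiters on input_buffer_wq); the consumer drains them via
 * active_buf; and completion_status records the <0 / 0 / >0 outcome,
 * set under req_list_lock.
 */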
215
216static atomic_t visorchipset_cache_buffers_in_use = ATOMIC_INIT(0);
217
218struct parahotplug_request {
219 struct list_head list;
220 int id;
221 unsigned long expiration;
222 struct controlvm_message msg;
223};
224
225static LIST_HEAD(parahotplug_request_list);
226static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
227static void parahotplug_process_list(void);
228
 229/* The notifiers to call when bus/device events are handled, for the
 230 * server and client bus flavors respectively.
 231 */
232static struct visorchipset_busdev_notifiers busdev_server_notifiers;
233static struct visorchipset_busdev_notifiers busdev_client_notifiers;
234
235static void bus_create_response(u32 bus_no, int response);
236static void bus_destroy_response(u32 bus_no, int response);
237static void device_create_response(u32 bus_no, u32 dev_no, int response);
238static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
239static void device_resume_response(u32 bus_no, u32 dev_no, int response);
240
241static struct visorchipset_busdev_responders busdev_responders = {
242 .bus_create = bus_create_response,
243 .bus_destroy = bus_destroy_response,
244 .device_create = device_create_response,
245 .device_destroy = device_destroy_response,
246 .device_pause = visorchipset_device_pause_response,
247 .device_resume = device_resume_response,
248};
249
250/* info for /dev/visorchipset */
251static dev_t major_dev = -1; /* indicates major number for device */
252
253/* prototypes for attributes */
254static ssize_t toolaction_show(struct device *dev,
255 struct device_attribute *attr, char *buf);
256static ssize_t toolaction_store(struct device *dev,
257 struct device_attribute *attr,
258 const char *buf, size_t count);
259static DEVICE_ATTR_RW(toolaction);
260
261static ssize_t boottotool_show(struct device *dev,
262 struct device_attribute *attr, char *buf);
263static ssize_t boottotool_store(struct device *dev,
264 struct device_attribute *attr, const char *buf,
265 size_t count);
266static DEVICE_ATTR_RW(boottotool);
267
268static ssize_t error_show(struct device *dev, struct device_attribute *attr,
269 char *buf);
270static ssize_t error_store(struct device *dev, struct device_attribute *attr,
271 const char *buf, size_t count);
272static DEVICE_ATTR_RW(error);
273
274static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
275 char *buf);
276static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
277 const char *buf, size_t count);
278static DEVICE_ATTR_RW(textid);
279
280static ssize_t remaining_steps_show(struct device *dev,
281 struct device_attribute *attr, char *buf);
282static ssize_t remaining_steps_store(struct device *dev,
283 struct device_attribute *attr,
284 const char *buf, size_t count);
285static DEVICE_ATTR_RW(remaining_steps);
286
287static ssize_t chipsetready_store(struct device *dev,
288 struct device_attribute *attr,
289 const char *buf, size_t count);
290static DEVICE_ATTR_WO(chipsetready);
291
292static ssize_t devicedisabled_store(struct device *dev,
293 struct device_attribute *attr,
294 const char *buf, size_t count);
295static DEVICE_ATTR_WO(devicedisabled);
296
297static ssize_t deviceenabled_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count);
300static DEVICE_ATTR_WO(deviceenabled);
301
302static struct attribute *visorchipset_install_attrs[] = {
303 &dev_attr_toolaction.attr,
304 &dev_attr_boottotool.attr,
305 &dev_attr_error.attr,
306 &dev_attr_textid.attr,
307 &dev_attr_remaining_steps.attr,
308 NULL
309};
310
311static struct attribute_group visorchipset_install_group = {
312 .name = "install",
313 .attrs = visorchipset_install_attrs
314};
315
316static struct attribute *visorchipset_guest_attrs[] = {
317 &dev_attr_chipsetready.attr,
318 NULL
319};
320
321static struct attribute_group visorchipset_guest_group = {
322 .name = "guest",
323 .attrs = visorchipset_guest_attrs
324};
325
326static struct attribute *visorchipset_parahotplug_attrs[] = {
327 &dev_attr_devicedisabled.attr,
328 &dev_attr_deviceenabled.attr,
329 NULL
330};
331
332static struct attribute_group visorchipset_parahotplug_group = {
333 .name = "parahotplug",
334 .attrs = visorchipset_parahotplug_attrs
335};
336
337static const struct attribute_group *visorchipset_dev_groups[] = {
338 &visorchipset_install_group,
339 &visorchipset_guest_group,
340 &visorchipset_parahotplug_group,
341 NULL
342};
343
344/* /sys/devices/platform/visorchipset */
345static struct platform_device visorchipset_platform_device = {
346 .name = "visorchipset",
347 .id = -1,
348 .dev.groups = visorchipset_dev_groups,
349};
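/* With the platform device and groups above, these attributes should
 * surface in sysfs as, e.g.:
 *   /sys/devices/platform/visorchipset/install/toolaction
 *   /sys/devices/platform/visorchipset/guest/chipsetready
 *   /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */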
350
351/* Function prototypes */
352static void controlvm_respond(struct controlvm_message_header *msg_hdr,
353 int response);
354static void controlvm_respond_chipset_init(
355 struct controlvm_message_header *msg_hdr, int response,
356 enum ultra_chipset_feature features);
357static void controlvm_respond_physdev_changestate(
358 struct controlvm_message_header *msg_hdr, int response,
359 struct spar_segment_state state);
360
361static ssize_t toolaction_show(struct device *dev,
362 struct device_attribute *attr,
363 char *buf)
364{
365 u8 tool_action;
366
367 visorchannel_read(controlvm_channel,
368 offsetof(struct spar_controlvm_channel_protocol,
369 tool_action), &tool_action, sizeof(u8));
370 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
371}
372
373static ssize_t toolaction_store(struct device *dev,
374 struct device_attribute *attr,
375 const char *buf, size_t count)
376{
377 u8 tool_action;
378 int ret;
379
380 if (kstrtou8(buf, 10, &tool_action) != 0)
381 return -EINVAL;
382
383 ret = visorchannel_write(controlvm_channel,
384 offsetof(struct spar_controlvm_channel_protocol,
385 tool_action),
386 &tool_action, sizeof(u8));
387
388 if (ret)
389 return ret;
390 return count;
391}
392
393static ssize_t boottotool_show(struct device *dev,
394 struct device_attribute *attr,
395 char *buf)
396{
397 struct efi_spar_indication efi_spar_indication;
398
399 visorchannel_read(controlvm_channel,
400 offsetof(struct spar_controlvm_channel_protocol,
401 efi_spar_ind), &efi_spar_indication,
402 sizeof(struct efi_spar_indication));
403 return scnprintf(buf, PAGE_SIZE, "%u\n",
404 efi_spar_indication.boot_to_tool);
405}
406
407static ssize_t boottotool_store(struct device *dev,
408 struct device_attribute *attr,
409 const char *buf, size_t count)
410{
411 int val, ret;
412 struct efi_spar_indication efi_spar_indication;
413
414 if (kstrtoint(buf, 10, &val) != 0)
415 return -EINVAL;
416
417 efi_spar_indication.boot_to_tool = val;
418 ret = visorchannel_write(controlvm_channel,
419 offsetof(struct spar_controlvm_channel_protocol,
 420 efi_spar_ind), &efi_spar_indication,
421 sizeof(struct efi_spar_indication));
422
423 if (ret)
424 return ret;
425 return count;
426}
427
428static ssize_t error_show(struct device *dev, struct device_attribute *attr,
429 char *buf)
430{
431 u32 error;
432
433 visorchannel_read(controlvm_channel,
434 offsetof(struct spar_controlvm_channel_protocol,
435 installation_error),
436 &error, sizeof(u32));
437 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
438}
439
440static ssize_t error_store(struct device *dev, struct device_attribute *attr,
441 const char *buf, size_t count)
442{
443 u32 error;
444 int ret;
445
446 if (kstrtou32(buf, 10, &error) != 0)
447 return -EINVAL;
448
449 ret = visorchannel_write(controlvm_channel,
450 offsetof(struct spar_controlvm_channel_protocol,
451 installation_error),
452 &error, sizeof(u32));
453 if (ret)
454 return ret;
455 return count;
456}
457
458static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
459 char *buf)
460{
461 u32 text_id;
462
463 visorchannel_read(controlvm_channel,
464 offsetof(struct spar_controlvm_channel_protocol,
465 installation_text_id),
466 &text_id, sizeof(u32));
467 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
468}
469
470static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
471 const char *buf, size_t count)
472{
473 u32 text_id;
474 int ret;
475
476 if (kstrtou32(buf, 10, &text_id) != 0)
477 return -EINVAL;
478
479 ret = visorchannel_write(controlvm_channel,
480 offsetof(struct spar_controlvm_channel_protocol,
481 installation_text_id),
482 &text_id, sizeof(u32));
483 if (ret)
484 return ret;
485 return count;
486}
487
488static ssize_t remaining_steps_show(struct device *dev,
489 struct device_attribute *attr, char *buf)
490{
491 u16 remaining_steps;
492
493 visorchannel_read(controlvm_channel,
494 offsetof(struct spar_controlvm_channel_protocol,
495 installation_remaining_steps),
496 &remaining_steps, sizeof(u16));
497 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
498}
499
500static ssize_t remaining_steps_store(struct device *dev,
501 struct device_attribute *attr,
502 const char *buf, size_t count)
503{
504 u16 remaining_steps;
505 int ret;
506
507 if (kstrtou16(buf, 10, &remaining_steps) != 0)
508 return -EINVAL;
509
510 ret = visorchannel_write(controlvm_channel,
511 offsetof(struct spar_controlvm_channel_protocol,
512 installation_remaining_steps),
513 &remaining_steps, sizeof(u16));
514 if (ret)
515 return ret;
516 return count;
517}
518
519static void
520bus_info_clear(void *v)
521{
 522 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *)v;
523
524 kfree(p->name);
525 p->name = NULL;
526
527 kfree(p->description);
528 p->description = NULL;
529
530 p->state.created = 0;
531 memset(p, 0, sizeof(struct visorchipset_bus_info));
532}
533
534static void
535dev_info_clear(void *v)
536{
 537 struct visorchipset_device_info *p =
 538 (struct visorchipset_device_info *)v;
539
540 p->state.created = 0;
541 memset(p, 0, sizeof(struct visorchipset_device_info));
542}
543
544static u8
545check_chipset_events(void)
546{
547 int i;
548 u8 send_msg = 1;
549 /* Check events to determine if response should be sent */
550 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
551 send_msg &= chipset_events[i];
552 return send_msg;
553}
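/* Because of the AND-accumulation above, check_chipset_events() reports
 * 1 only once every one of the MAX_CHIPSET_EVENTS bits has been set.
 */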
554
555static void
556clear_chipset_events(void)
557{
558 int i;
559 /* Clear chipset_events */
560 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
561 chipset_events[i] = 0;
562}
563
564void
565visorchipset_register_busdev_server(
566 struct visorchipset_busdev_notifiers *notifiers,
567 struct visorchipset_busdev_responders *responders,
568 struct ultra_vbus_deviceinfo *driver_info)
569{
570 down(&notifier_lock);
571 if (!notifiers) {
572 memset(&busdev_server_notifiers, 0,
573 sizeof(busdev_server_notifiers));
574 serverregistered = 0; /* clear flag */
575 } else {
576 busdev_server_notifiers = *notifiers;
577 serverregistered = 1; /* set flag */
578 }
579 if (responders)
580 *responders = busdev_responders;
581 if (driver_info)
582 bus_device_info_init(driver_info, "chipset", "visorchipset",
583 VERSION, NULL);
584
585 up(&notifier_lock);
586}
587EXPORT_SYMBOL_GPL(visorchipset_register_busdev_server);
588
589void
590visorchipset_register_busdev_client(
591 struct visorchipset_busdev_notifiers *notifiers,
592 struct visorchipset_busdev_responders *responders,
593 struct ultra_vbus_deviceinfo *driver_info)
594{
595 down(&notifier_lock);
596 if (!notifiers) {
597 memset(&busdev_client_notifiers, 0,
598 sizeof(busdev_client_notifiers));
599 clientregistered = 0; /* clear flag */
600 } else {
601 busdev_client_notifiers = *notifiers;
602 clientregistered = 1; /* set flag */
603 }
604 if (responders)
605 *responders = busdev_responders;
606 if (driver_info)
607 bus_device_info_init(driver_info, "chipset(bolts)",
608 "visorchipset", VERSION, NULL);
609 up(&notifier_lock);
610}
611EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
612
613static void
614cleanup_controlvm_structures(void)
615{
616 struct visorchipset_bus_info *bi, *tmp_bi;
617 struct visorchipset_device_info *di, *tmp_di;
618
619 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
620 bus_info_clear(bi);
621 list_del(&bi->entry);
622 kfree(bi);
623 }
624
625 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
626 dev_info_clear(di);
627 list_del(&di->entry);
628 kfree(di);
629 }
630}
631
632static void
633chipset_init(struct controlvm_message *inmsg)
634{
635 static int chipset_inited;
636 enum ultra_chipset_feature features = 0;
637 int rc = CONTROLVM_RESP_SUCCESS;
638
639 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
640 if (chipset_inited) {
641 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
642 goto cleanup;
643 }
644 chipset_inited = 1;
645 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
646
 647 /* Set features to indicate we support parahotplug (if Command
 648 * also supports it). */
 649 features =
 650 inmsg->cmd.init_chipset.features &
 651 ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
652
653 /* Set the "reply" bit so Command knows this is a
654 * features-aware driver. */
655 features |= ULTRA_CHIPSET_FEATURE_REPLY;
656
657cleanup:
658 if (rc < 0)
659 cleanup_controlvm_structures();
660 if (inmsg->hdr.flags.response_expected)
661 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
662}
663
664static void
665controlvm_init_response(struct controlvm_message *msg,
666 struct controlvm_message_header *msg_hdr, int response)
667{
668 memset(msg, 0, sizeof(struct controlvm_message));
669 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
670 msg->hdr.payload_bytes = 0;
671 msg->hdr.payload_vm_offset = 0;
672 msg->hdr.payload_max_bytes = 0;
673 if (response < 0) {
674 msg->hdr.flags.failed = 1;
675 msg->hdr.completion_status = (u32) (-response);
676 }
677}
678
679static void
680controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
681{
682 struct controlvm_message outmsg;
683
684 controlvm_init_response(&outmsg, msg_hdr, response);
685 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
686 * back the deviceChangeState structure in the packet. */
687 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
688 g_devicechangestate_packet.device_change_state.bus_no ==
689 g_diagpool_bus_no &&
690 g_devicechangestate_packet.device_change_state.dev_no ==
691 g_diagpool_dev_no)
692 outmsg.cmd = g_devicechangestate_packet;
693 if (outmsg.hdr.flags.test_message == 1)
694 return;
695
696 if (!visorchannel_signalinsert(controlvm_channel,
697 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
698 return;
699 }
700}
701
702static void
703controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
704 int response,
705 enum ultra_chipset_feature features)
706{
707 struct controlvm_message outmsg;
708
709 controlvm_init_response(&outmsg, msg_hdr, response);
710 outmsg.cmd.init_chipset.features = features;
711 if (!visorchannel_signalinsert(controlvm_channel,
712 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
713 return;
714 }
715}
716
717static void controlvm_respond_physdev_changestate(
718 struct controlvm_message_header *msg_hdr, int response,
719 struct spar_segment_state state)
720{
721 struct controlvm_message outmsg;
722
723 controlvm_init_response(&outmsg, msg_hdr, response);
724 outmsg.cmd.device_change_state.state = state;
725 outmsg.cmd.device_change_state.flags.phys_device = 1;
726 if (!visorchannel_signalinsert(controlvm_channel,
727 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
728 return;
729 }
730}
731
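/* Stash a bus-create or device-create message in the crash area of the
 * controlvm channel, presumably so it can be replayed when a crash
 * (kdump) kernel boots (see setup_crash_devices_work_queue() below).
 * The CRASH_BUS message occupies the first slot at
 * saved_crash_message_offset; the CRASH_DEV message is written
 * immediately after it.
 */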
732void
733visorchipset_save_message(struct controlvm_message *msg,
734 enum crash_obj_type type)
735{
736 u32 crash_msg_offset;
737 u16 crash_msg_count;
738
739 /* get saved message count */
740 if (visorchannel_read(controlvm_channel,
741 offsetof(struct spar_controlvm_channel_protocol,
742 saved_crash_message_count),
743 &crash_msg_count, sizeof(u16)) < 0) {
744 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
745 POSTCODE_SEVERITY_ERR);
746 return;
747 }
748
749 if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
750 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
751 crash_msg_count,
752 POSTCODE_SEVERITY_ERR);
753 return;
754 }
755
756 /* get saved crash message offset */
757 if (visorchannel_read(controlvm_channel,
758 offsetof(struct spar_controlvm_channel_protocol,
759 saved_crash_message_offset),
760 &crash_msg_offset, sizeof(u32)) < 0) {
761 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
762 POSTCODE_SEVERITY_ERR);
763 return;
764 }
765
766 if (type == CRASH_BUS) {
767 if (visorchannel_write(controlvm_channel,
768 crash_msg_offset,
769 msg,
770 sizeof(struct controlvm_message)) < 0) {
771 POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
772 POSTCODE_SEVERITY_ERR);
773 return;
774 }
775 } else {
776 if (visorchannel_write(controlvm_channel,
777 crash_msg_offset +
778 sizeof(struct controlvm_message), msg,
779 sizeof(struct controlvm_message)) < 0) {
780 POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
781 POSTCODE_SEVERITY_ERR);
782 return;
783 }
784 }
785}
786EXPORT_SYMBOL_GPL(visorchipset_save_message);
787
788static void
789bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
790{
791 struct visorchipset_bus_info *p = NULL;
792 bool need_clear = false;
793
794 p = findbus(&bus_info_list, bus_no);
795 if (!p)
796 return;
797
798 if (response < 0) {
799 if ((cmd_id == CONTROLVM_BUS_CREATE) &&
800 (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
801 /* undo the row we just created... */
802 delbusdevices(&dev_info_list, bus_no);
803 } else {
804 if (cmd_id == CONTROLVM_BUS_CREATE)
805 p->state.created = 1;
806 if (cmd_id == CONTROLVM_BUS_DESTROY)
807 need_clear = true;
808 }
809
810 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
811 return; /* no controlvm response needed */
812 if (p->pending_msg_hdr.id != (u32)cmd_id)
813 return;
814 controlvm_respond(&p->pending_msg_hdr, response);
815 p->pending_msg_hdr.id = CONTROLVM_INVALID;
816 if (need_clear) {
817 bus_info_clear(p);
818 delbusdevices(&dev_info_list, bus_no);
819 }
820}
821
822static void
823device_changestate_responder(enum controlvm_id cmd_id,
824 u32 bus_no, u32 dev_no, int response,
825 struct spar_segment_state response_state)
826{
827 struct visorchipset_device_info *p = NULL;
828 struct controlvm_message outmsg;
829
830 p = finddevice(&dev_info_list, bus_no, dev_no);
831 if (!p)
832 return;
833 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
834 return; /* no controlvm response needed */
835 if (p->pending_msg_hdr.id != cmd_id)
836 return;
837
838 controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);
839
840 outmsg.cmd.device_change_state.bus_no = bus_no;
841 outmsg.cmd.device_change_state.dev_no = dev_no;
842 outmsg.cmd.device_change_state.state = response_state;
843
844 if (!visorchannel_signalinsert(controlvm_channel,
845 CONTROLVM_QUEUE_REQUEST, &outmsg))
846 return;
847
848 p->pending_msg_hdr.id = CONTROLVM_INVALID;
849}
850
851static void
852device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
853{
854 struct visorchipset_device_info *p = NULL;
855 bool need_clear = false;
856
857 p = finddevice(&dev_info_list, bus_no, dev_no);
858 if (!p)
859 return;
860 if (response >= 0) {
861 if (cmd_id == CONTROLVM_DEVICE_CREATE)
862 p->state.created = 1;
863 if (cmd_id == CONTROLVM_DEVICE_DESTROY)
864 need_clear = true;
865 }
866
867 if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
868 return; /* no controlvm response needed */
869
870 if (p->pending_msg_hdr.id != (u32)cmd_id)
871 return;
872
873 controlvm_respond(&p->pending_msg_hdr, response);
874 p->pending_msg_hdr.id = CONTROLVM_INVALID;
875 if (need_clear)
876 dev_info_clear(p);
877}
878
879static void
880bus_epilog(u32 bus_no,
881 u32 cmd, struct controlvm_message_header *msg_hdr,
882 int response, bool need_response)
883{
884 bool notified = false;
885
886 struct visorchipset_bus_info *bus_info = findbus(&bus_info_list,
887 bus_no);
888
889 if (!bus_info)
890 return;
891
892 if (need_response) {
893 memcpy(&bus_info->pending_msg_hdr, msg_hdr,
894 sizeof(struct controlvm_message_header));
895 } else {
896 bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
897 }
898
899 down(&notifier_lock);
900 if (response == CONTROLVM_RESP_SUCCESS) {
901 switch (cmd) {
902 case CONTROLVM_BUS_CREATE:
 903 /* We can't tell from the bus_create
 904 * information which of our 2 bus flavors the
 905 * devices on this bus will ultimately end up
 906 * on. Fortunately, it is harmless to send the
 907 * bus_create to both. We can narrow things
 908 * down a little, though, because we know:
 909 * - BusDev_Server can handle either server
 910 * or client devices
 911 * - BusDev_Client can handle ONLY client
 912 * devices */
913 if (busdev_server_notifiers.bus_create) {
914 (*busdev_server_notifiers.bus_create) (bus_no);
915 notified = true;
916 }
917 if ((!bus_info->flags.server) /*client */ &&
918 busdev_client_notifiers.bus_create) {
919 (*busdev_client_notifiers.bus_create) (bus_no);
920 notified = true;
921 }
922 break;
923 case CONTROLVM_BUS_DESTROY:
924 if (busdev_server_notifiers.bus_destroy) {
925 (*busdev_server_notifiers.bus_destroy) (bus_no);
926 notified = true;
927 }
928 if ((!bus_info->flags.server) /*client */ &&
929 busdev_client_notifiers.bus_destroy) {
930 (*busdev_client_notifiers.bus_destroy) (bus_no);
931 notified = true;
932 }
933 break;
934 }
935 }
936 if (notified)
937 /* The callback function just called above is responsible
938 * for calling the appropriate visorchipset_busdev_responders
939 * function, which will call bus_responder()
940 */
941 ;
942 else
943 bus_responder(cmd, bus_no, response);
944 up(&notifier_lock);
945}
946
947static void
948device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
949 struct controlvm_message_header *msg_hdr, int response,
950 bool need_response, bool for_visorbus)
951{
952 struct visorchipset_busdev_notifiers *notifiers = NULL;
953 bool notified = false;
954
955 struct visorchipset_device_info *dev_info =
956 finddevice(&dev_info_list, bus_no, dev_no);
957 char *envp[] = {
958 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
959 NULL
960 };
961
962 if (!dev_info)
963 return;
964
965 if (for_visorbus)
966 notifiers = &busdev_server_notifiers;
967 else
968 notifiers = &busdev_client_notifiers;
969 if (need_response) {
970 memcpy(&dev_info->pending_msg_hdr, msg_hdr,
971 sizeof(struct controlvm_message_header));
972 } else {
973 dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
974 }
975
976 down(&notifier_lock);
977 if (response >= 0) {
978 switch (cmd) {
979 case CONTROLVM_DEVICE_CREATE:
980 if (notifiers->device_create) {
981 (*notifiers->device_create) (bus_no, dev_no);
982 notified = true;
983 }
984 break;
985 case CONTROLVM_DEVICE_CHANGESTATE:
986 /* ServerReady / ServerRunning / SegmentStateRunning */
987 if (state.alive == segment_state_running.alive &&
988 state.operating ==
989 segment_state_running.operating) {
990 if (notifiers->device_resume) {
991 (*notifiers->device_resume) (bus_no,
992 dev_no);
993 notified = true;
994 }
995 }
996 /* ServerNotReady / ServerLost / SegmentStateStandby */
997 else if (state.alive == segment_state_standby.alive &&
998 state.operating ==
999 segment_state_standby.operating) {
1000 /* technically this is standby case
1001 * where server is lost
1002 */
1003 if (notifiers->device_pause) {
1004 (*notifiers->device_pause) (bus_no,
1005 dev_no);
1006 notified = true;
1007 }
1008 } else if (state.alive == segment_state_paused.alive &&
1009 state.operating ==
1010 segment_state_paused.operating) {
1012 /* this is a "lite" pause, where the
1013 * channel remains valid; the device is
1014 * merely paused */
1014 if (bus_no == g_diagpool_bus_no &&
1015 dev_no == g_diagpool_dev_no) {
1016 /* this will trigger the
1017 * diag_shutdown.sh script in
1018 * the visorchipset hotplug */
1019 kobject_uevent_env
1020 (&visorchipset_platform_device.dev.
1021 kobj, KOBJ_ONLINE, envp);
1022 }
1023 }
1024 break;
1025 case CONTROLVM_DEVICE_DESTROY:
1026 if (notifiers->device_destroy) {
1027 (*notifiers->device_destroy) (bus_no, dev_no);
1028 notified = true;
1029 }
1030 break;
1031 }
1032 }
1033 if (notified)
1034 /* The callback function just called above is responsible
1035 * for calling the appropriate visorchipset_busdev_responders
1036 * function, which will call device_responder()
1037 */
1038 ;
1039 else
1040 device_responder(cmd, bus_no, dev_no, response);
1041 up(&notifier_lock);
1042}
1043
1044static void
1045bus_create(struct controlvm_message *inmsg)
1046{
1047 struct controlvm_message_packet *cmd = &inmsg->cmd;
1048 u32 bus_no = cmd->create_bus.bus_no;
1049 int rc = CONTROLVM_RESP_SUCCESS;
1050 struct visorchipset_bus_info *bus_info = NULL;
1051
1052 bus_info = findbus(&bus_info_list, bus_no);
1053 if (bus_info && (bus_info->state.created == 1)) {
1054 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1055 POSTCODE_SEVERITY_ERR);
1056 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1057 goto cleanup;
1058 }
1059 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1060 if (!bus_info) {
1061 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1062 POSTCODE_SEVERITY_ERR);
1063 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1064 goto cleanup;
1065 }
1066
1067 INIT_LIST_HEAD(&bus_info->entry);
1068 bus_info->bus_no = bus_no;
1069 bus_info->dev_no = cmd->create_bus.dev_count;
1070
1071 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
1072
1073 if (inmsg->hdr.flags.test_message == 1)
1074 bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1075 else
1076 bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1077
1078 bus_info->flags.server = inmsg->hdr.flags.server;
1079 bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
1080 bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
1081 bus_info->chan_info.channel_type_uuid =
1082 cmd->create_bus.bus_data_type_uuid;
1083 bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;
1084
1085 list_add(&bus_info->entry, &bus_info_list);
1086
1087 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
1088
1089cleanup:
1090 bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1091 rc, inmsg->hdr.flags.response_expected == 1);
1092}
1093
1094static void
1095bus_destroy(struct controlvm_message *inmsg)
1096{
1097 struct controlvm_message_packet *cmd = &inmsg->cmd;
1098 u32 bus_no = cmd->destroy_bus.bus_no;
1099 struct visorchipset_bus_info *bus_info;
1100 int rc = CONTROLVM_RESP_SUCCESS;
1101
1102 bus_info = findbus(&bus_info_list, bus_no);
1103 if (!bus_info)
1104 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1105 else if (bus_info->state.created == 0)
1106 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1107
1108 bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1109 rc, inmsg->hdr.flags.response_expected == 1);
1110}
1111
1112static void
1113bus_configure(struct controlvm_message *inmsg,
1114 struct parser_context *parser_ctx)
1115{
1116 struct controlvm_message_packet *cmd = &inmsg->cmd;
1117 u32 bus_no = cmd->configure_bus.bus_no;
1118 struct visorchipset_bus_info *bus_info = NULL;
1119 int rc = CONTROLVM_RESP_SUCCESS;
1120 char s[99];
1121
1123 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1124 POSTCODE_SEVERITY_INFO);
1125
1126 bus_info = findbus(&bus_info_list, bus_no);
1127 if (!bus_info) {
1128 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1129 POSTCODE_SEVERITY_ERR);
1130 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1131 } else if (bus_info->state.created == 0) {
1132 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1133 POSTCODE_SEVERITY_ERR);
1134 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1135 } else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
1136 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1137 POSTCODE_SEVERITY_ERR);
1138 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1139 } else {
1140 bus_info->partition_handle = cmd->configure_bus.guest_handle;
1141 bus_info->partition_uuid = parser_id_get(parser_ctx);
1142 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1143 bus_info->name = parser_string_get(parser_ctx);
1144
1145 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1146 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1147 POSTCODE_SEVERITY_INFO);
1148 }
1149 bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1150 rc, inmsg->hdr.flags.response_expected == 1);
1151}
1152
1153static void
1154my_device_create(struct controlvm_message *inmsg)
1155{
1156 struct controlvm_message_packet *cmd = &inmsg->cmd;
1157 u32 bus_no = cmd->create_device.bus_no;
1158 u32 dev_no = cmd->create_device.dev_no;
1159 struct visorchipset_device_info *dev_info = NULL;
1160 struct visorchipset_bus_info *bus_info = NULL;
1161 int rc = CONTROLVM_RESP_SUCCESS;
1162
1163 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1164 if (dev_info && (dev_info->state.created == 1)) {
1165 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1166 POSTCODE_SEVERITY_ERR);
1167 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1168 goto cleanup;
1169 }
1170 bus_info = findbus(&bus_info_list, bus_no);
1171 if (!bus_info) {
1172 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1173 POSTCODE_SEVERITY_ERR);
1174 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1175 goto cleanup;
1176 }
1177 if (bus_info->state.created == 0) {
1178 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1179 POSTCODE_SEVERITY_ERR);
1180 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1181 goto cleanup;
1182 }
1183 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1184 if (!dev_info) {
1185 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1186 POSTCODE_SEVERITY_ERR);
1187 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1188 goto cleanup;
1189 }
1190
1191 INIT_LIST_HEAD(&dev_info->entry);
1192 dev_info->bus_no = bus_no;
1193 dev_info->dev_no = dev_no;
1194 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1195 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
1196 POSTCODE_SEVERITY_INFO);
1197
1198 if (inmsg->hdr.flags.test_message == 1)
1199 dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
1200 else
1201 dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
1202 dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
1203 dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
1204 dev_info->chan_info.channel_type_uuid =
1205 cmd->create_device.data_type_uuid;
1206 dev_info->chan_info.intr = cmd->create_device.intr;
1207 list_add(&dev_info->entry, &dev_info_list);
1208 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
1209 POSTCODE_SEVERITY_INFO);
1210cleanup:
1211 /* get the bus and devNo for DiagPool channel */
1212 if (dev_info &&
1213 is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
1214 g_diagpool_bus_no = bus_no;
1215 g_diagpool_dev_no = dev_no;
1216 }
1217 device_epilog(bus_no, dev_no, segment_state_running,
1218 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1219 inmsg->hdr.flags.response_expected == 1,
1220 FOR_VISORBUS(dev_info->chan_info.channel_type_uuid));
1221}
1222
1223static void
1224my_device_changestate(struct controlvm_message *inmsg)
1225{
1226 struct controlvm_message_packet *cmd = &inmsg->cmd;
1227 u32 bus_no = cmd->device_change_state.bus_no;
1228 u32 dev_no = cmd->device_change_state.dev_no;
1229 struct spar_segment_state state = cmd->device_change_state.state;
1230 struct visorchipset_device_info *dev_info = NULL;
1231 int rc = CONTROLVM_RESP_SUCCESS;
1232
1233 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1234 if (!dev_info) {
1235 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1236 POSTCODE_SEVERITY_ERR);
1237 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1238 } else if (dev_info->state.created == 0) {
1239 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1240 POSTCODE_SEVERITY_ERR);
1241 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1242 }
1243 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1244 device_epilog(bus_no, dev_no, state,
1245 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
1246 inmsg->hdr.flags.response_expected == 1,
1247 FOR_VISORBUS(
1248 dev_info->chan_info.channel_type_uuid));
1249}
1250
1251static void
1252my_device_destroy(struct controlvm_message *inmsg)
1253{
1254 struct controlvm_message_packet *cmd = &inmsg->cmd;
1255 u32 bus_no = cmd->destroy_device.bus_no;
1256 u32 dev_no = cmd->destroy_device.dev_no;
1257 struct visorchipset_device_info *dev_info = NULL;
1258 int rc = CONTROLVM_RESP_SUCCESS;
1259
1260 dev_info = finddevice(&dev_info_list, bus_no, dev_no);
1261 if (!dev_info)
1262 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1263 else if (dev_info->state.created == 0)
1264 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1265
1266 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1267 device_epilog(bus_no, dev_no, segment_state_running,
1268 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1269 inmsg->hdr.flags.response_expected == 1,
1270 FOR_VISORBUS(
1271 dev_info->chan_info.channel_type_uuid));
1272}
1273
1274/* When provided with the physical address of the controlvm channel
1275 * (phys_addr), the offset to the payload area we need to manage
1276 * (offset), and the size of this payload area (bytes), fills in the
1277 * controlvm_payload_info struct. Returns CONTROLVM_RESP_SUCCESS on
1278 * success, or a negative CONTROLVM_RESP_ERROR code on failure.
1279 */
1280static int
1281initialize_controlvm_payload_info(HOSTADDRESS phys_addr, u64 offset, u32 bytes,
1282 struct visor_controlvm_payload_info *info)
1283{
1284 u8 __iomem *payload = NULL;
1285 int rc = CONTROLVM_RESP_SUCCESS;
1286
1287 if (!info) {
1288 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1289 goto cleanup;
1290 }
1291 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1292 if ((offset == 0) || (bytes == 0)) {
1293 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1294 goto cleanup;
1295 }
1296 payload = ioremap_cache(phys_addr + offset, bytes);
1297 if (!payload) {
1298 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1299 goto cleanup;
1300 }
1301
1302 info->offset = offset;
1303 info->bytes = bytes;
1304 info->ptr = payload;
1305
1306cleanup:
1307 if (rc < 0) {
1308 if (payload) {
1309 iounmap(payload);
1310 payload = NULL;
1311 }
1312 }
1313 return rc;
1314}
1315
1316static void
1317destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
1318{
1319 if (info->ptr) {
1320 iounmap(info->ptr);
1321 info->ptr = NULL;
1322 }
1323 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1324}
1325
1326static void
1327initialize_controlvm_payload(void)
1328{
1329 HOSTADDRESS phys_addr = visorchannel_get_physaddr(controlvm_channel);
1330 u64 payload_offset = 0;
1331 u32 payload_bytes = 0;
1332
1333 if (visorchannel_read(controlvm_channel,
1334 offsetof(struct spar_controlvm_channel_protocol,
1335 request_payload_offset),
1336 &payload_offset, sizeof(payload_offset)) < 0) {
1337 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1338 POSTCODE_SEVERITY_ERR);
1339 return;
1340 }
1341 if (visorchannel_read(controlvm_channel,
1342 offsetof(struct spar_controlvm_channel_protocol,
1343 request_payload_bytes),
1344 &payload_bytes, sizeof(payload_bytes)) < 0) {
1345 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1346 POSTCODE_SEVERITY_ERR);
1347 return;
1348 }
1349 initialize_controlvm_payload_info(phys_addr,
1350 payload_offset, payload_bytes,
1351 &controlvm_payload_info);
1352}
1353
1354/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1355 * Returns CONTROLVM_RESP_xxx code.
1356 */
1357int
1358visorchipset_chipset_ready(void)
1359{
1360 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1361 return CONTROLVM_RESP_SUCCESS;
1362}
1363EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);
1364
1365int
1366visorchipset_chipset_selftest(void)
1367{
1368 char env_selftest[20];
1369 char *envp[] = { env_selftest, NULL };
1370
1371 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1372 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1373 envp);
1374 return CONTROLVM_RESP_SUCCESS;
1375}
1376EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);
1377
1378/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1379 * Returns CONTROLVM_RESP_xxx code.
1380 */
1381int
1382visorchipset_chipset_notready(void)
1383{
1384 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1385 return CONTROLVM_RESP_SUCCESS;
1386}
1387EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);
1388
1389static void
1390chipset_ready(struct controlvm_message_header *msg_hdr)
1391{
1392 int rc = visorchipset_chipset_ready();
1393
1394 if (rc != CONTROLVM_RESP_SUCCESS)
1395 rc = -rc;
1396 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1397 controlvm_respond(msg_hdr, rc);
1398 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1399 /* Send CHIPSET_READY response when all modules have been loaded
1400 * and disks mounted for the partition
1401 */
1402 g_chipset_msg_hdr = *msg_hdr;
1403 }
1404}
1405
1406static void
1407chipset_selftest(struct controlvm_message_header *msg_hdr)
1408{
1409 int rc = visorchipset_chipset_selftest();
1410
1411 if (rc != CONTROLVM_RESP_SUCCESS)
1412 rc = -rc;
1413 if (msg_hdr->flags.response_expected)
1414 controlvm_respond(msg_hdr, rc);
1415}
1416
1417static void
1418chipset_notready(struct controlvm_message_header *msg_hdr)
1419{
1420 int rc = visorchipset_chipset_notready();
1421
1422 if (rc != CONTROLVM_RESP_SUCCESS)
1423 rc = -rc;
1424 if (msg_hdr->flags.response_expected)
1425 controlvm_respond(msg_hdr, rc);
1426}
1427
1428/* This is your "one-stop" shop for grabbing the next message from the
1429 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1430 */
1431static bool
1432read_controlvm_event(struct controlvm_message *msg)
1433{
1434 if (visorchannel_signalremove(controlvm_channel,
1435 CONTROLVM_QUEUE_EVENT, msg)) {
1436 /* got a message */
1437 if (msg->hdr.flags.test_message == 1)
1438 return false;
1439 return true;
1440 }
1441 return false;
1442}
1443
1444/*
1445 * The general parahotplug flow works as follows. The visorchipset
1446 * driver receives a DEVICE_CHANGESTATE message from Command
1447 * specifying a physical device to enable or disable. The CONTROLVM
1448 * message handler calls parahotplug_process_message, which then adds
1449 * the message to a global list and kicks off a udev event which
1450 * causes a user level script to enable or disable the specified
1451 * device. The udev script then writes to
1452 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
1453 * to get called, at which point the appropriate CONTROLVM message is
1454 * retrieved from the list and responded to.
1455 */
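/* For illustration (values per parahotplug_request_kickoff() below), a
 * disable request for bus 1 with dev_no 34 would reach the udev script
 * with an environment along the lines of:
 *
 *	SPAR_PARAHOTPLUG=1
 *	SPAR_PARAHOTPLUG_ID=<unique id>
 *	SPAR_PARAHOTPLUG_STATE=0
 *	SPAR_PARAHOTPLUG_BUS=1
 *	SPAR_PARAHOTPLUG_DEVICE=4	(dev_no >> 3)
 *	SPAR_PARAHOTPLUG_FUNCTION=2	(dev_no & 0x7)
 */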
1456
1457#define PARAHOTPLUG_TIMEOUT_MS 2000
1458
1459/*
1460 * Generate unique int to match an outstanding CONTROLVM message with a
1461 * udev script /proc response
1462 */
1463static int
1464parahotplug_next_id(void)
1465{
1466 static atomic_t id = ATOMIC_INIT(0);
1467
1468 return atomic_inc_return(&id);
1469}
1470
1471/*
1472 * Returns the time (in jiffies) when a CONTROLVM message on the list
1473 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1474 */
1475static unsigned long
1476parahotplug_next_expiration(void)
1477{
1478 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1479}
1480
1481/*
1482 * Create a parahotplug_request, which is basically a wrapper for a
1483 * CONTROLVM_MESSAGE that we can stick on a list
1484 */
1485static struct parahotplug_request *
1486parahotplug_request_create(struct controlvm_message *msg)
1487{
1488 struct parahotplug_request *req;
1489
1490 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1491 if (!req)
1492 return NULL;
1493
1494 req->id = parahotplug_next_id();
1495 req->expiration = parahotplug_next_expiration();
1496 req->msg = *msg;
1497
1498 return req;
1499}
1500
1501/*
1502 * Free a parahotplug_request.
1503 */
1504static void
1505parahotplug_request_destroy(struct parahotplug_request *req)
1506{
1507 kfree(req);
1508}
1509
1510/*
1511 * Cause uevent to run the user level script to do the disable/enable
1512 * specified in (the CONTROLVM message in) the specified
1513 * parahotplug_request
1514 */
1515static void
1516parahotplug_request_kickoff(struct parahotplug_request *req)
1517{
1518 struct controlvm_message_packet *cmd = &req->msg.cmd;
1519 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1520 env_func[40];
1521 char *envp[] = {
1522 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1523 };
1524
1525 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1526 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1527 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1528 cmd->device_change_state.state.active);
1529 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1530 cmd->device_change_state.bus_no);
1531 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1532 cmd->device_change_state.dev_no >> 3);
1533 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1534 cmd->device_change_state.dev_no & 0x7);
1535
1536 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1537 envp);
1538}
1539
1540/*
1541 * Remove any request from the list that's been on there too long and
1542 * respond with an error.
1543 */
1544static void
1545parahotplug_process_list(void)
1546{
1547 struct list_head *pos = NULL;
1548 struct list_head *tmp = NULL;
1549
1550 spin_lock(&parahotplug_request_list_lock);
1551
1552 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1553 struct parahotplug_request *req =
1554 list_entry(pos, struct parahotplug_request, list);
1555
1556 if (!time_after_eq(jiffies, req->expiration))
1557 continue;
1558
1559 list_del(pos);
1560 if (req->msg.hdr.flags.response_expected)
1561 controlvm_respond_physdev_changestate(
1562 &req->msg.hdr,
1563 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1564 req->msg.cmd.device_change_state.state);
1565 parahotplug_request_destroy(req);
1566 }
1567
1568 spin_unlock(&parahotplug_request_list_lock);
1569}
1570
1571/*
1572 * Called from the /proc handler, which means the user script has
1573 * finished the enable/disable. Find the matching identifier, and
1574 * respond to the CONTROLVM message with success.
1575 */
1576static int
1577parahotplug_request_complete(int id, u16 active)
1578{
1579 struct list_head *pos = NULL;
1580 struct list_head *tmp = NULL;
1581
1582 spin_lock(&parahotplug_request_list_lock);
1583
1584 /* Look for a request matching "id". */
1585 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1586 struct parahotplug_request *req =
1587 list_entry(pos, struct parahotplug_request, list);
1588 if (req->id == id) {
1589 /* Found a match. Remove it from the list and
1590 * respond.
1591 */
1592 list_del(pos);
1593 spin_unlock(&parahotplug_request_list_lock);
1594 req->msg.cmd.device_change_state.state.active = active;
1595 if (req->msg.hdr.flags.response_expected)
1596 controlvm_respond_physdev_changestate(
1597 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1598 req->msg.cmd.device_change_state.state);
1599 parahotplug_request_destroy(req);
1600 return 0;
1601 }
1602 }
1603
1604 spin_unlock(&parahotplug_request_list_lock);
1605 return -1;
1606}
1607
1608/*
1609 * Enables or disables a PCI device by kicking off a udev script
1610 */
1611static void
1612parahotplug_process_message(struct controlvm_message *inmsg)
1613{
1614 struct parahotplug_request *req;
1615
1616 req = parahotplug_request_create(inmsg);
1617
1618 if (!req)
1619 return;
1620
1621 if (inmsg->cmd.device_change_state.state.active) {
1622 /* For enable messages, just respond with success
1623 * right away. This is a bit of a hack, but there are
1624 * issues with the early enable messages we get (with
1625 * either the udev script not detecting that the device
1626 * is up, or not getting called at all). Fortunately
1627 * the messages that get lost don't matter anyway, as
1628 * devices are automatically enabled at
1629 * initialization.
1630 */
1631 parahotplug_request_kickoff(req);
1632 controlvm_respond_physdev_changestate(&inmsg->hdr,
1633 CONTROLVM_RESP_SUCCESS,
1634 inmsg->cmd.device_change_state.state);
1635 parahotplug_request_destroy(req);
1636 } else {
1637 /* For disable messages, add the request to the
1638 * request list before kicking off the udev script. It
1639 * won't get responded to until the script has
1640 * indicated it's done.
1641 */
1642 spin_lock(&parahotplug_request_list_lock);
1643 list_add_tail(&req->list, &parahotplug_request_list);
1644 spin_unlock(&parahotplug_request_list_lock);
1645
1646 parahotplug_request_kickoff(req);
1647 }
1648}
1649
1650/* Process a controlvm message.
1651 * Return result:
1652 * false - this function will return false only in the case where the
1653 * controlvm message was NOT processed, but processing must be
1654 * retried before reading the next controlvm message; a
1655 * scenario where this can occur is when we need to throttle
1656 * the allocation of memory in which to copy out controlvm
1657 * payload data
1658 * true - processing of the controlvm message completed,
1659 * either successfully or with an error.
1660 */
1661static bool
1662handle_command(struct controlvm_message inmsg, HOSTADDRESS channel_addr)
1663{
1664 struct controlvm_message_packet *cmd = &inmsg.cmd;
1665 u64 parm_addr = 0;
1666 u32 parm_bytes = 0;
1667 struct parser_context *parser_ctx = NULL;
1668 bool local_addr = false;
1669 struct controlvm_message ackmsg;
1670
1671 /* create parsing context if necessary */
1672 local_addr = (inmsg.hdr.flags.test_message == 1);
1673 if (channel_addr == 0)
1674 return true;
1675 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1676 parm_bytes = inmsg.hdr.payload_bytes;
1677
1678 /* Parameter and channel addresses within test messages actually lie
1679 * within our OS-controlled memory. We need to know that, because it
1680 * makes a difference in how we compute the virtual address.
1681 */
1682 if (parm_addr != 0 && parm_bytes != 0) {
1683 bool retry = false;
1684
1685 parser_ctx =
1686 parser_init_byte_stream(parm_addr, parm_bytes,
1687 local_addr, &retry);
1688 if (!parser_ctx && retry)
1689 return false;
1690 }
1691
1692 if (!local_addr) {
1693 controlvm_init_response(&ackmsg, &inmsg.hdr,
1694 CONTROLVM_RESP_SUCCESS);
1695 if (controlvm_channel)
1696 visorchannel_signalinsert(controlvm_channel,
1697 CONTROLVM_QUEUE_ACK,
1698 &ackmsg);
1699 }
1700 switch (inmsg.hdr.id) {
1701 case CONTROLVM_CHIPSET_INIT:
1702 chipset_init(&inmsg);
1703 break;
1704 case CONTROLVM_BUS_CREATE:
1705 bus_create(&inmsg);
1706 break;
1707 case CONTROLVM_BUS_DESTROY:
1708 bus_destroy(&inmsg);
1709 break;
1710 case CONTROLVM_BUS_CONFIGURE:
1711 bus_configure(&inmsg, parser_ctx);
1712 break;
1713 case CONTROLVM_DEVICE_CREATE:
1714 my_device_create(&inmsg);
1715 break;
1716 case CONTROLVM_DEVICE_CHANGESTATE:
1717 if (cmd->device_change_state.flags.phys_device) {
1718 parahotplug_process_message(&inmsg);
1719 } else {
1720 /* save the hdr and cmd structures for later use */
1721 /* when sending back the response to Command */
1722 my_device_changestate(&inmsg);
1723 g_diag_msg_hdr = inmsg.hdr;
1724 g_devicechangestate_packet = inmsg.cmd;
1725 break;
1726 }
1727 break;
1728 case CONTROLVM_DEVICE_DESTROY:
1729 my_device_destroy(&inmsg);
1730 break;
1731 case CONTROLVM_DEVICE_CONFIGURE:
1732 /* no op for now, just send a respond that we passed */
1733 if (inmsg.hdr.flags.response_expected)
1734 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1735 break;
1736 case CONTROLVM_CHIPSET_READY:
1737 chipset_ready(&inmsg.hdr);
1738 break;
1739 case CONTROLVM_CHIPSET_SELFTEST:
1740 chipset_selftest(&inmsg.hdr);
1741 break;
1742 case CONTROLVM_CHIPSET_STOP:
1743 chipset_notready(&inmsg.hdr);
1744 break;
1745 default:
1746 if (inmsg.hdr.flags.response_expected)
1747 controlvm_respond(&inmsg.hdr,
1748 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1749 break;
1750 }
1751
1752 if (parser_ctx) {
1753 parser_done(parser_ctx);
1754 parser_ctx = NULL;
1755 }
1756 return true;
1757}
1758
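/* Ask the hypervisor, via a vmcall, where the controlvm channel lives;
 * returns 0 if the vmcall fails.
 */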
1759static HOSTADDRESS controlvm_get_channel_address(void)
1760{
1761 u64 addr = 0;
1762 u32 size = 0;
1763
1764 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1765 return 0;
1766
1767 return addr;
1768}
1769
1770static void
1771controlvm_periodic_work(struct work_struct *work)
1772{
1773 struct controlvm_message inmsg;
1774 bool got_command = false;
1775 bool handle_command_failed = false;
1776 static u64 poll_count;
1777
1778 /* make sure visorbus server is registered for controlvm callbacks */
1779 if (visorchipset_serverregwait && !serverregistered)
1780 goto cleanup;
1781 /* make sure visorclientbus server is registered for controlvm
1782 * callbacks
1783 */
1784 if (visorchipset_clientregwait && !clientregistered)
1785 goto cleanup;
1786
1787 poll_count++;
1788 if (poll_count < 250)
1789 goto cleanup; /* wait out the first 250 polls before processing */
1792
1793 /* Check events to determine if response to CHIPSET_READY
1794 * should be sent
1795 */
1796	if (visorchipset_holdchipsetready &&
1797	    g_chipset_msg_hdr.id != CONTROLVM_INVALID) {
1798 if (check_chipset_events() == 1) {
1799 controlvm_respond(&g_chipset_msg_hdr, 0);
1800 clear_chipset_events();
1801 memset(&g_chipset_msg_hdr, 0,
1802 sizeof(struct controlvm_message_header));
1803 }
1804 }
1805
1806 while (visorchannel_signalremove(controlvm_channel,
1807 CONTROLVM_QUEUE_RESPONSE,
1808 &inmsg))
1809 ;
1810 if (!got_command) {
1811 if (controlvm_pending_msg_valid) {
1812 /* we throttled processing of a prior
1813 * msg, so try to process it again
1814 * rather than reading a new one
1815 */
1816 inmsg = controlvm_pending_msg;
1817 controlvm_pending_msg_valid = false;
1818 got_command = true;
1819 } else {
1820 got_command = read_controlvm_event(&inmsg);
1821 }
1822 }
1823
1824 handle_command_failed = false;
1825	while (got_command && !handle_command_failed) {
1826		most_recent_message_jiffies = jiffies;
1827		if (handle_command(inmsg,
1828				   visorchannel_get_physaddr
1829				   (controlvm_channel))) {
1830			got_command = read_controlvm_event(&inmsg);
1831		} else {
1832			/* this is a scenario where throttling
1833			 * is required, but probably NOT an
1834			 * error; we stash the current
1835			 * controlvm msg so we will attempt to
1836			 * reprocess it on our next loop
1837			 */
1838 handle_command_failed = true;
1839 controlvm_pending_msg = inmsg;
1840 controlvm_pending_msg_valid = true;
1841 }
1842 }
1843
1844	/* process any pending parahotplug requests */
1845 parahotplug_process_list();
1846
1847cleanup:
1848
1849 if (time_after(jiffies,
1850 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1851 /* it's been longer than MIN_IDLE_SECONDS since we
1852 * processed our last controlvm message; slow down the
1853 * polling
1854 */
1855 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1856 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1857 } else {
1858 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1859 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1860 }
1861
1862 queue_delayed_work(periodic_controlvm_workqueue,
1863 &periodic_controlvm_work, poll_jiffies);
1864}
1865
1866static void
1867setup_crash_devices_work_queue(struct work_struct *work)
1868{
1869 struct controlvm_message local_crash_bus_msg;
1870 struct controlvm_message local_crash_dev_msg;
1871 struct controlvm_message msg;
1872 u32 local_crash_msg_offset;
1873 u16 local_crash_msg_count;
1874
1875 /* make sure visorbus server is registered for controlvm callbacks */
1876 if (visorchipset_serverregwait && !serverregistered)
1877 goto cleanup;
1878
1879	/* make sure visorclientbus server is registered for controlvm
1880 * callbacks
1881 */
1882 if (visorchipset_clientregwait && !clientregistered)
1883 goto cleanup;
1884
1885 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1886
1887 /* send init chipset msg */
1888 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1889 msg.cmd.init_chipset.bus_count = 23;
1890 msg.cmd.init_chipset.switch_count = 0;
1891
1892 chipset_init(&msg);
1893
1894 /* get saved message count */
1895 if (visorchannel_read(controlvm_channel,
1896 offsetof(struct spar_controlvm_channel_protocol,
1897 saved_crash_message_count),
1898 &local_crash_msg_count, sizeof(u16)) < 0) {
1899 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1900 POSTCODE_SEVERITY_ERR);
1901 return;
1902 }
1903
1904 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1905 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1906 local_crash_msg_count,
1907 POSTCODE_SEVERITY_ERR);
1908 return;
1909 }
1910
1911 /* get saved crash message offset */
1912 if (visorchannel_read(controlvm_channel,
1913 offsetof(struct spar_controlvm_channel_protocol,
1914 saved_crash_message_offset),
1915 &local_crash_msg_offset, sizeof(u32)) < 0) {
1916 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1917 POSTCODE_SEVERITY_ERR);
1918 return;
1919 }
1920
1921 /* read create device message for storage bus offset */
1922 if (visorchannel_read(controlvm_channel,
1923 local_crash_msg_offset,
1924 &local_crash_bus_msg,
1925 sizeof(struct controlvm_message)) < 0) {
1926 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1927 POSTCODE_SEVERITY_ERR);
1928 return;
1929 }
1930
1931 /* read create device message for storage device */
1932 if (visorchannel_read(controlvm_channel,
1933 local_crash_msg_offset +
1934 sizeof(struct controlvm_message),
1935 &local_crash_dev_msg,
1936 sizeof(struct controlvm_message)) < 0) {
1937 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1938 POSTCODE_SEVERITY_ERR);
1939 return;
1940 }
1941
1942 /* reuse IOVM create bus message */
1943 if (local_crash_bus_msg.cmd.create_bus.channel_addr != 0) {
1944 bus_create(&local_crash_bus_msg);
1945 } else {
1946 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1947 POSTCODE_SEVERITY_ERR);
1948 return;
1949 }
1950
1951 /* reuse create device message for storage device */
1952 if (local_crash_dev_msg.cmd.create_device.channel_addr != 0) {
1953 my_device_create(&local_crash_dev_msg);
1954 } else {
1955 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
1956 POSTCODE_SEVERITY_ERR);
1957 return;
1958 }
1959 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
1960 return;
1961
1962cleanup:
1963
1964 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1965
1966 queue_delayed_work(periodic_controlvm_workqueue,
1967 &periodic_controlvm_work, poll_jiffies);
1968}
1969
1970static void
1971bus_create_response(u32 bus_no, int response)
1972{
1973 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
1974}
1975
1976static void
1977bus_destroy_response(u32 bus_no, int response)
1978{
1979 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
1980}
1981
1982static void
1983device_create_response(u32 bus_no, u32 dev_no, int response)
1984{
1985 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
1986}
1987
1988static void
1989device_destroy_response(u32 bus_no, u32 dev_no, int response)
1990{
1991 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
1992}
1993
1994void
1995visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
1996{
1997 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1998 bus_no, dev_no, response,
1999 segment_state_standby);
2000}
2001EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2002
2003static void
2004device_resume_response(u32 bus_no, u32 dev_no, int response)
2005{
2006 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2007 bus_no, dev_no, response,
2008 segment_state_running);
2009}
2010
2011bool
2012visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2013{
2014 void *p = findbus(&bus_info_list, bus_no);
2015
2016 if (!p)
2017 return false;
2018 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2019 return true;
2020}
2021EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2022
2023bool
2024visorchipset_set_bus_context(u32 bus_no, void *context)
2025{
2026 struct visorchipset_bus_info *p = findbus(&bus_info_list, bus_no);
2027
2028 if (!p)
2029 return false;
2030 p->bus_driver_context = context;
2031 return true;
2032}
2033EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2034
2035bool
2036visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2037 struct visorchipset_device_info *dev_info)
2038{
2039 void *p = finddevice(&dev_info_list, bus_no, dev_no);
2040
2041 if (!p)
2042 return false;
2043 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2044 return true;
2045}
2046EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2047
2048bool
2049visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
2050{
2051 struct visorchipset_device_info *p =
2052 finddevice(&dev_info_list, bus_no, dev_no);
2053
2054 if (!p)
2055 return false;
2056 p->bus_driver_context = context;
2057 return true;
2058}
2059EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
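
/* Illustrative sketch only (not part of this driver): a visorbus
 * function driver could pair the accessors above roughly like this;
 * my_probe and struct my_driver_data are hypothetical names.
 *
 *	static int my_probe(u32 bus_no, u32 dev_no)
 *	{
 *		struct visorchipset_device_info dev_info;
 *		struct my_driver_data *priv;
 *
 *		if (!visorchipset_get_device_info(bus_no, dev_no, &dev_info))
 *			return -ENODEV;
 *		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		if (!visorchipset_set_device_context(bus_no, dev_no, priv)) {
 *			kfree(priv);
 *			return -ENODEV;
 *		}
 *		return 0;
 *	}
 */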
2060
2061/* Generic wrapper function for allocating memory from a kmem_cache pool.
2062 */
2063void *
2064visorchipset_cache_alloc(struct kmem_cache *pool, bool ok_to_block,
2065 char *fn, int ln)
2066{
2067 gfp_t gfp;
2068 void *p;
2069
2070 if (ok_to_block)
2071 gfp = GFP_KERNEL;
2072 else
2073 gfp = GFP_ATOMIC;
2074	/* __GFP_NORETRY means "ok to fail": kmem_cache_alloc() can
2075	 * return NULL, and the caller must be able to cope with that.
2076	 * Without __GFP_NORETRY, Linux will go to extreme measures to
2077	 * get memory (e.g. invoke the oom killer), which would
2078	 * probably cripple the system.
2079	 */
2081 gfp |= __GFP_NORETRY;
2082 p = kmem_cache_alloc(pool, gfp);
2083 if (!p)
2084 return NULL;
2085
2086 atomic_inc(&visorchipset_cache_buffers_in_use);
2087 return p;
2088}
2089
2090/* Generic wrapper function for freeing memory from a kmem_cache pool.
2091 */
2092void
2093visorchipset_cache_free(struct kmem_cache *pool, void *p, char *fn, int ln)
2094{
2095 if (!p)
2096 return;
2097
2098 atomic_dec(&visorchipset_cache_buffers_in_use);
2099 kmem_cache_free(pool, p);
2100}
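
/* Illustrative sketch only (not part of this driver): because the
 * wrapper above passes __GFP_NORETRY, a NULL return is a normal
 * outcome and every caller must handle it; my_pool and struct
 * my_entry are hypothetical.
 *
 *	struct my_entry *entry;
 *
 *	entry = visorchipset_cache_alloc(my_pool, false,
 *					 __FILE__, __LINE__);
 *	if (!entry)
 *		return -ENOMEM;
 *	...
 *	visorchipset_cache_free(my_pool, entry, __FILE__, __LINE__);
 */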
2101
2102static ssize_t chipsetready_store(struct device *dev,
2103 struct device_attribute *attr,
2104 const char *buf, size_t count)
2105{
2106 char msgtype[64];
2107
2108 if (sscanf(buf, "%63s", msgtype) != 1)
2109 return -EINVAL;
2110
2111 if (strcmp(msgtype, "CALLHOMEDISK_MOUNTED") == 0) {
2112 chipset_events[0] = 1;
2113 return count;
2114 } else if (strcmp(msgtype, "MODULES_LOADED") == 0) {
2115 chipset_events[1] = 1;
2116 return count;
2117 }
2118 return -EINVAL;
2119}
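
/* Illustrative usage (sysfs path assumed from the platform device
 * name; verify against the attribute group layout): a guest script
 * reports the two events that chipsetready_store() recognizes with
 * something like
 *
 *	echo CALLHOMEDISK_MOUNTED > \
 *		/sys/devices/platform/visorchipset/guest/chipsetready
 *	echo MODULES_LOADED > \
 *		/sys/devices/platform/visorchipset/guest/chipsetready
 *
 * Once both events are recorded and holdchipsetready=1 was given, the
 * held CHIPSET_READY response is sent from controlvm_periodic_work().
 */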
2120
2121/* The parahotplug/devicedisabled interface gets called by our support script
2122 * when an SR-IOV device has been shut down. The ID is passed to the script
2123 * and then passed back when the device has been removed.
2124 */
2125static ssize_t devicedisabled_store(struct device *dev,
2126 struct device_attribute *attr,
2127 const char *buf, size_t count)
2128{
2129 uint id;
2130
2131 if (kstrtouint(buf, 10, &id) != 0)
2132 return -EINVAL;
2133
2134 parahotplug_request_complete(id, 0);
2135 return count;
2136}
2137
2138/* The parahotplug/deviceenabled interface gets called by our support script
2139 * when an SR-IOV device has been recovered. The ID is passed to the script
2140 * and then passed back when the device has been brought back up.
2141 */
2142static ssize_t deviceenabled_store(struct device *dev,
2143 struct device_attribute *attr,
2144 const char *buf, size_t count)
2145{
2146 uint id;
2147
2148 if (kstrtouint(buf, 10, &id) != 0)
2149 return -EINVAL;
2150
2151 parahotplug_request_complete(id, 1);
2152 return count;
2153}
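
/* Illustrative flow (sysfs paths assumed): parahotplug passes an id
 * to the support script with each request; the script echoes the id
 * back when the action is complete, e.g.
 *
 *	echo $ID > .../visorchipset/parahotplug/devicedisabled
 *	echo $ID > .../visorchipset/parahotplug/deviceenabled
 *
 * which lands in the two store functions above and completes the
 * matching request via parahotplug_request_complete().
 */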
2154
2155static int __init
2156visorchipset_init(void)
2157{
2158 int rc = 0, x = 0;
2159 HOSTADDRESS addr;
2160
2161 if (!unisys_spar_platform)
2162 return -ENODEV;
2163
2164 memset(&busdev_server_notifiers, 0, sizeof(busdev_server_notifiers));
2165 memset(&busdev_client_notifiers, 0, sizeof(busdev_client_notifiers));
2166 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2167 memset(&livedump_info, 0, sizeof(livedump_info));
2168 atomic_set(&livedump_info.buffers_in_use, 0);
2169
2170	if (visorchipset_testvnic) {
2171		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, x, DIAG_SEVERITY_ERR);
2172		rc = -ENODEV;
2173		goto cleanup;
2174	}
2175
2176 addr = controlvm_get_channel_address();
2177 if (addr != 0) {
2178 controlvm_channel =
2179 visorchannel_create_with_lock
2180 (addr,
2181 sizeof(struct spar_controlvm_channel_protocol),
2182 spar_controlvm_channel_protocol_uuid);
2183 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2184 visorchannel_get_header(controlvm_channel))) {
2185 initialize_controlvm_payload();
2186 } else {
2187 visorchannel_destroy(controlvm_channel);
2188 controlvm_channel = NULL;
2189 return -ENODEV;
2190 }
2191 } else {
2192 return -ENODEV;
2193 }
2194
2195 major_dev = MKDEV(visorchipset_major, 0);
2196 rc = visorchipset_file_init(major_dev, &controlvm_channel);
2197 if (rc < 0) {
2198 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2199 goto cleanup;
2200 }
2201
2202 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2203
2204 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2205
2206 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2207
2208 putfile_buffer_list_pool =
2209 kmem_cache_create(putfile_buffer_list_pool_name,
2210 sizeof(struct putfile_buffer_entry),
2211 0, SLAB_HWCACHE_ALIGN, NULL);
2212 if (!putfile_buffer_list_pool) {
2213 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2214		rc = -ENOMEM;
2215 goto cleanup;
2216 }
2217 if (!visorchipset_disable_controlvm) {
2218 /* if booting in a crash kernel */
2219 if (is_kdump_kernel())
2220 INIT_DELAYED_WORK(&periodic_controlvm_work,
2221 setup_crash_devices_work_queue);
2222 else
2223 INIT_DELAYED_WORK(&periodic_controlvm_work,
2224 controlvm_periodic_work);
2225 periodic_controlvm_workqueue =
2226 create_singlethread_workqueue("visorchipset_controlvm");
2227
2228 if (!periodic_controlvm_workqueue) {
2229 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2230 DIAG_SEVERITY_ERR);
2231 rc = -ENOMEM;
2232 goto cleanup;
2233 }
2234 most_recent_message_jiffies = jiffies;
2235 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2236		if (!queue_delayed_work(periodic_controlvm_workqueue,
2237					&periodic_controlvm_work, poll_jiffies)) {
2238			rc = -EINVAL;
2239			POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2240					 DIAG_SEVERITY_ERR);
2241			goto cleanup;
2242		}
2243 }
2244
2245 visorchipset_platform_device.dev.devt = major_dev;
2246 if (platform_device_register(&visorchipset_platform_device) < 0) {
2247 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2248		rc = -ENODEV;
2249 goto cleanup;
2250 }
2251 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2252 rc = 0;
2253cleanup:
2254 if (rc) {
2255 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2256 POSTCODE_SEVERITY_ERR);
2257 }
2258 return rc;
2259}
2260
2261static void
2262visorchipset_exit(void)
2263{
2264 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2265
2266	if (!visorchipset_disable_controlvm) {
2267		cancel_delayed_work(&periodic_controlvm_work);
2268		flush_workqueue(periodic_controlvm_workqueue);
2269		destroy_workqueue(periodic_controlvm_workqueue);
2270		periodic_controlvm_workqueue = NULL;
2271		destroy_controlvm_payload_info(&controlvm_payload_info);
2272	}
2275 if (putfile_buffer_list_pool) {
2276 kmem_cache_destroy(putfile_buffer_list_pool);
2277 putfile_buffer_list_pool = NULL;
2278 }
2279
2280 cleanup_controlvm_structures();
2281
2282 memset(&g_diag_msg_hdr, 0, sizeof(struct controlvm_message_header));
2283
2284 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2285
2286 memset(&g_del_dump_msg_hdr, 0, sizeof(struct controlvm_message_header));
2287
2288 visorchannel_destroy(controlvm_channel);
2289
2290 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2291 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2292}
2293
2294module_param_named(testvnic, visorchipset_testvnic, int, S_IRUGO);
2295MODULE_PARM_DESC(visorchipset_testvnic, "1 to test vnic, using dummy VNIC connected via a loopback to a physical ethernet");
2296module_param_named(testvnicclient, visorchipset_testvnicclient, int, S_IRUGO);
2297MODULE_PARM_DESC(visorchipset_testvnicclient, "1 to test vnic, using real VNIC channel attached to a separate IOVM guest");
2298module_param_named(testmsg, visorchipset_testmsg, int, S_IRUGO);
2299MODULE_PARM_DESC(visorchipset_testmsg,
2300 "1 to manufacture the chipset, bus, and switch messages");
2301module_param_named(major, visorchipset_major, int, S_IRUGO);
2302MODULE_PARM_DESC(visorchipset_major,
2303 "major device number to use for the device node");
2304module_param_named(serverregwait, visorchipset_serverregwait, int, S_IRUGO);
2305MODULE_PARM_DESC(visorchipset_serverregwait,
2306 "1 to have the module wait for the visor bus to register");
2307module_param_named(clientregwait, visorchipset_clientregwait, int, S_IRUGO);
2308MODULE_PARM_DESC(visorchipset_clientregwait, "1 to have the module wait for the visorclientbus to register");
2309module_param_named(testteardown, visorchipset_testteardown, int, S_IRUGO);
2310MODULE_PARM_DESC(visorchipset_testteardown,
2311 "1 to test teardown of the chipset, bus, and switch");
2312module_param_named(disable_controlvm, visorchipset_disable_controlvm, int,
2313 S_IRUGO);
2314MODULE_PARM_DESC(visorchipset_disable_controlvm,
2315 "1 to disable polling of controlVm channel");
2316module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2317 int, S_IRUGO);
2318MODULE_PARM_DESC(visorchipset_holdchipsetready,
2319 "1 to hold response to CHIPSET_READY");
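
/* Illustrative module load (parameter names are taken from the
 * module_param_named() declarations above):
 *
 *	modprobe visorchipset major=0 holdchipsetready=1
 *
 * With holdchipsetready=1, the CHIPSET_READY response is deferred
 * until both chipsetready events have been written via sysfs.
 */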
2320
2321module_init(visorchipset_init);
2322module_exit(visorchipset_exit);
2323
2324MODULE_AUTHOR("Unisys");
2325MODULE_LICENSE("GPL");
2326MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2327 VERSION);
2328MODULE_VERSION(VERSION);