drivers/staging/unisys/visorbus/visorchipset.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
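
/*
 * Worked example of the poll cadence (illustrative): the two values above
 * are in jiffies, so with CONFIG_HZ=250 the fast poll fires every jiffy
 * (~4 ms) and the slow poll every 100 jiffies (~400 ms); with
 * CONFIG_HZ=1000 the same defines give ~1 ms and ~100 ms.
 */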

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
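
/*
 * Decoding the signature above (a worked example): x86 stores these
 * registers little-endian, so 0x73696e55 is the byte sequence
 * 0x55 0x6e 0x69 0x73, i.e. "Unis"; 0x70537379 is "ysSp"; and
 * 0x34367261 is "ar64".  Concatenated across ebx, ecx, edx they spell
 * "UnisysSpar64".
 */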

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;	/* when we got our last
							 * controlvm message */
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock, and if you set <0,
	 * it is your responsibility to also free up all of the other objects
	 * in this struct (like input_buffer_list, active_buf.parser_ctx)
	 * before releasing the lock
	 */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visor_device *p, int response);
static void bus_destroy_response(struct visor_device *p, int response);
static void device_create_response(struct visor_device *p, int response);
static void device_destroy_response(struct visor_device *p, int response);
static void device_resume_response(struct visor_device *p, int response);

static void visorchipset_device_pause_response(struct visor_device *p,
					       int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
	.dev.release = visorchipset_dev_release,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * Allocate one extra byte so the payload is always
	 * '\0'-terminated.
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long)(addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		iounmap(mapping);	/* unmap before releasing the region */
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		if (ctx) {
			parser_done(ctx);
			ctx = NULL;
		}
	}
	return rc;
}
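
/*
 * Illustrative note on the retry contract above: a NULL return with
 * *retry == true means "the buffered payload bytes are at their cap or
 * the allocation failed; do not consume the message, try again on the
 * next poll", while a NULL return with *retry == false is a hard
 * failure.  handle_command() returns false in exactly the retry case so
 * controlvm_periodic_work() can stash the message and reprocess it on
 * its next run.
 */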

static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}
/* Identifies which string field within the controlvm parameters payload
 * parser_param_start() should select.
 */
enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		goto Away;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}

Away:
	return;
}

static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}

static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
	if (value == NULL)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *)(value))[value_length] = '\0';
	return value;
}

static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &(efi_spar_indication),
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = (struct visor_busdev *)data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if ((vdev->chipset_bus_no == bus_no) &&
	    (vdev->chipset_dev_no == dev_no))
		return 1;

	return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}
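
/*
 * Worked example (illustrative): with MAX_CHIPSET_EVENTS == 2, the loop
 * above AND-reduces the array, so chipset_events == {1, 1} yields 1
 * (respond to the held CHIPSET_READY), while {1, 0} or {0, 0} yields 0
 * (keep waiting until every event has been reported).
 */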

static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}
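
/*
 * Worked example of the failure encoding above (illustrative): for a
 * negative response such as -CONTROLVM_RESP_ERROR_ALREADY_DONE, the
 * outgoing header leaves with flags.failed == 1 and completion_status
 * holding the positive error value; a response of 0
 * (CONTROLVM_RESP_SUCCESS) leaves both fields zero.
 */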

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

static void
bus_responder(enum controlvm_id cmd_id,
	      struct controlvm_message_header *pending_msg_hdr,
	      int response)
{
	if (pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visor_device *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->chipset_bus_no;
	u32 dev_no = p->chipset_dev_no;

	if (p->pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr->id != cmd_id)
		return;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;
}

static void
device_responder(enum controlvm_id cmd_id,
		 struct controlvm_message_header *pending_msg_hdr,
		 int response)
{
	if (pending_msg_hdr == NULL)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}

static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create)(bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy)(bus_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		bus_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}

static void
device_epilog(struct visor_device *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	struct controlvm_message_header *pmsg_hdr = NULL;

	notifiers = &busdev_notifiers;

	if (!dev_info) {
		/* relying on a valid passed in response code */
		/* be lazy and re-use msg_hdr for this failure, is this ok?? */
		pmsg_hdr = msg_hdr;
		goto away;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto away;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto away;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create)(dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume)(dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause)(dev_info);
					notified = true;
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy)(dev_info);
				notified = true;
			}
			break;
		}
	}
away:
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		/*
		 * Do not kfree(pmsg_hdr) as this is the failure path.
		 * The success path ('notified') will call the responder
		 * directly and kfree() there.
		 */
		device_responder(cmd, pmsg_hdr, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto cleanup;
	}
	bus_info->visorchannel = visorchannel;

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);

	/* bus_info is freed as part of the busdevice_release function */
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr != NULL) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		visorchannel_set_clientpartition(bus_info->visorchannel,
						 cmd->configure_bus.guest_handle);
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   cmd->create_device.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto cleanup;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success, or a negative CONTROLVM_RESP_ERROR code on failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}

/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to
 * /proc/visorchipset/parahotplug, which causes parahotplug_proc_write
 * to get called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */
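
/*
 * Illustrative round trip (hypothetical values): Command sends
 * DEVICE_CHANGESTATE for bus 1, device 42 with state.active == 0; the
 * driver queues the request and emits a KOBJ_CHANGE uevent carrying
 * SPAR_PARAHOTPLUG=1 plus the id/state/bus/device/function variables
 * built in parahotplug_request_kickoff() below.  Once the udev script
 * has disabled the device it reports the id back, and
 * parahotplug_request_complete() responds to the stashed message.
 */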

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate unique int to match an outstanding CONTROLVM message with a
 * udev script /proc response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	     env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
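
/*
 * Worked example of the dev_no encoding above (illustrative): dev_no
 * packs a PCI-style device/function pair, so dev_no 42 (0x2A) kicks off
 * SPAR_PARAHOTPLUG_DEVICE=5 (42 >> 3) and SPAR_PARAHOTPLUG_FUNCTION=2
 * (42 & 0x7).
 */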

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the /proc handler, which means the user script has
 * finished the enable/disable.  Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}
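
/*
 * Usage sketch (illustrative): the /proc handler parses the id and
 * resulting state out of what the udev script wrote back, then calls
 * parahotplug_request_complete(id, active).  A return of 0 means the
 * matching request was found, acknowledged with CONTROLVM_RESP_SUCCESS,
 * and freed; -1 means no request with that id was outstanding (e.g., it
 * already expired via parahotplug_process_list()).
 */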
1650
1651/*
1652 * Enables or disables a PCI device by kicking off a udev script
1653 */
1654static void
1655parahotplug_process_message(struct controlvm_message *inmsg)
1656{
1657 struct parahotplug_request *req;
1658
1659 req = parahotplug_request_create(inmsg);
1660
1661 if (!req)
1662 return;
1663
1664 if (inmsg->cmd.device_change_state.state.active) {
1665 /* For enable messages, just respond with success
1666 * right away. This is a bit of a hack, but there are
1667 * issues with the early enable messages we get (with
1668 * either the udev script not detecting that the device
1669 * is up, or not getting called at all). Fortunately
1670 * the messages that get lost don't matter anyway, as
1671 * devices are automatically enabled at
1672 * initialization.
1673 */
1674 parahotplug_request_kickoff(req);
1675 controlvm_respond_physdev_changestate(&inmsg->hdr,
1676 CONTROLVM_RESP_SUCCESS,
1677 inmsg->cmd.device_change_state.state);
1678 parahotplug_request_destroy(req);
1679 } else {
1680 /* For disable messages, add the request to the
1681 * request list before kicking off the udev script. It
1682 * won't get responded to until the script has
1683 * indicated it's done.
1684 */
1685 spin_lock(&parahotplug_request_list_lock);
1686 list_add_tail(&req->list, &parahotplug_request_list);
1687 spin_unlock(&parahotplug_request_list_lock);
1688
1689 parahotplug_request_kickoff(req);
1690 }
1691}
1692
1693/* Process a controlvm message.
1694 * Return result:
1695 * false - this function will return false only in the case where the
1696 * controlvm message was NOT processed, but processing must be
1697 * retried before reading the next controlvm message; a
1698 * scenario where this can occur is when we need to throttle
1699 * the allocation of memory in which to copy out controlvm
1700 * payload data
1701 * true - processing of the controlvm message completed,
1702 * either successfully or with an error.
1703 */
1704static bool
1705handle_command(struct controlvm_message inmsg, u64 channel_addr)
1706{
1707 struct controlvm_message_packet *cmd = &inmsg.cmd;
1708 u64 parm_addr;
1709 u32 parm_bytes;
1710 struct parser_context *parser_ctx = NULL;
1711 bool local_addr;
1712 struct controlvm_message ackmsg;
1713
1714 /* create parsing context if necessary */
1715 local_addr = (inmsg.hdr.flags.test_message == 1);
1716 if (channel_addr == 0)
1717 return true;
1718 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1719 parm_bytes = inmsg.hdr.payload_bytes;
1720
1721 /* Parameter and channel addresses within test messages actually lie
1722 * within our OS-controlled memory. We need to know that, because it
1723 * makes a difference in how we compute the virtual address.
1724 */
1725 if (parm_addr && parm_bytes) {
1726 bool retry = false;
1727
1728 parser_ctx =
1729 parser_init_byte_stream(parm_addr, parm_bytes,
1730 local_addr, &retry);
1731 if (!parser_ctx && retry)
1732 return false;
1733 }
1734
1735 if (!local_addr) {
1736 controlvm_init_response(&ackmsg, &inmsg.hdr,
1737 CONTROLVM_RESP_SUCCESS);
1738 if (controlvm_channel)
1739 visorchannel_signalinsert(controlvm_channel,
1740 CONTROLVM_QUEUE_ACK,
1741 &ackmsg);
1742 }
1743 switch (inmsg.hdr.id) {
1744 case CONTROLVM_CHIPSET_INIT:
1745 chipset_init(&inmsg);
1746 break;
1747 case CONTROLVM_BUS_CREATE:
1748 bus_create(&inmsg);
1749 break;
1750 case CONTROLVM_BUS_DESTROY:
1751 bus_destroy(&inmsg);
1752 break;
1753 case CONTROLVM_BUS_CONFIGURE:
1754 bus_configure(&inmsg, parser_ctx);
1755 break;
1756 case CONTROLVM_DEVICE_CREATE:
1757 my_device_create(&inmsg);
1758 break;
1759 case CONTROLVM_DEVICE_CHANGESTATE:
1760 if (cmd->device_change_state.flags.phys_device) {
1761 parahotplug_process_message(&inmsg);
1762 } else {
1763 /* save the hdr and cmd structures for later use */
1764 /* when sending back the response to Command */
1765 my_device_changestate(&inmsg);
1766 g_devicechangestate_packet = inmsg.cmd;
1767 break;
1768 }
1769 break;
1770 case CONTROLVM_DEVICE_DESTROY:
1771 my_device_destroy(&inmsg);
1772 break;
1773 case CONTROLVM_DEVICE_CONFIGURE:
1774 /* no op for now, just send a respond that we passed */
1775 if (inmsg.hdr.flags.response_expected)
1776 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1777 break;
1778 case CONTROLVM_CHIPSET_READY:
1779 chipset_ready(&inmsg.hdr);
1780 break;
1781 case CONTROLVM_CHIPSET_SELFTEST:
1782 chipset_selftest(&inmsg.hdr);
1783 break;
1784 case CONTROLVM_CHIPSET_STOP:
1785 chipset_notready(&inmsg.hdr);
1786 break;
1787 default:
1788 if (inmsg.hdr.flags.response_expected)
1789 controlvm_respond(&inmsg.hdr,
1790 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1791 break;
1792 }
1793
1794 if (parser_ctx) {
1795 parser_done(parser_ctx);
1796 parser_ctx = NULL;
1797 }
1798 return true;
1799}
1800
1801static inline unsigned int
1802issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1803{
1804 struct vmcall_io_controlvm_addr_params params;
1805 int result = VMCALL_SUCCESS;
1806 u64 physaddr;
1807
1808 physaddr = virt_to_phys(&params);
1809 ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1810 if (VMCALL_SUCCESSFUL(result)) {
1811 *control_addr = params.address;
1812 *control_bytes = params.channel_bytes;
1813 }
1814 return result;
1815}
1816
1817static u64 controlvm_get_channel_address(void)
1818{
1819 u64 addr = 0;
1820 u32 size = 0;
1821
1822 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1823 return 0;
1824
1825 return addr;
1826}
1827
1828static void
1829controlvm_periodic_work(struct work_struct *work)
1830{
1831 struct controlvm_message inmsg;
1832 bool got_command = false;
1833 bool handle_command_failed = false;
1834 static u64 poll_count;
1835
1836 /* make sure visorbus server is registered for controlvm callbacks */
1837 if (visorchipset_visorbusregwait && !visorbusregistered)
1838 goto cleanup;
1839
1840 poll_count++;
1841 if (poll_count >= 250)
1842 ; /* keep going */
1843 else
1844 goto cleanup;
1845
1846 /* Check events to determine if response to CHIPSET_READY
1847 * should be sent
1848 */
1849 if (visorchipset_holdchipsetready &&
1850 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1851 if (check_chipset_events() == 1) {
1852 controlvm_respond(&g_chipset_msg_hdr, 0);
1853 clear_chipset_events();
1854 memset(&g_chipset_msg_hdr, 0,
1855 sizeof(struct controlvm_message_header));
1856 }
1857 }
1858
1859 while (visorchannel_signalremove(controlvm_channel,
1860 CONTROLVM_QUEUE_RESPONSE,
1861 &inmsg))
1862 ;
1863 if (!got_command) {
1864 if (controlvm_pending_msg_valid) {
1865 /* we throttled processing of a prior
1866 * msg, so try to process it again
1867 * rather than reading a new one
1868 */
1869 inmsg = controlvm_pending_msg;
1870 controlvm_pending_msg_valid = false;
1871 got_command = true;
1872 } else {
1873 got_command = read_controlvm_event(&inmsg);
1874 }
1875 }
1876
1877 handle_command_failed = false;
1878 while (got_command && (!handle_command_failed)) {
1879 most_recent_message_jiffies = jiffies;
1880 if (handle_command(inmsg,
1881 visorchannel_get_physaddr
1882 (controlvm_channel))) {
1883 got_command = read_controlvm_event(&inmsg);
1884 } else {
1885 /* this is a scenario where throttling
1886 * is required, but probably NOT an
1887 * error...; we stash the current
1888 * controlvm msg so we will attempt to
1889 * reprocess it on our next loop
1890 */
1891 handle_command_failed = true;
1892 controlvm_pending_msg = inmsg;
1893 controlvm_pending_msg_valid = true;
1894 }
1895 }
1896
1897 /* parahotplug_worker */
1898 parahotplug_process_list();
1899
1900cleanup:
1901
1902 if (time_after(jiffies,
1903 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1904 /* it's been longer than MIN_IDLE_SECONDS since we
1905 * processed our last controlvm message; slow down the
1906 * polling
1907 */
1908 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1909 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1910 } else {
1911 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1912 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1913 }
1914
1915 queue_delayed_work(periodic_controlvm_workqueue,
1916 &periodic_controlvm_work, poll_jiffies);
1917}
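
/*
 * Poll-interval arithmetic, for illustration: a delay of N jiffies is
 * N/HZ seconds, so on a kernel built with HZ=250 (an assumption; HZ is
 * configuration-dependent) POLLJIFFIES_CONTROLVMCHANNEL_FAST polls at
 * roughly 4 ms and the SLOW interval at roughly 400 ms. The switch
 * above trades message latency for idle CPU once the channel has been
 * quiet for MIN_IDLE_SECONDS.
 */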
1918
1919static void
1920setup_crash_devices_work_queue(struct work_struct *work)
1921{
1922 struct controlvm_message local_crash_bus_msg;
1923 struct controlvm_message local_crash_dev_msg;
1924 struct controlvm_message msg;
1925 u32 local_crash_msg_offset;
1926 u16 local_crash_msg_count;
1927
1928 /* make sure visorbus is registered for controlvm callbacks */
1929 if (visorchipset_visorbusregwait && !visorbusregistered)
1930 goto cleanup;
1931
1932 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1933
1934 /* send init chipset msg */
1935 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1936 msg.cmd.init_chipset.bus_count = 23;
1937 msg.cmd.init_chipset.switch_count = 0;
1938
1939 chipset_init(&msg);
1940
1941 /* get saved message count */
1942 if (visorchannel_read(controlvm_channel,
1943 offsetof(struct spar_controlvm_channel_protocol,
1944 saved_crash_message_count),
1945 &local_crash_msg_count, sizeof(u16)) < 0) {
1946 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1947 POSTCODE_SEVERITY_ERR);
1948 return;
1949 }
1950
1951 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1952 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1953 local_crash_msg_count,
1954 POSTCODE_SEVERITY_ERR);
1955 return;
1956 }
1957
1958 /* get saved crash message offset */
1959 if (visorchannel_read(controlvm_channel,
1960 offsetof(struct spar_controlvm_channel_protocol,
1961 saved_crash_message_offset),
1962 &local_crash_msg_offset, sizeof(u32)) < 0) {
1963 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1964 POSTCODE_SEVERITY_ERR);
1965 return;
1966 }
1967
1968 /* read create device message for storage bus offset */
1969 if (visorchannel_read(controlvm_channel,
1970 local_crash_msg_offset,
1971 &local_crash_bus_msg,
1972 sizeof(struct controlvm_message)) < 0) {
1973 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1974 POSTCODE_SEVERITY_ERR);
1975 return;
1976 }
1977
1978 /* read create device message for storage device */
1979 if (visorchannel_read(controlvm_channel,
1980 local_crash_msg_offset +
1981 sizeof(struct controlvm_message),
1982 &local_crash_dev_msg,
1983 sizeof(struct controlvm_message)) < 0) {
1984 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1985 POSTCODE_SEVERITY_ERR);
1986 return;
1987 }
1988
1989 /* reuse IOVM create bus message */
1990 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
1991 bus_create(&local_crash_bus_msg);
1992 } else {
1993 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1994 POSTCODE_SEVERITY_ERR);
1995 return;
1996 }
1997
1998 /* reuse create device message for storage device */
1999 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2000 my_device_create(&local_crash_dev_msg);
2001 } else {
2002 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2003 POSTCODE_SEVERITY_ERR);
2004 return;
2005 }
2006 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2007 return;
2008
2009cleanup:
2010
2011 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2012
2013 queue_delayed_work(periodic_controlvm_workqueue,
2014 &periodic_controlvm_work, poll_jiffies);
2015}
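
/*
 * Channel layout assumed by the reads above (inferred from the offsets
 * used, not quoted from a spec): the controlvm channel stores a count
 * at saved_crash_message_count and, starting at
 * saved_crash_message_offset, two consecutive struct controlvm_message
 * entries: the saved CONTROLVM_BUS_CREATE for the storage bus, then
 * the saved CONTROLVM_DEVICE_CREATE for the storage device. They are
 * replayed here so a kdump kernel can reach its dump disk.
 */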
2016
2017static void
2018bus_create_response(struct visor_device *bus_info, int response)
2019{
2020 if (response >= 0)
2021 bus_info->state.created = 1;
2023
2024 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
2025 response);
2026
2027 kfree(bus_info->pending_msg_hdr);
2028 bus_info->pending_msg_hdr = NULL;
2029}
2030
2031static void
2032bus_destroy_response(struct visor_device *bus_info, int response)
2033{
2034 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
2035 response);
2036
2037 kfree(bus_info->pending_msg_hdr);
2038 bus_info->pending_msg_hdr = NULL;
2039}
2040
2041static void
2042device_create_response(struct visor_device *dev_info, int response)
2043{
2044 if (response >= 0)
2045 dev_info->state.created = 1;
2046
2047 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
2048 response);
2049
2050 kfree(dev_info->pending_msg_hdr);
 dev_info->pending_msg_hdr = NULL;
2051}
2052
2053static void
2054device_destroy_response(struct visor_device *dev_info, int response)
2055{
2056 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
2057 response);
2058
2059 kfree(dev_info->pending_msg_hdr);
2060 dev_info->pending_msg_hdr = NULL;
2061}
2062
2063static void
2064visorchipset_device_pause_response(struct visor_device *dev_info,
2065 int response)
2066{
2067 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2068 dev_info, response,
2069 segment_state_standby);
2070
2071 kfree(dev_info->pending_msg_hdr);
2072 dev_info->pending_msg_hdr = NULL;
2073}
2074
2075static void
2076device_resume_response(struct visor_device *dev_info, int response)
2077{
2078 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2079 dev_info, response,
2080 segment_state_running);
2081
2082 kfree(dev_info->pending_msg_hdr);
2083 dev_info->pending_msg_hdr = NULL;
2084}
2085
2086static ssize_t chipsetready_store(struct device *dev,
2087 struct device_attribute *attr,
2088 const char *buf, size_t count)
2089{
2090 char msgtype[64];
2091
2092 if (sscanf(buf, "%63s", msgtype) != 1)
2093 return -EINVAL;
2094
2095 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2096 chipset_events[0] = 1;
2097 return count;
2098 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2099 chipset_events[1] = 1;
2100 return count;
2101 }
2102 return -EINVAL;
2103}
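
/*
 * Example usage from the guest (the sysfs path is illustrative; it
 * depends on where the attribute is registered on the visorchipset
 * platform device):
 *
 *	echo MODULES_LOADED > /sys/devices/platform/visorchipset/guest/chipsetready
 *
 * Each recognized string latches one entry of chipset_events[], which
 * gates the deferred CHIPSET_READY response when the holdchipsetready
 * module parameter is set.
 */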
2104
2105/* The parahotplug/devicedisabled interface gets called by our support script
2106 * when an SR-IOV device has been shut down. The ID is passed to the script
2107 * and then passed back when the device has been removed.
2108 */
2109static ssize_t devicedisabled_store(struct device *dev,
2110 struct device_attribute *attr,
2111 const char *buf, size_t count)
2112{
2113 unsigned int id;
2114
2115 if (kstrtouint(buf, 10, &id))
2116 return -EINVAL;
2117
2118 parahotplug_request_complete(id, 0);
2119 return count;
2120}
2121
2122/* The parahotplug/deviceenabled interface gets called by our support script
2123 * when an SR-IOV device has been recovered. The ID is passed to the script
2124 * and then passed back when the device has been brought back up.
2125 */
2126static ssize_t deviceenabled_store(struct device *dev,
2127 struct device_attribute *attr,
2128 const char *buf, size_t count)
2129{
2130 unsigned int id;
2131
2132 if (kstrtouint(buf, 10, &id))
2133 return -EINVAL;
2134
2135 parahotplug_request_complete(id, 1);
2136 return count;
2137}
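
/*
 * Example flow for the two attributes above (paths illustrative, id
 * arbitrary): after the driver asks the support script to take SR-IOV
 * device 23 offline, the script acknowledges with
 *
 *	echo 23 > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *
 * and, once the device has been restored,
 *
 *	echo 23 > /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 *
 * Each write completes the outstanding parahotplug request with the
 * matching id.
 */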
2138
2139static int
2140visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2141{
2142 unsigned long physaddr = 0;
2143 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2144 u64 addr = 0;
2145
2147 if (offset & (PAGE_SIZE - 1))
2148 return -ENXIO; /* need aligned offsets */
2149
2150 switch (offset) {
2151 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2152 vma->vm_flags |= VM_IO;
2153 if (!*file_controlvm_channel)
2154 return -ENXIO;
2155
2156 visorchannel_read(*file_controlvm_channel,
2157 offsetof(struct spar_controlvm_channel_protocol,
2158 gp_control_channel),
2159 &addr, sizeof(addr));
2160 if (!addr)
2161 return -ENXIO;
2162
2163 physaddr = (unsigned long)addr;
2164 if (remap_pfn_range(vma, vma->vm_start,
2165 physaddr >> PAGE_SHIFT,
2166 vma->vm_end - vma->vm_start,
2167 vma->vm_page_prot))
2168 return -EAGAIN;
2171 break;
2172 default:
2173 return -ENXIO;
2174 }
2175 return 0;
2176}
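
/*
 * Minimal userspace sketch of mapping the control channel through the
 * handler above (device node name and length are assumptions; the
 * handler itself only validates the page-aligned offset):
 *
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	void *chan = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd,
 *			  VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 *
 * The pages mapped start at the gp_control_channel address read from
 * the channel header.
 */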
2177
2178static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2179{
2180 u64 result = VMCALL_SUCCESS;
2181 u64 physaddr = 0;
2182
2183 ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2184 result);
2185 return result;
2186}
2187
2188static inline int issue_vmcall_update_physical_time(u64 adjustment)
2189{
2190 int result = VMCALL_SUCCESS;
2191
2192 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2193 return result;
2194}
2195
2196static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2197 unsigned long arg)
2198{
2199 s64 adjustment;
2200 s64 vrtc_offset;
2201
2202 switch (cmd) {
2203 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2204 /* get the physical rtc offset */
2205 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2206 if (copy_to_user((void __user *)arg, &vrtc_offset,
2207 sizeof(vrtc_offset))) {
2208 return -EFAULT;
2209 }
2210 return 0;
2211 case VMCALL_UPDATE_PHYSICAL_TIME:
2212 if (copy_from_user(&adjustment, (void __user *)arg,
2213 sizeof(adjustment))) {
2214 return -EFAULT;
2215 }
2216 return issue_vmcall_update_physical_time(adjustment);
2217 default:
2218 return -EFAULT;
2219 }
2220}
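
/*
 * Userspace sketch for the two ioctls above (note that the raw VMCALL
 * ids serve directly as the ioctl command codes here rather than
 * _IOR/_IOW encodings; fd is an open descriptor for the device node and
 * compute_adjustment() is a hypothetical helper):
 *
 *	int64_t offset, adjustment;
 *
 *	if (ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &offset) == 0)
 *		adjustment = compute_adjustment(offset);
 *	ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adjustment);
 */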
2221
2222static const struct file_operations visorchipset_fops = {
2223 .owner = THIS_MODULE,
2224 .open = visorchipset_open,
2227 .unlocked_ioctl = visorchipset_ioctl,
2228 .release = visorchipset_release,
2229 .mmap = visorchipset_mmap,
2230};
2231
2232static int
2233visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2234{
2235 int rc = 0;
2236
2237 file_controlvm_channel = controlvm_channel;
2238 cdev_init(&file_cdev, &visorchipset_fops);
2239 file_cdev.owner = THIS_MODULE;
2240 if (MAJOR(major_dev) == 0) {
2241 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2242 /* dynamic major device number registration required */
2243 if (rc < 0)
2244 return rc;
2245 } else {
2246 /* static major device number registration required */
2247 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2248 if (rc < 0)
2249 return rc;
2250 }
2251 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2252 if (rc < 0) {
2253 unregister_chrdev_region(major_dev, 1);
2254 return rc;
2255 }
2256 return 0;
2257}
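
/*
 * With the default major of 0, the function above takes the
 * alloc_chrdev_region() path and the major number is assigned
 * dynamically; a fixed major can be requested at load time through the
 * "major" module parameter, e.g. (illustrative):
 *
 *	modprobe visorchipset major=240
 */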
2258
2259static int
2260visorchipset_init(struct acpi_device *acpi_device)
2261{
2262 int rc = 0;
2263 u64 addr;
2264 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2265 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2266
2267 addr = controlvm_get_channel_address();
2268 if (!addr)
2269 return -ENODEV;
2270
2271 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2272 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2273
2274 controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
2275 GFP_KERNEL, uuid);
 if (!controlvm_channel)
 return -ENODEV;
2276 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2277 visorchannel_get_header(controlvm_channel))) {
2278 initialize_controlvm_payload();
2279 } else {
2280 visorchannel_destroy(controlvm_channel);
2281 controlvm_channel = NULL;
2282 return -ENODEV;
2283 }
2284
2285 major_dev = MKDEV(visorchipset_major, 0);
2286 rc = visorchipset_file_init(major_dev, &controlvm_channel);
2287 if (rc < 0) {
2288 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2289 goto cleanup;
2290 }
2291
2292 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2293
2294 /* if booting in a crash kernel */
2295 if (is_kdump_kernel())
2296 INIT_DELAYED_WORK(&periodic_controlvm_work,
2297 setup_crash_devices_work_queue);
2298 else
2299 INIT_DELAYED_WORK(&periodic_controlvm_work,
2300 controlvm_periodic_work);
2301 periodic_controlvm_workqueue =
2302 create_singlethread_workqueue("visorchipset_controlvm");
2303
2304 if (!periodic_controlvm_workqueue) {
2305 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2306 DIAG_SEVERITY_ERR);
2307 rc = -ENOMEM;
2308 goto cleanup;
2309 }
2310 most_recent_message_jiffies = jiffies;
2311 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2312 /* queue_delayed_work() returns bool (whether the work was newly
2313 * queued), never a negative errno, so there is no error path to
2314 * check here
2315 */
2316 queue_delayed_work(periodic_controlvm_workqueue,
2317 &periodic_controlvm_work, poll_jiffies);
2319
2320 visorchipset_platform_device.dev.devt = major_dev;
2321 rc = platform_device_register(&visorchipset_platform_device);
2322 if (rc < 0) {
2323 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2324 goto cleanup;
2325 }
2326 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2327
2328 rc = visorbus_init();
2329cleanup:
2330 if (rc) {
2331 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2332 POSTCODE_SEVERITY_ERR);
2333 }
2334 return rc;
2335}
2336
2337static void
2338visorchipset_file_cleanup(dev_t major_dev)
2339{
2340 if (file_cdev.ops)
2341 cdev_del(&file_cdev);
2342 file_cdev.ops = NULL;
2343 unregister_chrdev_region(major_dev, 1);
2344}
2345
2346static int
2347visorchipset_exit(struct acpi_device *acpi_device)
2348{
2349 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2350
2351 visorbus_exit();
2352
2353 cancel_delayed_work(&periodic_controlvm_work);
2354 flush_workqueue(periodic_controlvm_workqueue);
2355 destroy_workqueue(periodic_controlvm_workqueue);
2356 periodic_controlvm_workqueue = NULL;
2357 destroy_controlvm_payload_info(&controlvm_payload_info);
2358
2359 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2360
2361 visorchannel_destroy(controlvm_channel);
2362
2363 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2364 platform_device_unregister(&visorchipset_platform_device);
2365 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2366
2367 return 0;
2368}
2369
2370static const struct acpi_device_id unisys_device_ids[] = {
2371 {"PNP0A07", 0},
2372 {"", 0},
2373};
2374
2375static struct acpi_driver unisys_acpi_driver = {
2376 .name = "unisys_acpi",
2377 .class = "unisys_acpi_class",
2378 .owner = THIS_MODULE,
2379 .ids = unisys_device_ids,
2380 .ops = {
2381 .add = visorchipset_init,
2382 .remove = visorchipset_exit,
2383 },
2384};

2385static __init u32 visorutil_spar_detect(void)
2386{
2387 unsigned int eax, ebx, ecx, edx;
2388
2389 if (!cpu_has_hypervisor)
2390 return 0;
2391
2392 /* check the s-Par signature in the hypervisor cpuid leaf */
2393 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2394 return (ebx == UNISYS_SPAR_ID_EBX) &&
2395 (ecx == UNISYS_SPAR_ID_ECX) &&
2396 (edx == UNISYS_SPAR_ID_EDX);
2397}
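
/*
 * The three UNISYS_SPAR_ID_* words are the ASCII bytes of
 * "UnisysSpar64" packed little-endian into ebx/ecx/edx ("Unis", "ysSp",
 * "ar64"), so the comparison above is simply a signature-string match
 * on the hypervisor cpuid leaf.
 */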
2399
2400static int init_unisys(void)
2401{
2402 int result;

2403 if (!visorutil_spar_detect())
2404 return -ENODEV;
2405
2406 result = acpi_bus_register_driver(&unisys_acpi_driver);
2407 if (result)
2408 return -ENODEV;
2409
2410 pr_info("Unisys Visorchipset Driver Loaded.\n");
2411 return 0;
2412}
2413
2414static void exit_unisys(void)
2415{
2416 acpi_bus_unregister_driver(&unisys_acpi_driver);
2417}
2418
2419module_param_named(major, visorchipset_major, int, S_IRUGO);
2420MODULE_PARM_DESC(major,
2421 "major device number to use for the device node");
2422module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2423MODULE_PARM_DESC(visorbusregwait,
2424 "1 to have the module wait for the visor bus to register");
2425module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2426 int, S_IRUGO);
2427MODULE_PARM_DESC(holdchipsetready,
2428 "1 to hold response to CHIPSET_READY");
2429
2430module_init(init_unisys);
2431module_exit(exit_unisys);
2432
2433MODULE_AUTHOR("Unisys");
2434MODULE_LICENSE("GPL");
2435MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2436 VERSION);
2437MODULE_VERSION(VERSION);