staging: unisys: Do not use 0 as the default bus root device number
drivers/staging/unisys/visorbus/visorchipset.c
1 /* visorchipset.c
2 *
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18 #include <linux/acpi.h>
19 #include <linux/cdev.h>
20 #include <linux/ctype.h>
21 #include <linux/fs.h>
22 #include <linux/mm.h>
23 #include <linux/nls.h>
24 #include <linux/netdevice.h>
25 #include <linux/platform_device.h>
26 #include <linux/uuid.h>
27 #include <linux/crash_dump.h>
28
29 #include "channel_guid.h"
30 #include "controlvmchannel.h"
31 #include "controlvmcompletionstatus.h"
32 #include "guestlinuxdebug.h"
33 #include "periodic_work.h"
34 #include "version.h"
35 #include "visorbus.h"
36 #include "visorbus_private.h"
37 #include "vmcallinterface.h"
38
39 #define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
40
41 #define MAX_NAME_SIZE 128
42 #define MAX_IP_SIZE 50
43 #define MAXOUTSTANDINGCHANNELCOMMAND 256
44 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
45 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
46
47 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
48
49 #define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
50
51
52 #define UNISYS_SPAR_LEAF_ID 0x40000000
53
54 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
55 #define UNISYS_SPAR_ID_EBX 0x73696e55
56 #define UNISYS_SPAR_ID_ECX 0x70537379
57 #define UNISYS_SPAR_ID_EDX 0x34367261
58
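/* 0 is a valid device number on a bus, so it cannot double as the
 * "no device" marker; per the commit subject above, UINT_MAX is
 * reserved instead as the default bus root device number.
 */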
59 #define BUS_ROOT_DEVICE UINT_MAX
60
61 /*
62 * Module parameters
63 */
64 static int visorchipset_major;
65 static int visorchipset_visorbusregwait = 1; /* default is on */
66 static int visorchipset_holdchipsetready;
67 static unsigned long controlvm_payload_bytes_buffered;
68
69 static int
70 visorchipset_open(struct inode *inode, struct file *file)
71 {
72 unsigned int minor_number = iminor(inode);
73
74 if (minor_number)
75 return -ENODEV;
76 file->private_data = NULL;
77 return 0;
78 }
79
80 static int
81 visorchipset_release(struct inode *inode, struct file *file)
82 {
83 return 0;
84 }
85
86 /* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
87 * we switch to slow polling mode. As soon as we get a controlvm
88 * message, we switch back to fast polling mode.
89 */
90 #define MIN_IDLE_SECONDS 10
91 static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
92 static unsigned long most_recent_message_jiffies; /* when we got our last
93 * controlvm message */
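/* A minimal sketch (illustrative only; not called by the driver) of how
 * the fast/slow switch described above can be computed from
 * most_recent_message_jiffies. Assumes jiffies/time_after() from
 * <linux/jiffies.h>, which the includes above pull in transitively.
 */
static inline unsigned long example_poll_jiffies(void)
{
	if (time_after(jiffies,
		       most_recent_message_jiffies + MIN_IDLE_SECONDS * HZ))
		return POLLJIFFIES_CONTROLVMCHANNEL_SLOW;	/* idle */
	return POLLJIFFIES_CONTROLVMCHANNEL_FAST;	/* busy */
}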
94 static int visorbusregistered;
95
96 #define MAX_CHIPSET_EVENTS 2
97 static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
98
99 struct parser_context {
100 unsigned long allocbytes;
101 unsigned long param_bytes;
102 u8 *curr;
103 unsigned long bytes_remaining;
104 bool byte_stream;
105 char data[0];
106 };
107
108 static struct delayed_work periodic_controlvm_work;
109 static struct workqueue_struct *periodic_controlvm_workqueue;
110 static DEFINE_SEMAPHORE(notifier_lock);
111
112 static struct cdev file_cdev;
113 static struct visorchannel **file_controlvm_channel;
114 static struct controlvm_message_header g_chipset_msg_hdr;
115 static const uuid_le spar_diag_pool_channel_protocol_uuid =
116 SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
117 /* 0xffffff is an invalid Bus/Device number */
118 static u32 g_diagpool_bus_no = 0xffffff;
119 static u32 g_diagpool_dev_no = 0xffffff;
120 static struct controlvm_message_packet g_devicechangestate_packet;
121
122 #define is_diagpool_channel(channel_type_guid) \
123 (uuid_le_cmp(channel_type_guid,\
124 spar_diag_pool_channel_protocol_uuid) == 0)
125
126 static LIST_HEAD(bus_info_list);
127 static LIST_HEAD(dev_info_list);
128
129 static struct visorchannel *controlvm_channel;
130
131 /* Manages the request payload in the controlvm channel */
132 struct visor_controlvm_payload_info {
133 u8 __iomem *ptr; /* pointer to base address of payload pool */
134 u64 offset; /* offset from beginning of controlvm
135 * channel to beginning of payload pool */
136 u32 bytes; /* number of bytes in payload pool */
137 };
138
139 static struct visor_controlvm_payload_info controlvm_payload_info;
140
141 /* The following globals are used to handle the scenario where we are unable to
142 * offload the payload from a controlvm message due to memory requirements. In
143 * this scenario, we simply stash the controlvm message, then attempt to
144 * process it again the next time controlvm_periodic_work() runs.
145 */
146 static struct controlvm_message controlvm_pending_msg;
147 static bool controlvm_pending_msg_valid;
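/* Illustrative sketch of the stash-and-retry pattern described above
 * (the real logic lives in controlvm_periodic_work(), and
 * handle_command() returns false when the message must be retried):
 *
 *	if (!handle_command(inmsg, channel_addr)) {
 *		controlvm_pending_msg = inmsg;
 *		controlvm_pending_msg_valid = true;
 *	}
 */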
148
149 /* This identifies a data buffer that has been received via a controlvm
150 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
151 */
152 struct putfile_buffer_entry {
153 struct list_head next; /* putfile_buffer_entry list */
154 struct parser_context *parser_ctx; /* points to input data buffer */
155 };
156
157 /* List of struct putfile_request *, via next_putfile_request member.
158 * Each entry in this list identifies an outstanding TRANSMIT_FILE
159 * conversation.
160 */
161 static LIST_HEAD(putfile_request_list);
162
163 /* This describes a buffer and its current state of transfer (e.g., how many
164 * bytes have already been supplied as putfile data, and how many bytes are
165 * remaining) for a putfile_request.
166 */
167 struct putfile_active_buffer {
168 /* a payload from a controlvm message, containing a file data buffer */
169 struct parser_context *parser_ctx;
170 /* points within data area of parser_ctx to next byte of data */
171 u8 *pnext;
172 /* # bytes left from <pnext> to the end of this data buffer */
173 size_t bytes_remaining;
174 };
175
176 #define PUTFILE_REQUEST_SIG 0x0906101302281211
177 /* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
178 * conversation. Structs of this type are dynamically linked into
179 * <putfile_request_list>.
180 */
181 struct putfile_request {
182 u64 sig; /* PUTFILE_REQUEST_SIG */
183
184 /* header from original TransmitFile request */
185 struct controlvm_message_header controlvm_header;
186 u64 file_request_number; /* from original TransmitFile request */
187
188 /* link to next struct putfile_request */
189 struct list_head next_putfile_request;
190
191 /* most-recent sequence number supplied via a controlvm message */
192 u64 data_sequence_number;
193
194 /* head of putfile_buffer_entry list, which describes the data to be
195 * supplied as putfile data;
196 * - this list is added to when controlvm messages come in that supply
197 * file data
198 * - this list is removed from via the hotplug program that is actually
199 * consuming these buffers to write as file data */
200 struct list_head input_buffer_list;
201 spinlock_t req_list_lock; /* lock for input_buffer_list */
202
203 /* waiters for input_buffer_list to go non-empty */
204 wait_queue_head_t input_buffer_wq;
205
206 /* data not yet read within current putfile_buffer_entry */
207 struct putfile_active_buffer active_buf;
208
209 /* <0 = failed, 0 = in-progress, >0 = successful; note that this
210 * must be set while holding req_list_lock, and if you set it <0,
211 * it is your responsibility to also free up all of the other objects
212 * in this struct (like input_buffer_list, active_buf.parser_ctx)
213 * before releasing the lock */
214 int completion_status;
215 };
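/* Sketch of the completion_status discipline described above, for a
 * hypothetical failure path (illustrative only; -EIO is an example):
 *
 *	spin_lock(&req->req_list_lock);
 *	req->completion_status = -EIO;
 *	(free everything on input_buffer_list, and
 *	 active_buf.parser_ctx, before releasing the lock)
 *	spin_unlock(&req->req_list_lock);
 */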
216
217 struct parahotplug_request {
218 struct list_head list;
219 int id;
220 unsigned long expiration;
221 struct controlvm_message msg;
222 };
223
224 static LIST_HEAD(parahotplug_request_list);
225 static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
226 static void parahotplug_process_list(void);
227
228 /* The notifier callbacks registered by the bus driver; these are
229 * invoked as bus/device controlvm messages are processed.
230 */
231 static struct visorchipset_busdev_notifiers busdev_notifiers;
232
233 static void bus_create_response(struct visorchipset_bus_info *p, int response);
234 static void bus_destroy_response(struct visorchipset_bus_info *p, int response);
235 static void device_create_response(struct visorchipset_device_info *p,
236 int response);
237 static void device_destroy_response(struct visorchipset_device_info *p,
238 int response);
239 static void device_resume_response(struct visorchipset_device_info *p,
240 int response);
241
242 static void
243 visorchipset_device_pause_response(struct visorchipset_device_info *p,
244 int response);
245
246 static struct visorchipset_busdev_responders busdev_responders = {
247 .bus_create = bus_create_response,
248 .bus_destroy = bus_destroy_response,
249 .device_create = device_create_response,
250 .device_destroy = device_destroy_response,
251 .device_pause = visorchipset_device_pause_response,
252 .device_resume = device_resume_response,
253 };
254
255 /* info for /dev/visorchipset */
256 static dev_t major_dev = -1; /* indicates major number for the device */
257
258 /* prototypes for attributes */
259 static ssize_t toolaction_show(struct device *dev,
260 struct device_attribute *attr, char *buf);
261 static ssize_t toolaction_store(struct device *dev,
262 struct device_attribute *attr,
263 const char *buf, size_t count);
264 static DEVICE_ATTR_RW(toolaction);
265
266 static ssize_t boottotool_show(struct device *dev,
267 struct device_attribute *attr, char *buf);
268 static ssize_t boottotool_store(struct device *dev,
269 struct device_attribute *attr, const char *buf,
270 size_t count);
271 static DEVICE_ATTR_RW(boottotool);
272
273 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
274 char *buf);
275 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
276 const char *buf, size_t count);
277 static DEVICE_ATTR_RW(error);
278
279 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
280 char *buf);
281 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
282 const char *buf, size_t count);
283 static DEVICE_ATTR_RW(textid);
284
285 static ssize_t remaining_steps_show(struct device *dev,
286 struct device_attribute *attr, char *buf);
287 static ssize_t remaining_steps_store(struct device *dev,
288 struct device_attribute *attr,
289 const char *buf, size_t count);
290 static DEVICE_ATTR_RW(remaining_steps);
291
292 static ssize_t chipsetready_store(struct device *dev,
293 struct device_attribute *attr,
294 const char *buf, size_t count);
295 static DEVICE_ATTR_WO(chipsetready);
296
297 static ssize_t devicedisabled_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count);
300 static DEVICE_ATTR_WO(devicedisabled);
301
302 static ssize_t deviceenabled_store(struct device *dev,
303 struct device_attribute *attr,
304 const char *buf, size_t count);
305 static DEVICE_ATTR_WO(deviceenabled);
306
307 static struct attribute *visorchipset_install_attrs[] = {
308 &dev_attr_toolaction.attr,
309 &dev_attr_boottotool.attr,
310 &dev_attr_error.attr,
311 &dev_attr_textid.attr,
312 &dev_attr_remaining_steps.attr,
313 NULL
314 };
315
316 static struct attribute_group visorchipset_install_group = {
317 .name = "install",
318 .attrs = visorchipset_install_attrs
319 };
320
321 static struct attribute *visorchipset_guest_attrs[] = {
322 &dev_attr_chipsetready.attr,
323 NULL
324 };
325
326 static struct attribute_group visorchipset_guest_group = {
327 .name = "guest",
328 .attrs = visorchipset_guest_attrs
329 };
330
331 static struct attribute *visorchipset_parahotplug_attrs[] = {
332 &dev_attr_devicedisabled.attr,
333 &dev_attr_deviceenabled.attr,
334 NULL
335 };
336
337 static struct attribute_group visorchipset_parahotplug_group = {
338 .name = "parahotplug",
339 .attrs = visorchipset_parahotplug_attrs
340 };
341
342 static const struct attribute_group *visorchipset_dev_groups[] = {
343 &visorchipset_install_group,
344 &visorchipset_guest_group,
345 &visorchipset_parahotplug_group,
346 NULL
347 };
348
349 /* /sys/devices/platform/visorchipset */
350 static struct platform_device visorchipset_platform_device = {
351 .name = "visorchipset",
352 .id = -1,
353 .dev.groups = visorchipset_dev_groups,
354 };
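/* The attribute groups above yield the following sysfs layout:
 *
 *	/sys/devices/platform/visorchipset/install/{toolaction,
 *		boottotool,error,textid,remaining_steps}
 *	/sys/devices/platform/visorchipset/guest/chipsetready
 *	/sys/devices/platform/visorchipset/parahotplug/{devicedisabled,
 *		deviceenabled}
 */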
355
356 /* Function prototypes */
357 static void controlvm_respond(struct controlvm_message_header *msg_hdr,
358 int response);
359 static void controlvm_respond_chipset_init(
360 struct controlvm_message_header *msg_hdr, int response,
361 enum ultra_chipset_feature features);
362 static void controlvm_respond_physdev_changestate(
363 struct controlvm_message_header *msg_hdr, int response,
364 struct spar_segment_state state);
365
366
367 static void parser_done(struct parser_context *ctx);
368
369 static struct parser_context *
370 parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
371 {
372 int allocbytes = sizeof(struct parser_context) + bytes;
373 struct parser_context *rc = NULL;
374 struct parser_context *ctx = NULL;
375
376 if (retry)
377 *retry = false;
378
379 /*
380 * allocate an extra byte to ensure the payload is
381 * '\0'-terminated
382 */
383 allocbytes++;
384 if ((controlvm_payload_bytes_buffered + bytes)
385 > MAX_CONTROLVM_PAYLOAD_BYTES) {
386 if (retry)
387 *retry = true;
388 rc = NULL;
389 goto cleanup;
390 }
391 ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
392 if (!ctx) {
393 if (retry)
394 *retry = true;
395 rc = NULL;
396 goto cleanup;
397 }
398
399 ctx->allocbytes = allocbytes;
400 ctx->param_bytes = bytes;
401 ctx->curr = NULL;
402 ctx->bytes_remaining = 0;
403 ctx->byte_stream = false;
404 if (local) {
405 void *p;
406
407 if (addr > virt_to_phys(high_memory - 1)) {
408 rc = NULL;
409 goto cleanup;
410 }
411 p = __va((unsigned long) (addr));
412 memcpy(ctx->data, p, bytes);
413 } else {
414 void __iomem *mapping;
415
416 if (!request_mem_region(addr, bytes, "visorchipset")) {
417 rc = NULL;
418 goto cleanup;
419 }
420
421 mapping = ioremap_cache(addr, bytes);
422 if (!mapping) {
423 release_mem_region(addr, bytes);
424 rc = NULL;
425 goto cleanup;
426 }
427 memcpy_fromio(ctx->data, mapping, bytes);
428 release_mem_region(addr, bytes);
429 }
430
431 ctx->byte_stream = true;
432 rc = ctx;
433 cleanup:
434 if (rc) {
435 controlvm_payload_bytes_buffered += ctx->param_bytes;
436 } else {
437 if (ctx) {
438 parser_done(ctx);
439 ctx = NULL;
440 }
441 }
442 return rc;
443 }
444
445 static uuid_le
446 parser_id_get(struct parser_context *ctx)
447 {
448 struct spar_controlvm_parameters_header *phdr = NULL;
449
450 if (ctx == NULL)
451 return NULL_UUID_LE;
452 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
453 return phdr->id;
454 }
455
456 /* Identifies which string field within the controlvm parameters
457 * payload a call to parser_param_start() should select.
458 */
459
460 enum PARSER_WHICH_STRING {
461 PARSERSTRING_INITIATOR,
462 PARSERSTRING_TARGET,
463 PARSERSTRING_CONNECTION,
464 PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used? */
465 };
466
467 static void
468 parser_param_start(struct parser_context *ctx,
469 enum PARSER_WHICH_STRING which_string)
470 {
471 struct spar_controlvm_parameters_header *phdr = NULL;
472
473 if (!ctx)
474 goto away;
475 phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
476 switch (which_string) {
477 case PARSERSTRING_INITIATOR:
478 ctx->curr = ctx->data + phdr->initiator_offset;
479 ctx->bytes_remaining = phdr->initiator_length;
480 break;
481 case PARSERSTRING_TARGET:
482 ctx->curr = ctx->data + phdr->target_offset;
483 ctx->bytes_remaining = phdr->target_length;
484 break;
485 case PARSERSTRING_CONNECTION:
486 ctx->curr = ctx->data + phdr->connection_offset;
487 ctx->bytes_remaining = phdr->connection_length;
488 break;
489 case PARSERSTRING_NAME:
490 ctx->curr = ctx->data + phdr->name_offset;
491 ctx->bytes_remaining = phdr->name_length;
492 break;
493 default:
494 break;
495 }
496
497 away:
498 return;
499 }
500
501 static void parser_done(struct parser_context *ctx)
502 {
503 if (!ctx)
504 return;
505 controlvm_payload_bytes_buffered -= ctx->param_bytes;
506 kfree(ctx);
507 }
508
509 static void *
510 parser_string_get(struct parser_context *ctx)
511 {
512 u8 *pscan;
513 unsigned long nscan;
514 int value_length = -1;
515 void *value = NULL;
516 int i;
517
518 if (!ctx)
519 return NULL;
520 pscan = ctx->curr;
521 nscan = ctx->bytes_remaining;
522 if (nscan == 0)
523 return NULL;
524 if (!pscan)
525 return NULL;
526 for (i = 0, value_length = -1; i < nscan; i++)
527 if (pscan[i] == '\0') {
528 value_length = i;
529 break;
530 }
531 if (value_length < 0) /* '\0' was not included in the length */
532 value_length = nscan;
533 value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
534 if (value == NULL)
535 return NULL;
536 if (value_length > 0)
537 memcpy(value, pscan, value_length);
538 ((u8 *) (value))[value_length] = '\0';
539 return value;
540 }
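/* Typical use of the two parser helpers above, as done for
 * CONTROLVM_BUS_CONFIGURE in bus_configure() below; the returned
 * string is a kmalloc'd copy the caller must eventually kfree():
 *
 *	parser_param_start(parser_ctx, PARSERSTRING_NAME);
 *	name = parser_string_get(parser_ctx);
 */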
541
542
543 static ssize_t toolaction_show(struct device *dev,
544 struct device_attribute *attr,
545 char *buf)
546 {
547 u8 tool_action;
548
549 visorchannel_read(controlvm_channel,
550 offsetof(struct spar_controlvm_channel_protocol,
551 tool_action), &tool_action, sizeof(u8));
552 return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
553 }
554
555 static ssize_t toolaction_store(struct device *dev,
556 struct device_attribute *attr,
557 const char *buf, size_t count)
558 {
559 u8 tool_action;
560 int ret;
561
562 if (kstrtou8(buf, 10, &tool_action))
563 return -EINVAL;
564
565 ret = visorchannel_write(controlvm_channel,
566 offsetof(struct spar_controlvm_channel_protocol,
567 tool_action),
568 &tool_action, sizeof(u8));
569
570 if (ret)
571 return ret;
572 return count;
573 }
574
575 static ssize_t boottotool_show(struct device *dev,
576 struct device_attribute *attr,
577 char *buf)
578 {
579 struct efi_spar_indication efi_spar_indication;
580
581 visorchannel_read(controlvm_channel,
582 offsetof(struct spar_controlvm_channel_protocol,
583 efi_spar_ind), &efi_spar_indication,
584 sizeof(struct efi_spar_indication));
585 return scnprintf(buf, PAGE_SIZE, "%u\n",
586 efi_spar_indication.boot_to_tool);
587 }
588
589 static ssize_t boottotool_store(struct device *dev,
590 struct device_attribute *attr,
591 const char *buf, size_t count)
592 {
593 int val, ret;
594 struct efi_spar_indication efi_spar_indication;
595
596 if (kstrtoint(buf, 10, &val))
597 return -EINVAL;
598
599 efi_spar_indication.boot_to_tool = val;
600 ret = visorchannel_write(controlvm_channel,
601 offsetof(struct spar_controlvm_channel_protocol,
602 efi_spar_ind), &(efi_spar_indication),
603 sizeof(struct efi_spar_indication));
604
605 if (ret)
606 return ret;
607 return count;
608 }
609
610 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
611 char *buf)
612 {
613 u32 error;
614
615 visorchannel_read(controlvm_channel,
616 offsetof(struct spar_controlvm_channel_protocol,
617 installation_error),
618 &error, sizeof(u32));
619 return scnprintf(buf, PAGE_SIZE, "%i\n", error);
620 }
621
622 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
623 const char *buf, size_t count)
624 {
625 u32 error;
626 int ret;
627
628 if (kstrtou32(buf, 10, &error))
629 return -EINVAL;
630
631 ret = visorchannel_write(controlvm_channel,
632 offsetof(struct spar_controlvm_channel_protocol,
633 installation_error),
634 &error, sizeof(u32));
635 if (ret)
636 return ret;
637 return count;
638 }
639
640 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
641 char *buf)
642 {
643 u32 text_id;
644
645 visorchannel_read(controlvm_channel,
646 offsetof(struct spar_controlvm_channel_protocol,
647 installation_text_id),
648 &text_id, sizeof(u32));
649 return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
650 }
651
652 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
653 const char *buf, size_t count)
654 {
655 u32 text_id;
656 int ret;
657
658 if (kstrtou32(buf, 10, &text_id))
659 return -EINVAL;
660
661 ret = visorchannel_write(controlvm_channel,
662 offsetof(struct spar_controlvm_channel_protocol,
663 installation_text_id),
664 &text_id, sizeof(u32));
665 if (ret)
666 return ret;
667 return count;
668 }
669
670 static ssize_t remaining_steps_show(struct device *dev,
671 struct device_attribute *attr, char *buf)
672 {
673 u16 remaining_steps;
674
675 visorchannel_read(controlvm_channel,
676 offsetof(struct spar_controlvm_channel_protocol,
677 installation_remaining_steps),
678 &remaining_steps, sizeof(u16));
679 return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
680 }
681
682 static ssize_t remaining_steps_store(struct device *dev,
683 struct device_attribute *attr,
684 const char *buf, size_t count)
685 {
686 u16 remaining_steps;
687 int ret;
688
689 if (kstrtou16(buf, 10, &remaining_steps))
690 return -EINVAL;
691
692 ret = visorchannel_write(controlvm_channel,
693 offsetof(struct spar_controlvm_channel_protocol,
694 installation_remaining_steps),
695 &remaining_steps, sizeof(u16));
696 if (ret)
697 return ret;
698 return count;
699 }
700
701 static void
702 bus_info_clear(void *v)
703 {
704 struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;
705
706 kfree(p->name);
707 kfree(p->description);
708 memset(p, 0, sizeof(struct visorchipset_bus_info));
709 }
710
711 static void
712 dev_info_clear(void *v)
713 {
714 struct visorchipset_device_info *p =
715 (struct visorchipset_device_info *) v;
716
717 memset(p, 0, sizeof(struct visorchipset_device_info));
718 }
719
720 struct visor_busdev {
721 u32 bus_no;
722 u32 dev_no;
723 };
724
725 static int match_visorbus_dev_by_id(struct device *dev, void *data)
726 {
727 struct visor_device *vdev = to_visor_device(dev);
728 struct visor_busdev *id = (struct visor_busdev *)data;
729 u32 bus_no = id->bus_no;
730 u32 dev_no = id->dev_no;
731
732 if ((vdev->chipset_bus_no == bus_no) &&
733 (vdev->chipset_dev_no == dev_no))
734 return 1;
735
736 return 0;
737 }
738 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
739 struct visor_device *from)
740 {
741 struct device *dev;
742 struct device *dev_start = NULL;
743 struct visor_device *vdev = NULL;
744 struct visor_busdev id = {
745 .bus_no = bus_no,
746 .dev_no = dev_no
747 };
748
749 if (from)
750 dev_start = &from->device;
751 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
752 match_visorbus_dev_by_id);
753 if (dev)
754 vdev = to_visor_device(dev);
755 return vdev;
756 }
757 EXPORT_SYMBOL(visorbus_get_device_by_id);
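/* Example (illustrative): look up the visor_device for bus 3,
 * device 7, searching from the start of the bus device list:
 *
 *	struct visor_device *vdev =
 *		visorbus_get_device_by_id(3, 7, NULL);
 */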
758
759 static struct visorchipset_bus_info *
760 bus_find(struct list_head *list, u32 bus_no)
761 {
762 struct visorchipset_bus_info *p;
763
764 list_for_each_entry(p, list, entry) {
765 if (p->bus_no == bus_no)
766 return p;
767 }
768
769 return NULL;
770 }
771
772 static struct visorchipset_device_info *
773 device_find(struct list_head *list, u32 bus_no, u32 dev_no)
774 {
775 struct visorchipset_device_info *p;
776
777 list_for_each_entry(p, list, entry) {
778 if (p->bus_no == bus_no && p->dev_no == dev_no)
779 return p;
780 }
781
782 return NULL;
783 }
784
785 static void busdevices_del(struct list_head *list, u32 bus_no)
786 {
787 struct visorchipset_device_info *p, *tmp;
788
789 list_for_each_entry_safe(p, tmp, list, entry) {
790 if (p->bus_no == bus_no) {
791 list_del(&p->entry);
792 kfree(p);
793 }
794 }
795 }
796
797 static u8
798 check_chipset_events(void)
799 {
800 int i;
801 u8 send_msg = 1;
802 /* Check events to determine if response should be sent */
803 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
804 send_msg &= chipset_events[i];
805 return send_msg;
806 }
807
808 static void
809 clear_chipset_events(void)
810 {
811 int i;
812 /* Clear chipset_events */
813 for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
814 chipset_events[i] = 0;
815 }
816
817 void
818 visorchipset_register_busdev(
819 struct visorchipset_busdev_notifiers *notifiers,
820 struct visorchipset_busdev_responders *responders,
821 struct ultra_vbus_deviceinfo *driver_info)
822 {
823 down(&notifier_lock);
824 if (!notifiers) {
825 memset(&busdev_notifiers, 0,
826 sizeof(busdev_notifiers));
827 visorbusregistered = 0; /* clear flag */
828 } else {
829 busdev_notifiers = *notifiers;
830 visorbusregistered = 1; /* set flag */
831 }
832 if (responders)
833 *responders = busdev_responders;
834 if (driver_info)
835 bus_device_info_init(driver_info, "chipset", "visorchipset",
836 VERSION, NULL);
837
838 up(&notifier_lock);
839 }
840 EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
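/* Sketch of how a bus driver registers with the chipset driver (the
 * my_* callback names here are hypothetical; visorbus supplies its
 * own):
 *
 *	static struct visorchipset_busdev_notifiers my_notifiers = {
 *		.bus_create = my_bus_create,
 *		.device_create = my_device_create,
 *	};
 *	static struct visorchipset_busdev_responders my_responders;
 *	static struct ultra_vbus_deviceinfo my_driver_info;
 *
 *	visorchipset_register_busdev(&my_notifiers, &my_responders,
 *				     &my_driver_info);
 */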
841
842 static void
843 cleanup_controlvm_structures(void)
844 {
845 struct visorchipset_bus_info *bi, *tmp_bi;
846 struct visorchipset_device_info *di, *tmp_di;
847
848 list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
849 bus_info_clear(bi);
850 list_del(&bi->entry);
851 kfree(bi);
852 }
853
854 list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
855 dev_info_clear(di);
856 list_del(&di->entry);
857 kfree(di);
858 }
859 }
860
861 static void
862 chipset_init(struct controlvm_message *inmsg)
863 {
864 static int chipset_inited;
865 enum ultra_chipset_feature features = 0;
866 int rc = CONTROLVM_RESP_SUCCESS;
867
868 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
869 if (chipset_inited) {
870 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
871 goto cleanup;
872 }
873 chipset_inited = 1;
874 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
875
876 /* Set features to indicate we support parahotplug (if Command
877 * also supports it). */
878 features = inmsg->cmd.init_chipset.features &
879 ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
881
882 /* Set the "reply" bit so Command knows this is a
883 * features-aware driver. */
884 features |= ULTRA_CHIPSET_FEATURE_REPLY;
885
886 cleanup:
887 if (rc < 0)
888 cleanup_controlvm_structures();
889 if (inmsg->hdr.flags.response_expected)
890 controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
891 }
892
893 static void
894 controlvm_init_response(struct controlvm_message *msg,
895 struct controlvm_message_header *msg_hdr, int response)
896 {
897 memset(msg, 0, sizeof(struct controlvm_message));
898 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
899 msg->hdr.payload_bytes = 0;
900 msg->hdr.payload_vm_offset = 0;
901 msg->hdr.payload_max_bytes = 0;
902 if (response < 0) {
903 msg->hdr.flags.failed = 1;
904 msg->hdr.completion_status = (u32) (-response);
905 }
906 }
907
908 static void
909 controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
910 {
911 struct controlvm_message outmsg;
912
913 controlvm_init_response(&outmsg, msg_hdr, response);
914 /* For DiagPool channel DEVICE_CHANGESTATE, we need to send
915 * back the deviceChangeState structure in the packet. */
916 if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
917 g_devicechangestate_packet.device_change_state.bus_no ==
918 g_diagpool_bus_no &&
919 g_devicechangestate_packet.device_change_state.dev_no ==
920 g_diagpool_dev_no)
921 outmsg.cmd = g_devicechangestate_packet;
922 if (outmsg.hdr.flags.test_message == 1)
923 return;
924
925 if (!visorchannel_signalinsert(controlvm_channel,
926 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
927 return;
928 }
929 }
930
931 static void
932 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
933 int response,
934 enum ultra_chipset_feature features)
935 {
936 struct controlvm_message outmsg;
937
938 controlvm_init_response(&outmsg, msg_hdr, response);
939 outmsg.cmd.init_chipset.features = features;
940 if (!visorchannel_signalinsert(controlvm_channel,
941 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
942 return;
943 }
944 }
945
946 static void controlvm_respond_physdev_changestate(
947 struct controlvm_message_header *msg_hdr, int response,
948 struct spar_segment_state state)
949 {
950 struct controlvm_message outmsg;
951
952 controlvm_init_response(&outmsg, msg_hdr, response);
953 outmsg.cmd.device_change_state.state = state;
954 outmsg.cmd.device_change_state.flags.phys_device = 1;
955 if (!visorchannel_signalinsert(controlvm_channel,
956 CONTROLVM_QUEUE_REQUEST, &outmsg)) {
957 return;
958 }
959 }
960
961 enum crash_obj_type {
962 CRASH_DEV,
963 CRASH_BUS,
964 };
965
966 static void
967 bus_responder(enum controlvm_id cmd_id,
968 struct controlvm_message_header *pending_msg_hdr,
969 int response)
970 {
971 if (pending_msg_hdr == NULL)
972 return; /* no controlvm response needed */
973
974 if (pending_msg_hdr->id != (u32)cmd_id)
975 return;
976
977 controlvm_respond(pending_msg_hdr, response);
978 }
979
980 static void
981 device_changestate_responder(enum controlvm_id cmd_id,
982 struct visorchipset_device_info *p, int response,
983 struct spar_segment_state response_state)
984 {
985 struct controlvm_message outmsg;
986 u32 bus_no = p->bus_no;
987 u32 dev_no = p->dev_no;
988
989 if (p->pending_msg_hdr == NULL)
990 return; /* no controlvm response needed */
991 if (p->pending_msg_hdr->id != cmd_id)
992 return;
993
994 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
995
996 outmsg.cmd.device_change_state.bus_no = bus_no;
997 outmsg.cmd.device_change_state.dev_no = dev_no;
998 outmsg.cmd.device_change_state.state = response_state;
999
1000 if (!visorchannel_signalinsert(controlvm_channel,
1001 CONTROLVM_QUEUE_REQUEST, &outmsg))
1002 return;
1003 }
1004
1005 static void
1006 device_responder(enum controlvm_id cmd_id,
1007 struct controlvm_message_header *pending_msg_hdr,
1008 int response)
1009 {
1010 if (pending_msg_hdr == NULL)
1011 return; /* no controlvm response needed */
1012
1013 if (pending_msg_hdr->id != (u32)cmd_id)
1014 return;
1015
1016 controlvm_respond(pending_msg_hdr, response);
1017 }
1018
1019 static void
1020 bus_epilog(struct visorchipset_bus_info *bus_info,
1021 u32 cmd, struct controlvm_message_header *msg_hdr,
1022 int response, bool need_response)
1023 {
1024 bool notified = false;
1025 struct controlvm_message_header *pmsg_hdr = NULL;
1026
1027 if (!bus_info) {
1028 /* relying on a valid passed-in response code */
1029 /* be lazy and re-use msg_hdr for this failure; is this OK? */
1030 pmsg_hdr = msg_hdr;
1031 goto away;
1032 }
1033
1034 if (bus_info->pending_msg_hdr) {
1035 /* only non-NULL if bus is still waiting on a response */
1036 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1037 pmsg_hdr = bus_info->pending_msg_hdr;
1038 goto away;
1039 }
1040
1041 if (need_response) {
1042 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1043 if (!pmsg_hdr) {
1044 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1045 goto away;
1046 }
1047
1048 memcpy(pmsg_hdr, msg_hdr,
1049 sizeof(struct controlvm_message_header));
1050 bus_info->pending_msg_hdr = pmsg_hdr;
1051 }
1052
1053 down(&notifier_lock);
1054 if (response == CONTROLVM_RESP_SUCCESS) {
1055 switch (cmd) {
1056 case CONTROLVM_BUS_CREATE:
1057 if (busdev_notifiers.bus_create) {
1058 (*busdev_notifiers.bus_create) (bus_info);
1059 notified = true;
1060 }
1061 break;
1062 case CONTROLVM_BUS_DESTROY:
1063 if (busdev_notifiers.bus_destroy) {
1064 (*busdev_notifiers.bus_destroy) (bus_info);
1065 notified = true;
1066 }
1067 break;
1068 }
1069 }
1070 away:
1071 if (notified)
1072 /* The callback function just called above is responsible
1073 * for calling the appropriate visorchipset_busdev_responders
1074 * function, which will call bus_responder()
1075 */
1076 ;
1077 else
1078 /*
1079 * Do not kfree(pmsg_hdr) as this is the failure path.
1080 * The success path ('notified') will call the responder
1081 * directly and kfree() there.
1082 */
1083 bus_responder(cmd, pmsg_hdr, response);
1084 up(&notifier_lock);
1085 }
1086
1087 static void
1088 device_epilog(struct visorchipset_device_info *dev_info,
1089 struct spar_segment_state state, u32 cmd,
1090 struct controlvm_message_header *msg_hdr, int response,
1091 bool need_response, bool for_visorbus)
1092 {
1093 struct visorchipset_busdev_notifiers *notifiers;
1094 bool notified = false;
1095 u32 bus_no;
1096 u32 dev_no;
1097 struct controlvm_message_header *pmsg_hdr = NULL;
1098
1099 char *envp[] = {
1100 "SPARSP_DIAGPOOL_PAUSED_STATE = 1",
1101 NULL
1102 };
1103
1104 notifiers = &busdev_notifiers;
1105
1106 if (!dev_info) {
1107 /* relying on a valid passed-in response code; re-use msg_hdr */
1108 pmsg_hdr = msg_hdr;
1109 goto away;
1110 }
1111 bus_no = dev_info->bus_no; /* deref only after the NULL check */
1112 dev_no = dev_info->dev_no;
1113 if (dev_info->pending_msg_hdr) {
1114 /* only non-NULL if dev is still waiting on a response */
1115 response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1116 pmsg_hdr = dev_info->pending_msg_hdr;
1117 goto away;
1118 }
1119
1120 if (need_response) {
1121 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
1122 if (!pmsg_hdr) {
1123 response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1124 goto away;
1125 }
1126
1127 memcpy(pmsg_hdr, msg_hdr,
1128 sizeof(struct controlvm_message_header));
1129 dev_info->pending_msg_hdr = pmsg_hdr;
1130 }
1131
1132 down(&notifier_lock);
1133 if (response >= 0) {
1134 switch (cmd) {
1135 case CONTROLVM_DEVICE_CREATE:
1136 if (notifiers->device_create) {
1137 (*notifiers->device_create) (dev_info);
1138 notified = true;
1139 }
1140 break;
1141 case CONTROLVM_DEVICE_CHANGESTATE:
1142 /* ServerReady / ServerRunning / SegmentStateRunning */
1143 if (state.alive == segment_state_running.alive &&
1144 state.operating ==
1145 segment_state_running.operating) {
1146 if (notifiers->device_resume) {
1147 (*notifiers->device_resume) (dev_info);
1148 notified = true;
1149 }
1150 }
1151 /* ServerNotReady / ServerLost / SegmentStateStandby */
1152 else if (state.alive == segment_state_standby.alive &&
1153 state.operating ==
1154 segment_state_standby.operating) {
1155 /* technically this is standby case
1156 * where server is lost
1157 */
1158 if (notifiers->device_pause) {
1159 (*notifiers->device_pause) (dev_info);
1160 notified = true;
1161 }
1162 } else if (state.alive == segment_state_paused.alive &&
1163 state.operating ==
1164 segment_state_paused.operating) {
1165 /* this is lite pause where channel is
1166 * still valid just 'pause' of it
1167 */
1168 if (bus_no == g_diagpool_bus_no &&
1169 dev_no == g_diagpool_dev_no) {
1170 /* this will trigger the
1171 * diag_shutdown.sh script in
1172 * the visorchipset hotplug */
1173 kobject_uevent_env
1174 (&visorchipset_platform_device.dev.
1175 kobj, KOBJ_ONLINE, envp);
1176 }
1177 }
1178 break;
1179 case CONTROLVM_DEVICE_DESTROY:
1180 if (notifiers->device_destroy) {
1181 (*notifiers->device_destroy) (dev_info);
1182 notified = true;
1183 }
1184 break;
1185 }
1186 }
1187 away:
1188 if (notified)
1189 /* The callback function just called above is responsible
1190 * for calling the appropriate visorchipset_busdev_responders
1191 * function, which will call device_responder()
1192 */
1193 ;
1194 else
1195 /*
1196 * Do not kfree(pmsg_hdr) as this is the failure path.
1197 * The success path ('notified') will call the responder
1198 * directly and kfree() there.
1199 */
1200 device_responder(cmd, pmsg_hdr, response);
1201 up(&notifier_lock);
1202 }
1203
1204 static void
1205 bus_create(struct controlvm_message *inmsg)
1206 {
1207 struct controlvm_message_packet *cmd = &inmsg->cmd;
1208 u32 bus_no = cmd->create_bus.bus_no;
1209 int rc = CONTROLVM_RESP_SUCCESS;
1210 struct visorchipset_bus_info *bus_info;
1211 struct visorchannel *visorchannel;
1212
1213 bus_info = bus_find(&bus_info_list, bus_no);
1214 if (bus_info && (bus_info->state.created == 1)) {
1215 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1216 POSTCODE_SEVERITY_ERR);
1217 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1218 goto cleanup;
1219 }
1220 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
1221 if (!bus_info) {
1222 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1223 POSTCODE_SEVERITY_ERR);
1224 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1225 goto cleanup;
1226 }
1227
1228 INIT_LIST_HEAD(&bus_info->entry);
1229 bus_info->bus_no = bus_no;
1230
1231 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
1232
1233 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
1234 cmd->create_bus.channel_bytes,
1235 GFP_KERNEL,
1236 cmd->create_bus.bus_data_type_uuid);
1237
1238 if (!visorchannel) {
1239 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
1240 POSTCODE_SEVERITY_ERR);
1241 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1242 kfree(bus_info);
1243 bus_info = NULL;
1244 goto cleanup;
1245 }
1246 bus_info->visorchannel = visorchannel;
1247 list_add(&bus_info->entry, &bus_info_list);
1248
1249 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
1250
1251 cleanup:
1252 bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
1253 rc, inmsg->hdr.flags.response_expected == 1);
1254 }
1255
1256 static void
1257 bus_destroy(struct controlvm_message *inmsg)
1258 {
1259 struct controlvm_message_packet *cmd = &inmsg->cmd;
1260 u32 bus_no = cmd->destroy_bus.bus_no;
1261 struct visorchipset_bus_info *bus_info;
1262 int rc = CONTROLVM_RESP_SUCCESS;
1263
1264 bus_info = bus_find(&bus_info_list, bus_no);
1265 if (!bus_info)
1266 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1267 else if (bus_info->state.created == 0)
1268 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1269
1270 bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
1271 rc, inmsg->hdr.flags.response_expected == 1);
1272 }
1273
1274 static void
1275 bus_configure(struct controlvm_message *inmsg,
1276 struct parser_context *parser_ctx)
1277 {
1278 struct controlvm_message_packet *cmd = &inmsg->cmd;
1279 u32 bus_no;
1280 struct visorchipset_bus_info *bus_info;
1281 int rc = CONTROLVM_RESP_SUCCESS;
1282 char s[99];
1283
1284 bus_no = cmd->configure_bus.bus_no;
1285 POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
1286 POSTCODE_SEVERITY_INFO);
1287
1288 bus_info = bus_find(&bus_info_list, bus_no);
1289 if (!bus_info) {
1290 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1291 POSTCODE_SEVERITY_ERR);
1292 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1293 } else if (bus_info->state.created == 0) {
1294 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1295 POSTCODE_SEVERITY_ERR);
1296 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1297 } else if (bus_info->pending_msg_hdr != NULL) {
1298 POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
1299 POSTCODE_SEVERITY_ERR);
1300 rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
1301 } else {
1302 visorchannel_set_clientpartition(bus_info->visorchannel,
1303 cmd->configure_bus.guest_handle);
1304 bus_info->partition_uuid = parser_id_get(parser_ctx);
1305 parser_param_start(parser_ctx, PARSERSTRING_NAME);
1306 bus_info->name = parser_string_get(parser_ctx);
1307
1308 visorchannel_uuid_id(&bus_info->partition_uuid, s);
1309 POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
1310 POSTCODE_SEVERITY_INFO);
1311 }
1312 bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
1313 rc, inmsg->hdr.flags.response_expected == 1);
1314 }
1315
1316 static void
1317 my_device_create(struct controlvm_message *inmsg)
1318 {
1319 struct controlvm_message_packet *cmd = &inmsg->cmd;
1320 u32 bus_no = cmd->create_device.bus_no;
1321 u32 dev_no = cmd->create_device.dev_no;
1322 struct visorchipset_device_info *dev_info;
1323 struct visorchipset_bus_info *bus_info;
1324 struct visorchannel *visorchannel;
1325 int rc = CONTROLVM_RESP_SUCCESS;
1326
1327 dev_info = device_find(&dev_info_list, bus_no, dev_no);
1328 if (dev_info && (dev_info->state.created == 1)) {
1329 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1330 POSTCODE_SEVERITY_ERR);
1331 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1332 goto cleanup;
1333 }
1334 bus_info = bus_find(&bus_info_list, bus_no);
1335 if (!bus_info) {
1336 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1337 POSTCODE_SEVERITY_ERR);
1338 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1339 goto cleanup;
1340 }
1341 if (bus_info->state.created == 0) {
1342 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1343 POSTCODE_SEVERITY_ERR);
1344 rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
1345 goto cleanup;
1346 }
1347 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
1348 if (!dev_info) {
1349 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1350 POSTCODE_SEVERITY_ERR);
1351 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1352 goto cleanup;
1353 }
1354
1355 INIT_LIST_HEAD(&dev_info->entry);
1356 dev_info->bus_no = bus_no;
1357 dev_info->dev_no = dev_no;
1358 dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
1359 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
1360 POSTCODE_SEVERITY_INFO);
1361
1362 visorchannel = visorchannel_create(cmd->create_device.channel_addr,
1363 cmd->create_device.channel_bytes,
1364 GFP_KERNEL,
1365 cmd->create_device.data_type_uuid);
1366
1367 if (!visorchannel) {
1368 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
1369 POSTCODE_SEVERITY_ERR);
1370 rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
1371 kfree(dev_info);
1372 dev_info = NULL;
1373 goto cleanup;
1374 }
1375 dev_info->visorchannel = visorchannel;
1376 dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
1377 list_add(&dev_info->entry, &dev_info_list);
1378 POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
1379 POSTCODE_SEVERITY_INFO);
1380 cleanup:
1381 /* get the bus and devNo for DiagPool channel */
1382 if (dev_info &&
1383 is_diagpool_channel(cmd->create_device.data_type_uuid)) {
1384 g_diagpool_bus_no = bus_no;
1385 g_diagpool_dev_no = dev_no;
1386 }
1387 device_epilog(dev_info, segment_state_running,
1388 CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
1389 inmsg->hdr.flags.response_expected == 1, 1);
1390 }
1391
1392 static void
1393 my_device_changestate(struct controlvm_message *inmsg)
1394 {
1395 struct controlvm_message_packet *cmd = &inmsg->cmd;
1396 u32 bus_no = cmd->device_change_state.bus_no;
1397 u32 dev_no = cmd->device_change_state.dev_no;
1398 struct spar_segment_state state = cmd->device_change_state.state;
1399 struct visorchipset_device_info *dev_info;
1400 int rc = CONTROLVM_RESP_SUCCESS;
1401
1402 dev_info = device_find(&dev_info_list, bus_no, dev_no);
1403 if (!dev_info) {
1404 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1405 POSTCODE_SEVERITY_ERR);
1406 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1407 } else if (dev_info->state.created == 0) {
1408 POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
1409 POSTCODE_SEVERITY_ERR);
1410 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1411 }
1412 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1413 device_epilog(dev_info, state,
1414 CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
1415 inmsg->hdr.flags.response_expected == 1, 1);
1416 }
1417
1418 static void
1419 my_device_destroy(struct controlvm_message *inmsg)
1420 {
1421 struct controlvm_message_packet *cmd = &inmsg->cmd;
1422 u32 bus_no = cmd->destroy_device.bus_no;
1423 u32 dev_no = cmd->destroy_device.dev_no;
1424 struct visorchipset_device_info *dev_info;
1425 int rc = CONTROLVM_RESP_SUCCESS;
1426
1427 dev_info = device_find(&dev_info_list, bus_no, dev_no);
1428 if (!dev_info)
1429 rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
1430 else if (dev_info->state.created == 0)
1431 rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
1432
1433 if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
1434 device_epilog(dev_info, segment_state_running,
1435 CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
1436 inmsg->hdr.flags.response_expected == 1, 1);
1437 }
1438
1439 /* When provided with the physical address of the controlvm channel
1440 * (phys_addr), the offset to the payload area we need to manage
1441 * (offset), and the size of this payload area (bytes), fills in the
1442 * controlvm_payload_info struct. Returns CONTROLVM_RESP_SUCCESS, or a
1443 * negative CONTROLVM_RESP_ERROR code on failure.
1444 */
1445 static int
1446 initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
1447 struct visor_controlvm_payload_info *info)
1448 {
1449 u8 __iomem *payload = NULL;
1450 int rc = CONTROLVM_RESP_SUCCESS;
1451
1452 if (!info) {
1453 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1454 goto cleanup;
1455 }
1456 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1457 if ((offset == 0) || (bytes == 0)) {
1458 rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
1459 goto cleanup;
1460 }
1461 payload = ioremap_cache(phys_addr + offset, bytes);
1462 if (!payload) {
1463 rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
1464 goto cleanup;
1465 }
1466
1467 info->offset = offset;
1468 info->bytes = bytes;
1469 info->ptr = payload;
1470
1471 cleanup:
1472 if (rc < 0) {
1473 if (payload) {
1474 iounmap(payload);
1475 payload = NULL;
1476 }
1477 }
1478 return rc;
1479 }
1480
1481 static void
1482 destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
1483 {
1484 if (info->ptr) {
1485 iounmap(info->ptr);
1486 info->ptr = NULL;
1487 }
1488 memset(info, 0, sizeof(struct visor_controlvm_payload_info));
1489 }
1490
1491 static void
1492 initialize_controlvm_payload(void)
1493 {
1494 u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
1495 u64 payload_offset = 0;
1496 u32 payload_bytes = 0;
1497
1498 if (visorchannel_read(controlvm_channel,
1499 offsetof(struct spar_controlvm_channel_protocol,
1500 request_payload_offset),
1501 &payload_offset, sizeof(payload_offset)) < 0) {
1502 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1503 POSTCODE_SEVERITY_ERR);
1504 return;
1505 }
1506 if (visorchannel_read(controlvm_channel,
1507 offsetof(struct spar_controlvm_channel_protocol,
1508 request_payload_bytes),
1509 &payload_bytes, sizeof(payload_bytes)) < 0) {
1510 POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
1511 POSTCODE_SEVERITY_ERR);
1512 return;
1513 }
1514 initialize_controlvm_payload_info(phys_addr,
1515 payload_offset, payload_bytes,
1516 &controlvm_payload_info);
1517 }
1518
1519 /* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1520 * Returns CONTROLVM_RESP_xxx code.
1521 */
1522 static int
1523 visorchipset_chipset_ready(void)
1524 {
1525 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
1526 return CONTROLVM_RESP_SUCCESS;
1527 }
1528
1529 static int
1530 visorchipset_chipset_selftest(void)
1531 {
1532 char env_selftest[20];
1533 char *envp[] = { env_selftest, NULL };
1534
1535 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1536 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1537 envp);
1538 return CONTROLVM_RESP_SUCCESS;
1539 }
1540
1541 /* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1542 * Returns CONTROLVM_RESP_xxx code.
1543 */
1544 static int
1545 visorchipset_chipset_notready(void)
1546 {
1547 kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
1548 return CONTROLVM_RESP_SUCCESS;
1549 }
1550
1551 static void
1552 chipset_ready(struct controlvm_message_header *msg_hdr)
1553 {
1554 int rc = visorchipset_chipset_ready();
1555
1556 if (rc != CONTROLVM_RESP_SUCCESS)
1557 rc = -rc;
1558 if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1559 controlvm_respond(msg_hdr, rc);
1560 if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1561 /* Send CHIPSET_READY response when all modules have been loaded
1562 * and disks mounted for the partition
1563 */
1564 g_chipset_msg_hdr = *msg_hdr;
1565 }
1566 }
1567
1568 static void
1569 chipset_selftest(struct controlvm_message_header *msg_hdr)
1570 {
1571 int rc = visorchipset_chipset_selftest();
1572
1573 if (rc != CONTROLVM_RESP_SUCCESS)
1574 rc = -rc;
1575 if (msg_hdr->flags.response_expected)
1576 controlvm_respond(msg_hdr, rc);
1577 }
1578
1579 static void
1580 chipset_notready(struct controlvm_message_header *msg_hdr)
1581 {
1582 int rc = visorchipset_chipset_notready();
1583
1584 if (rc != CONTROLVM_RESP_SUCCESS)
1585 rc = -rc;
1586 if (msg_hdr->flags.response_expected)
1587 controlvm_respond(msg_hdr, rc);
1588 }
1589
1590 /* This is your "one-stop" shop for grabbing the next message from the
1591 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1592 */
1593 static bool
1594 read_controlvm_event(struct controlvm_message *msg)
1595 {
1596 if (visorchannel_signalremove(controlvm_channel,
1597 CONTROLVM_QUEUE_EVENT, msg)) {
1598 /* got a message */
1599 if (msg->hdr.flags.test_message == 1)
1600 return false;
1601 return true;
1602 }
1603 return false;
1604 }
1605
1606 /*
1607 * The general parahotplug flow works as follows. The visorchipset
1608 * driver receives a DEVICE_CHANGESTATE message from Command
1609 * specifying a physical device to enable or disable. The CONTROLVM
1610 * message handler calls parahotplug_process_message, which then adds
1611 * the message to a global list and kicks off a udev event which
1612 * causes a user level script to enable or disable the specified
1613 * device. The udev script then writes to the devicedisabled (or
1614 * deviceenabled) sysfs attribute, which lands in
1615 * parahotplug_request_complete(), at which point the appropriate
1616 * CONTROLVM message is retrieved from the list and responded to.
1617 */
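/* Sketch of the user-level half (a hypothetical udev script): the
 * uevent carries SPAR_PARAHOTPLUG_ID (see
 * parahotplug_request_kickoff() below), and the script acknowledges a
 * disable by writing that id back:
 *
 *	echo "$SPAR_PARAHOTPLUG_ID" > \
 *	    /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */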
1618
1619 #define PARAHOTPLUG_TIMEOUT_MS 2000
1620
1621 /*
1622 * Generate unique int to match an outstanding CONTROLVM message with a
1623 * udev script response written back via sysfs
1624 */
1625 static int
1626 parahotplug_next_id(void)
1627 {
1628 static atomic_t id = ATOMIC_INIT(0);
1629
1630 return atomic_inc_return(&id);
1631 }
1632
1633 /*
1634 * Returns the time (in jiffies) when a CONTROLVM message on the list
1635 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1636 */
1637 static unsigned long
1638 parahotplug_next_expiration(void)
1639 {
1640 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1641 }
1642
1643 /*
1644 * Create a parahotplug_request, which is basically a wrapper for a
1645 * CONTROLVM_MESSAGE that we can stick on a list
1646 */
1647 static struct parahotplug_request *
1648 parahotplug_request_create(struct controlvm_message *msg)
1649 {
1650 struct parahotplug_request *req;
1651
1652 req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1653 if (!req)
1654 return NULL;
1655
1656 req->id = parahotplug_next_id();
1657 req->expiration = parahotplug_next_expiration();
1658 req->msg = *msg;
1659
1660 return req;
1661 }
1662
1663 /*
1664 * Free a parahotplug_request.
1665 */
1666 static void
1667 parahotplug_request_destroy(struct parahotplug_request *req)
1668 {
1669 kfree(req);
1670 }
1671
1672 /*
1673 * Cause uevent to run the user level script to do the disable/enable
1674 * specified in (the CONTROLVM message in) the specified
1675 * parahotplug_request
1676 */
1677 static void
1678 parahotplug_request_kickoff(struct parahotplug_request *req)
1679 {
1680 struct controlvm_message_packet *cmd = &req->msg.cmd;
1681 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1682 env_func[40];
1683 char *envp[] = {
1684 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1685 };
1686
1687 sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1688 sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1689 sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1690 cmd->device_change_state.state.active);
1691 sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1692 cmd->device_change_state.bus_no);
1693 sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1694 cmd->device_change_state.dev_no >> 3);
1695 sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1696 cmd->device_change_state.dev_no & 0x7);
1697
1698 kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1699 envp);
1700 }
1701
1702 /*
1703 * Remove any request from the list that's been on there too long and
1704 * respond with an error.
1705 */
1706 static void
1707 parahotplug_process_list(void)
1708 {
1709 struct list_head *pos;
1710 struct list_head *tmp;
1711
1712 spin_lock(&parahotplug_request_list_lock);
1713
1714 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1715 struct parahotplug_request *req =
1716 list_entry(pos, struct parahotplug_request, list);
1717
1718 if (!time_after_eq(jiffies, req->expiration))
1719 continue;
1720
1721 list_del(pos);
1722 if (req->msg.hdr.flags.response_expected)
1723 controlvm_respond_physdev_changestate(
1724 &req->msg.hdr,
1725 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1726 req->msg.cmd.device_change_state.state);
1727 parahotplug_request_destroy(req);
1728 }
1729
1730 spin_unlock(&parahotplug_request_list_lock);
1731 }
1732
1733 /*
1734 * Called from the sysfs store handlers, which means the user script
1735 * has finished the enable/disable. Find the matching identifier, and
1736 * respond to the CONTROLVM message with success.
1737 */
1738 static int
1739 parahotplug_request_complete(int id, u16 active)
1740 {
1741 struct list_head *pos;
1742 struct list_head *tmp;
1743
1744 spin_lock(&parahotplug_request_list_lock);
1745
1746 /* Look for a request matching "id". */
1747 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1748 struct parahotplug_request *req =
1749 list_entry(pos, struct parahotplug_request, list);
1750 if (req->id == id) {
1751 /* Found a match. Remove it from the list and
1752 * respond.
1753 */
1754 list_del(pos);
1755 spin_unlock(&parahotplug_request_list_lock);
1756 req->msg.cmd.device_change_state.state.active = active;
1757 if (req->msg.hdr.flags.response_expected)
1758 controlvm_respond_physdev_changestate(
1759 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1760 req->msg.cmd.device_change_state.state);
1761 parahotplug_request_destroy(req);
1762 return 0;
1763 }
1764 }
1765
1766 spin_unlock(&parahotplug_request_list_lock);
1767 return -1;
1768 }
1769
1770 /*
1771 * Enables or disables a PCI device by kicking off a udev script
1772 */
1773 static void
1774 parahotplug_process_message(struct controlvm_message *inmsg)
1775 {
1776 struct parahotplug_request *req;
1777
1778 req = parahotplug_request_create(inmsg);
1779
1780 if (!req)
1781 return;
1782
1783 if (inmsg->cmd.device_change_state.state.active) {
1784 /* For enable messages, just respond with success
1785 * right away. This is a bit of a hack, but there are
1786 * issues with the early enable messages we get (with
1787 * either the udev script not detecting that the device
1788 * is up, or not getting called at all). Fortunately
1789 * the messages that get lost don't matter anyway, as
1790 * devices are automatically enabled at
1791 * initialization.
1792 */
1793 parahotplug_request_kickoff(req);
1794 controlvm_respond_physdev_changestate(&inmsg->hdr,
1795 CONTROLVM_RESP_SUCCESS,
1796 inmsg->cmd.device_change_state.state);
1797 parahotplug_request_destroy(req);
1798 } else {
1799 /* For disable messages, add the request to the
1800 * request list before kicking off the udev script. It
1801 * won't get responded to until the script has
1802 * indicated it's done.
1803 */
1804 spin_lock(&parahotplug_request_list_lock);
1805 list_add_tail(&req->list, &parahotplug_request_list);
1806 spin_unlock(&parahotplug_request_list_lock);
1807
1808 parahotplug_request_kickoff(req);
1809 }
1810 }
1811
1812 /* Process a controlvm message.
1813 * Return result:
1814 * false - this function will return false only in the case where the
1815 * controlvm message was NOT processed, but processing must be
1816 * retried before reading the next controlvm message; a
1817 * scenario where this can occur is when we need to throttle
1818 * the allocation of memory in which to copy out controlvm
1819 * payload data
1820 * true - processing of the controlvm message completed,
1821 * either successfully or with an error.
1822 */
1823 static bool
1824 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1825 {
1826 struct controlvm_message_packet *cmd = &inmsg.cmd;
1827 u64 parm_addr;
1828 u32 parm_bytes;
1829 struct parser_context *parser_ctx = NULL;
1830 bool local_addr;
1831 struct controlvm_message ackmsg;
1832
1833 /* create parsing context if necessary */
1834 local_addr = (inmsg.hdr.flags.test_message == 1);
1835 if (channel_addr == 0)
1836 return true;
1837 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1838 parm_bytes = inmsg.hdr.payload_bytes;
1839
1840 /* Parameter and channel addresses within test messages actually lie
1841 * within our OS-controlled memory. We need to know that, because it
1842 * makes a difference in how we compute the virtual address.
1843 */
1844 if (parm_addr && parm_bytes) {
1845 bool retry = false;
1846
1847 parser_ctx =
1848 parser_init_byte_stream(parm_addr, parm_bytes,
1849 local_addr, &retry);
1850 if (!parser_ctx && retry)
1851 return false;
1852 }
1853
1854 if (!local_addr) {
1855 controlvm_init_response(&ackmsg, &inmsg.hdr,
1856 CONTROLVM_RESP_SUCCESS);
1857 if (controlvm_channel)
1858 visorchannel_signalinsert(controlvm_channel,
1859 CONTROLVM_QUEUE_ACK,
1860 &ackmsg);
1861 }
1862 switch (inmsg.hdr.id) {
1863 case CONTROLVM_CHIPSET_INIT:
1864 chipset_init(&inmsg);
1865 break;
1866 case CONTROLVM_BUS_CREATE:
1867 bus_create(&inmsg);
1868 break;
1869 case CONTROLVM_BUS_DESTROY:
1870 bus_destroy(&inmsg);
1871 break;
1872 case CONTROLVM_BUS_CONFIGURE:
1873 bus_configure(&inmsg, parser_ctx);
1874 break;
1875 case CONTROLVM_DEVICE_CREATE:
1876 my_device_create(&inmsg);
1877 break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the cmd structure for later use when
			 * sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
		}
		break;
1889 case CONTROLVM_DEVICE_DESTROY:
1890 my_device_destroy(&inmsg);
1891 break;
1892 case CONTROLVM_DEVICE_CONFIGURE:
1893 /* no op for now, just send a respond that we passed */
1894 if (inmsg.hdr.flags.response_expected)
1895 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1896 break;
1897 case CONTROLVM_CHIPSET_READY:
1898 chipset_ready(&inmsg.hdr);
1899 break;
1900 case CONTROLVM_CHIPSET_SELFTEST:
1901 chipset_selftest(&inmsg.hdr);
1902 break;
1903 case CONTROLVM_CHIPSET_STOP:
1904 chipset_notready(&inmsg.hdr);
1905 break;
1906 default:
1907 if (inmsg.hdr.flags.response_expected)
1908 controlvm_respond(&inmsg.hdr,
1909 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1910 break;
1911 }
1912
1913 if (parser_ctx) {
1914 parser_done(parser_ctx);
1915 parser_ctx = NULL;
1916 }
1917 return true;
1918 }
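
/*
 * Sketch of the caller contract documented above (illustrative only;
 * controlvm_periodic_work() below is the real consumer, and "chan" is a
 * stand-in for controlvm_channel): a false return means "stash the
 * message and retry on the next poll cycle", not "drop it":
 *
 *	if (!handle_command(inmsg, visorchannel_get_physaddr(chan))) {
 *		controlvm_pending_msg = inmsg;
 *		controlvm_pending_msg_valid = true;
 *	}
 */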
1919
1920 static inline unsigned int
1921 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1922 {
1923 struct vmcall_io_controlvm_addr_params params;
1924 int result = VMCALL_SUCCESS;
1925 u64 physaddr;
1926
1927 physaddr = virt_to_phys(&params);
1928 ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1929 if (VMCALL_SUCCESSFUL(result)) {
1930 *control_addr = params.address;
1931 *control_bytes = params.channel_bytes;
1932 }
1933 return result;
1934 }
1935
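/*
 * Ask the hypervisor, via VMCALL_IO_CONTROLVM_ADDR, where the controlvm
 * channel lives in guest physical memory. A return of 0 means the
 * vmcall failed, which callers treat as "no controlvm channel present".
 */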
1936 static u64 controlvm_get_channel_address(void)
1937 {
1938 u64 addr = 0;
1939 u32 size = 0;
1940
1941 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1942 return 0;
1943
1944 return addr;
1945 }
1946
1947 static void
1948 controlvm_periodic_work(struct work_struct *work)
1949 {
1950 struct controlvm_message inmsg;
1951 bool got_command = false;
1952 bool handle_command_failed = false;
1953 static u64 poll_count;
1954
1955 /* make sure visorbus server is registered for controlvm callbacks */
1956 if (visorchipset_visorbusregwait && !visorbusregistered)
1957 goto cleanup;
1958
	poll_count++;
	if (poll_count < 250)
		goto cleanup;	/* don't process messages until the 250th poll */
1964
1965 /* Check events to determine if response to CHIPSET_READY
1966 * should be sent
1967 */
1968 if (visorchipset_holdchipsetready &&
1969 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1970 if (check_chipset_events() == 1) {
1971 controlvm_respond(&g_chipset_msg_hdr, 0);
1972 clear_chipset_events();
1973 memset(&g_chipset_msg_hdr, 0,
1974 sizeof(struct controlvm_message_header));
1975 }
1976 }
1977
	/* drain and discard any responses queued for us; they are not used */
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;

	if (controlvm_pending_msg_valid) {
		/* we throttled processing of a prior msg, so try to
		 * process it again rather than reading a new one
		 */
		inmsg = controlvm_pending_msg;
		controlvm_pending_msg_valid = false;
		got_command = true;
	} else {
		got_command = read_controlvm_event(&inmsg);
	}
1995
1996 handle_command_failed = false;
1997 while (got_command && (!handle_command_failed)) {
1998 most_recent_message_jiffies = jiffies;
1999 if (handle_command(inmsg,
2000 visorchannel_get_physaddr
2001 (controlvm_channel)))
2002 got_command = read_controlvm_event(&inmsg);
2003 else {
2004 /* this is a scenario where throttling
2005 * is required, but probably NOT an
2006 * error...; we stash the current
2007 * controlvm msg so we will attempt to
2008 * reprocess it on our next loop
2009 */
2010 handle_command_failed = true;
2011 controlvm_pending_msg = inmsg;
2012 controlvm_pending_msg_valid = true;
2013 }
2014 }
2015
2016 /* parahotplug_worker */
2017 parahotplug_process_list();
2018
2019 cleanup:
2020
	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}
2033
2034 queue_delayed_work(periodic_controlvm_workqueue,
2035 &periodic_controlvm_work, poll_jiffies);
2036 }
2037
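/*
 * When booting in a kdump kernel we cannot rely on the normal stream of
 * controlvm messages, so we replay the "create bus" and "create device"
 * messages that were saved in the controlvm channel at crash time; this
 * brings up just enough (the storage bus and device) to write the dump.
 */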
2038 static void
2039 setup_crash_devices_work_queue(struct work_struct *work)
2040 {
2041 struct controlvm_message local_crash_bus_msg;
2042 struct controlvm_message local_crash_dev_msg;
2043 struct controlvm_message msg;
2044 u32 local_crash_msg_offset;
2045 u16 local_crash_msg_count;
2046
2047 /* make sure visorbus is registered for controlvm callbacks */
2048 if (visorchipset_visorbusregwait && !visorbusregistered)
2049 goto cleanup;
2050
2051 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2052
2053 /* send init chipset msg */
2054 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2055 msg.cmd.init_chipset.bus_count = 23;
2056 msg.cmd.init_chipset.switch_count = 0;
2057
2058 chipset_init(&msg);
2059
2060 /* get saved message count */
2061 if (visorchannel_read(controlvm_channel,
2062 offsetof(struct spar_controlvm_channel_protocol,
2063 saved_crash_message_count),
2064 &local_crash_msg_count, sizeof(u16)) < 0) {
2065 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2066 POSTCODE_SEVERITY_ERR);
2067 return;
2068 }
2069
2070 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
2071 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2072 local_crash_msg_count,
2073 POSTCODE_SEVERITY_ERR);
2074 return;
2075 }
2076
2077 /* get saved crash message offset */
2078 if (visorchannel_read(controlvm_channel,
2079 offsetof(struct spar_controlvm_channel_protocol,
2080 saved_crash_message_offset),
2081 &local_crash_msg_offset, sizeof(u32)) < 0) {
2082 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2083 POSTCODE_SEVERITY_ERR);
2084 return;
2085 }
2086
2087 /* read create device message for storage bus offset */
2088 if (visorchannel_read(controlvm_channel,
2089 local_crash_msg_offset,
2090 &local_crash_bus_msg,
2091 sizeof(struct controlvm_message)) < 0) {
2092 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2093 POSTCODE_SEVERITY_ERR);
2094 return;
2095 }
2096
2097 /* read create device message for storage device */
2098 if (visorchannel_read(controlvm_channel,
2099 local_crash_msg_offset +
2100 sizeof(struct controlvm_message),
2101 &local_crash_dev_msg,
2102 sizeof(struct controlvm_message)) < 0) {
2103 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2104 POSTCODE_SEVERITY_ERR);
2105 return;
2106 }
2107
2108 /* reuse IOVM create bus message */
2109 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
2110 bus_create(&local_crash_bus_msg);
2111 } else {
2112 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2113 POSTCODE_SEVERITY_ERR);
2114 return;
2115 }
2116
2117 /* reuse create device message for storage device */
2118 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2119 my_device_create(&local_crash_dev_msg);
2120 } else {
2121 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2122 POSTCODE_SEVERITY_ERR);
2123 return;
2124 }
2125 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2126 return;
2127
2128 cleanup:
2129
2130 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2131
2132 queue_delayed_work(periodic_controlvm_workqueue,
2133 &periodic_controlvm_work, poll_jiffies);
2134 }
2135
2136 static void
2137 bus_create_response(struct visorchipset_bus_info *bus_info, int response)
2138 {
2139 if (response >= 0) {
2140 bus_info->state.created = 1;
2141 } else {
2142 if (response != -CONTROLVM_RESP_ERROR_ALREADY_DONE)
2143 /* undo the row we just created... */
2144 busdevices_del(&dev_info_list, bus_info->bus_no);
2145 }
2146
2147 bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
2148 response);
2149
2150 kfree(bus_info->pending_msg_hdr);
2151 bus_info->pending_msg_hdr = NULL;
2152 }
2153
2154 static void
2155 bus_destroy_response(struct visorchipset_bus_info *bus_info, int response)
2156 {
2157 bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
2158 response);
2159
2160 kfree(bus_info->pending_msg_hdr);
2161 bus_info->pending_msg_hdr = NULL;
2162
2163 bus_info_clear(bus_info);
2164 busdevices_del(&dev_info_list, bus_info->bus_no);
2165 }
2166
2167 static void
2168 device_create_response(struct visorchipset_device_info *dev_info, int response)
2169 {
2170 if (response >= 0)
2171 dev_info->state.created = 1;
2172
2173 device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
2174 response);
2175
2176 kfree(dev_info->pending_msg_hdr);
2177 dev_info->pending_msg_hdr = NULL;
2178 }
2179
2180 static void
2181 device_destroy_response(struct visorchipset_device_info *dev_info, int response)
2182 {
2183 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
2184 response);
2185
2186 kfree(dev_info->pending_msg_hdr);
2187 dev_info->pending_msg_hdr = NULL;
2188
2189 dev_info_clear(dev_info);
2190 }
2191
2192 static void
2193 visorchipset_device_pause_response(struct visorchipset_device_info *dev_info,
2194 int response)
2195 {
2196 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2197 dev_info, response,
2198 segment_state_standby);
2199
2200 kfree(dev_info->pending_msg_hdr);
2201 dev_info->pending_msg_hdr = NULL;
2202 }
2203
2204 static void
2205 device_resume_response(struct visorchipset_device_info *dev_info, int response)
2206 {
2207 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2208 dev_info, response,
2209 segment_state_running);
2210
2211 kfree(dev_info->pending_msg_hdr);
2212 dev_info->pending_msg_hdr = NULL;
2213 }
2214
2215 bool
2216 visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2217 {
2218 void *p = bus_find(&bus_info_list, bus_no);
2219
2220 if (!p)
2221 return false;
2222 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2223 return true;
2224 }
2225 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2226
2227 bool
2228 visorchipset_set_bus_context(struct visorchipset_bus_info *p, void *context)
2229 {
2230 if (!p)
2231 return false;
2232 p->bus_driver_context = context;
2233 return true;
2234 }
2235 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2236
2237 bool
2238 visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2239 struct visorchipset_device_info *dev_info)
2240 {
2241 void *p = device_find(&dev_info_list, bus_no, dev_no);
2242
2243 if (!p)
2244 return false;
2245 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2246 return true;
2247 }
2248 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2249
2250 bool
2251 visorchipset_set_device_context(struct visorchipset_device_info *p,
2252 void *context)
2253 {
2254 if (!p)
2255 return false;
2256 p->bus_driver_context = context;
2257 return true;
2258 }
2259 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
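
/*
 * Illustrative use of the exported info/context API from a bus driver
 * (sketch only; bus_no, dev_no, and my_ctx are hypothetical). Note that
 * visorchipset_get_device_info() hands back a copy of the tracked
 * entry, so the context here is attached to the caller's copy:
 *
 *	struct visorchipset_device_info dev_info;
 *
 *	if (visorchipset_get_device_info(bus_no, dev_no, &dev_info))
 *		visorchipset_set_device_context(&dev_info, my_ctx);
 */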
2260
2261 static ssize_t chipsetready_store(struct device *dev,
2262 struct device_attribute *attr,
2263 const char *buf, size_t count)
2264 {
2265 char msgtype[64];
2266
2267 if (sscanf(buf, "%63s", msgtype) != 1)
2268 return -EINVAL;
2269
2270 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2271 chipset_events[0] = 1;
2272 return count;
2273 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2274 chipset_events[1] = 1;
2275 return count;
2276 }
2277 return -EINVAL;
2278 }
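
/*
 * Guest tooling reports these milestones by writing one of the two
 * recognized tokens to the chipsetready attribute; a minimal C sketch
 * (the sysfs path is an assumption for illustration):
 *
 *	int fd = open("/sys/devices/platform/visorchipset/guest/chipsetready",
 *		      O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "MODULES_LOADED", sizeof("MODULES_LOADED") - 1);
 *		close(fd);
 *	}
 *
 * When the module was loaded with holdchipsetready=1,
 * controlvm_periodic_work() releases the held CHIPSET_READY response
 * once check_chipset_events() reports the expected events.
 */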
2279
2280 /* The parahotplug/devicedisabled interface gets called by our support script
2281 * when an SR-IOV device has been shut down. The ID is passed to the script
2282 * and then passed back when the device has been removed.
2283 */
2284 static ssize_t devicedisabled_store(struct device *dev,
2285 struct device_attribute *attr,
2286 const char *buf, size_t count)
2287 {
2288 unsigned int id;
2289
2290 if (kstrtouint(buf, 10, &id))
2291 return -EINVAL;
2292
2293 parahotplug_request_complete(id, 0);
2294 return count;
2295 }
2296
2297 /* The parahotplug/deviceenabled interface gets called by our support script
2298 * when an SR-IOV device has been recovered. The ID is passed to the script
2299 * and then passed back when the device has been brought back up.
2300 */
2301 static ssize_t deviceenabled_store(struct device *dev,
2302 struct device_attribute *attr,
2303 const char *buf, size_t count)
2304 {
2305 unsigned int id;
2306
2307 if (kstrtouint(buf, 10, &id))
2308 return -EINVAL;
2309
2310 parahotplug_request_complete(id, 1);
2311 return count;
2312 }
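
/*
 * Script side of the handshake (path and environment variable are
 * assumptions for illustration): parahotplug_request_kickoff() hands
 * the udev script an id; once the SR-IOV device has actually been torn
 * down or restored, the script writes that id back, e.g.
 * "echo $SPAR_PARAHOTPLUG_ID > .../parahotplug/devicedisabled", which
 * completes the matching queued request via
 * parahotplug_request_complete().
 */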
2313
2314 static int
2315 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2316 {
2317 unsigned long physaddr = 0;
2318 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2319 u64 addr = 0;
2320
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */
2324
2325 switch (offset) {
2326 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2327 vma->vm_flags |= VM_IO;
2328 if (!*file_controlvm_channel)
2329 return -ENXIO;
2330
2331 visorchannel_read(*file_controlvm_channel,
2332 offsetof(struct spar_controlvm_channel_protocol,
2333 gp_control_channel),
2334 &addr, sizeof(addr));
2335 if (!addr)
2336 return -ENXIO;
2337
2338 physaddr = (unsigned long)addr;
2339 if (remap_pfn_range(vma, vma->vm_start,
2340 physaddr >> PAGE_SHIFT,
2341 vma->vm_end - vma->vm_start,
2342 /*pgprot_noncached */
2343 (vma->vm_page_prot))) {
2344 return -EAGAIN;
2345 }
2346 break;
2347 default:
2348 return -ENXIO;
2349 }
2350 return 0;
2351 }
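
/*
 * Userspace counterpart (sketch; the device node name is an assumption,
 * and "len" is a stand-in for the channel size): offset 0 selects the
 * GP control channel, per VISORCHIPSET_MMAP_CONTROLCHANOFFSET above.
 *
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 */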
2352
2353 static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2354 {
2355 u64 result = VMCALL_SUCCESS;
2356 u64 physaddr = 0;
2357
2358 ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2359 result);
2360 return result;
2361 }
2362
2363 static inline int issue_vmcall_update_physical_time(u64 adjustment)
2364 {
2365 int result = VMCALL_SUCCESS;
2366
2367 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2368 return result;
2369 }
2370
2371 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2372 unsigned long arg)
2373 {
2374 s64 adjustment;
2375 s64 vrtc_offset;
2376
2377 switch (cmd) {
2378 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2379 /* get the physical rtc offset */
2380 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2381 if (copy_to_user((void __user *)arg, &vrtc_offset,
2382 sizeof(vrtc_offset))) {
2383 return -EFAULT;
2384 }
2385 return 0;
2386 case VMCALL_UPDATE_PHYSICAL_TIME:
2387 if (copy_from_user(&adjustment, (void __user *)arg,
2388 sizeof(adjustment))) {
2389 return -EFAULT;
2390 }
2391 return issue_vmcall_update_physical_time(adjustment);
	default:
		return -ENOTTY;	/* unrecognized ioctl command */
2394 }
2395 }
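
/*
 * Userspace sketch for the vrtc offset query (device node name is an
 * assumption; note that the raw vmcall numbers double as the ioctl cmd
 * values here, since no _IOR()/_IOW() encodings are defined):
 *
 *	s64 offset;
 *	int rc = ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET,
 *		       (unsigned long)&offset);
 */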
2396
2397 static const struct file_operations visorchipset_fops = {
2398 .owner = THIS_MODULE,
2399 .open = visorchipset_open,
2400 .read = NULL,
2401 .write = NULL,
2402 .unlocked_ioctl = visorchipset_ioctl,
2403 .release = visorchipset_release,
2404 .mmap = visorchipset_mmap,
2405 };
2406
2407 static int
2408 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2409 {
2410 int rc = 0;
2411
2412 file_controlvm_channel = controlvm_channel;
2413 cdev_init(&file_cdev, &visorchipset_fops);
2414 file_cdev.owner = THIS_MODULE;
2415 if (MAJOR(major_dev) == 0) {
2416 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2417 /* dynamic major device number registration required */
2418 if (rc < 0)
2419 return rc;
2420 } else {
2421 /* static major device number registration required */
2422 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2423 if (rc < 0)
2424 return rc;
2425 }
2426 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2427 if (rc < 0) {
2428 unregister_chrdev_region(major_dev, 1);
2429 return rc;
2430 }
2431 return 0;
2432 }
2433
2434 static int
2435 visorchipset_init(struct acpi_device *acpi_device)
2436 {
2437 int rc = 0;
2438 u64 addr;
2439 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2440 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2441
2442 addr = controlvm_get_channel_address();
2443 if (!addr)
2444 return -ENODEV;
2445
2446 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2447 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2448
	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		return -ENODEV;
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
			visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;
		return -ENODEV;
	}
2459
2460 major_dev = MKDEV(visorchipset_major, 0);
2461 rc = visorchipset_file_init(major_dev, &controlvm_channel);
2462 if (rc < 0) {
2463 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2464 goto cleanup;
2465 }
2466
2467 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2468
2469 /* if booting in a crash kernel */
2470 if (is_kdump_kernel())
2471 INIT_DELAYED_WORK(&periodic_controlvm_work,
2472 setup_crash_devices_work_queue);
2473 else
2474 INIT_DELAYED_WORK(&periodic_controlvm_work,
2475 controlvm_periodic_work);
2476 periodic_controlvm_workqueue =
2477 create_singlethread_workqueue("visorchipset_controlvm");
2478
2479 if (!periodic_controlvm_workqueue) {
2480 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2481 DIAG_SEVERITY_ERR);
2482 rc = -ENOMEM;
2483 goto cleanup;
2484 }
2485 most_recent_message_jiffies = jiffies;
2486 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	/* queue_delayed_work() returns a bool (false only if the work
	 * item was already queued), never a negative errno, so there is
	 * no error to check for on this initial queueing
	 */
	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
2494
2495 visorchipset_platform_device.dev.devt = major_dev;
2496 if (platform_device_register(&visorchipset_platform_device) < 0) {
2497 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		rc = -ENODEV;
2499 goto cleanup;
2500 }
2501 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2502
2503 rc = visorbus_init();
2504 cleanup:
2505 if (rc) {
2506 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2507 POSTCODE_SEVERITY_ERR);
2508 }
2509 return rc;
2510 }
2511
2512 static void
2513 visorchipset_file_cleanup(dev_t major_dev)
2514 {
2515 if (file_cdev.ops)
2516 cdev_del(&file_cdev);
2517 file_cdev.ops = NULL;
2518 unregister_chrdev_region(major_dev, 1);
2519 }
2520
2521 static int
2522 visorchipset_exit(struct acpi_device *acpi_device)
2523 {
2524 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2525
2526 visorbus_exit();
2527
2528 cancel_delayed_work(&periodic_controlvm_work);
2529 flush_workqueue(periodic_controlvm_workqueue);
2530 destroy_workqueue(periodic_controlvm_workqueue);
2531 periodic_controlvm_workqueue = NULL;
2532 destroy_controlvm_payload_info(&controlvm_payload_info);
2533
2534 cleanup_controlvm_structures();
2535
2536 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2537
2538 visorchannel_destroy(controlvm_channel);
2539
2540 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2541 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2542
2543 return 0;
2544 }
2545
2546 static const struct acpi_device_id unisys_device_ids[] = {
2547 {"PNP0A07", 0},
2548 {"", 0},
2549 };
2550
2551 static struct acpi_driver unisys_acpi_driver = {
2552 .name = "unisys_acpi",
2553 .class = "unisys_acpi_class",
2554 .owner = THIS_MODULE,
2555 .ids = unisys_device_ids,
2556 .ops = {
2557 .add = visorchipset_init,
2558 .remove = visorchipset_exit,
2559 },
2560 };

static u32 __init visorutil_spar_detect(void)
2562 {
2563 unsigned int eax, ebx, ecx, edx;
2564
2565 if (cpu_has_hypervisor) {
2566 /* check the ID */
2567 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2568 return (ebx == UNISYS_SPAR_ID_EBX) &&
2569 (ecx == UNISYS_SPAR_ID_ECX) &&
2570 (edx == UNISYS_SPAR_ID_EDX);
2571 } else {
2572 return 0;
2573 }
2574 }
2575
2576 static int init_unisys(void)
2577 {
	int result;

	if (!visorutil_spar_detect())
2580 return -ENODEV;
2581
2582 result = acpi_bus_register_driver(&unisys_acpi_driver);
2583 if (result)
2584 return -ENODEV;
2585
2586 pr_info("Unisys Visorchipset Driver Loaded.\n");
2587 return 0;
}
2589
2590 static void exit_unisys(void)
2591 {
2592 acpi_bus_unregister_driver(&unisys_acpi_driver);
2593 }
2594
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(major, "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorbusregwait,
		 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(holdchipsetready,
		 "1 to hold response to CHIPSET_READY");
2605
2606 module_init(init_unisys);
2607 module_exit(exit_unisys);
2608
2609 MODULE_AUTHOR("Unisys");
2610 MODULE_LICENSE("GPL");
2611 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2612 VERSION);
2613 MODULE_VERSION(VERSION);