staging: unisys: Convert device functions to pass dev_info pointer around

drivers/staging/unisys/visorbus/visorchipset.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;	/* when we got our last
							 * controlvm message */
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock, and if you set <0,
	 * it is your responsibility to also free up all of the other objects
	 * in this struct (like input_buffer_list, active_buf.parser_ctx)
	 * before releasing the lock
	 */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_REPORTEVENT.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visorchipset_bus_info *p, int response);
static void bus_destroy_response(struct visorchipset_bus_info *p, int response);
static void device_create_response(struct visorchipset_device_info *p,
				   int response);
static void device_destroy_response(struct visorchipset_device_info *p,
				    int response);
static void device_resume_response(struct visorchipset_device_info *p,
				   int response);

static void
visorchipset_device_pause_response(struct visorchipset_device_info *p,
				   int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * Allocate an extra byte so the payload is always
	 * '\0'-terminated.
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long)addr);
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		iounmap(mapping);	/* unmap before releasing the region */
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		if (ctx) {
			parser_done(ctx);
			ctx = NULL;
		}
	}
	return rc;
}

static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}

/** Describes the state from the perspective of which controlvm messages have
 * been received for a bus or device.
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		goto Away;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}

Away:
	return;
}

static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}

static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
	if (value == NULL)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *)(value))[value_length] = '\0';
	return value;
}
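
/*
 * Illustrative sketch (not part of the driver): how the parser helpers
 * above are typically combined.  A hypothetical caller maps a payload,
 * pulls out the NAME string, and releases the context; error handling
 * is elided.
 */
#if 0
static void example_parse_name(u64 parm_addr, u32 parm_bytes)
{
	bool retry = false;
	struct parser_context *ctx;
	char *name;

	ctx = parser_init_byte_stream(parm_addr, parm_bytes, false, &retry);
	if (!ctx)
		return;		/* if retry is set, try again later */
	parser_param_start(ctx, PARSERSTRING_NAME);
	name = parser_string_get(ctx);	/* kmalloc'd copy; caller kfree()s */
	kfree(name);
	parser_done(ctx);
}
#endif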
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &efi_spar_indication,
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *)v;

	kfree(p->name);
	kfree(p->description);
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *)v;

	memset(p, 0, sizeof(struct visorchipset_device_info));
}

struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = (struct visor_busdev *)data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if (((bus_no == -1) || (vdev->chipset_bus_no == bus_no)) &&
	    ((dev_no == -1) || (vdev->chipset_dev_no == dev_no)))
		return 1;

	return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
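
/*
 * Illustrative sketch (not part of the driver): visorbus_get_device_by_id()
 * can act as an iterator by passing the previous match back in as "from",
 * and -1 for dev_no matches any device on the bus (mirroring the comparison
 * in match_visorbus_dev_by_id() above).  The function name is hypothetical.
 */
#if 0
static void example_walk_bus(u32 bus_no)
{
	struct visor_device *vdev = NULL;

	while ((vdev = visorbus_get_device_by_id(bus_no, -1, vdev)))
		pr_info("visor device %u:%u\n",
			vdev->chipset_bus_no, vdev->chipset_dev_no);
}
#endif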

static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
	struct visorchipset_bus_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no)
			return p;
	}

	return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
	struct visorchipset_device_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no && p->dev_no == dev_no)
			return p;
	}

	return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
	struct visorchipset_device_info *p, *tmp;

	list_for_each_entry_safe(p, tmp, list, entry) {
		if (p->bus_no == bus_no) {
			list_del(&p->entry);
			kfree(p);
		}
	}
}

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);
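
/*
 * Illustrative sketch (not part of the driver): how a bus implementation
 * such as visorbus might register its notifier callbacks and receive the
 * responder table back.  The callback names here are hypothetical; each
 * notifier must eventually call the matching responder (for example,
 * my_responders.bus_create()) so that the pending controlvm message is
 * answered.
 */
#if 0
static void my_bus_create(struct visorchipset_bus_info *bus_info);
static void my_bus_destroy(struct visorchipset_bus_info *bus_info);

static struct visorchipset_busdev_responders my_responders;
static struct visorchipset_busdev_notifiers my_notifiers = {
	.bus_create = my_bus_create,
	.bus_destroy = my_bus_destroy,
};

static void example_register(void)
{
	/* responders are filled in by visorchipset for us to call back */
	visorchipset_register_busdev(&my_notifiers, &my_responders, NULL);
}
#endif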

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet.
	 */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

static void
bus_responder(enum controlvm_id cmd_id, struct visorchipset_bus_info *p,
	      int response)
{
	bool need_clear = false;
	u32 bus_no;

	if (!p)
		return;
	bus_no = p->bus_no;	/* read only after the NULL check above */

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			busdevices_del(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		busdevices_del(&dev_info_list, bus_no);
	}
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visorchipset_device_info *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no;
	u32 dev_no;

	if (!p)
		return;
	bus_no = p->bus_no;	/* read only after the NULL check above */
	dev_no = p->dev_no;

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

static void
device_responder(enum controlvm_id cmd_id, struct visorchipset_device_info *p,
		 int response)
{
	bool need_clear = false;

	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

static void
bus_epilog(struct visorchipset_bus_info *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	bool notified = false;

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_info);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_info);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_info, response);
	up(&notifier_lock);
}

static void
device_epilog(struct visorchipset_device_info *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;
	u32 bus_no;
	u32 dev_no;
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE = 1",
		NULL
	};

	if (!dev_info)
		return;
	bus_no = dev_info->bus_no;	/* read only after the NULL check */
	dev_no = dev_info->dev_no;

	notifiers = &busdev_notifiers;

	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (dev_info);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
			    segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (dev_info);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (dev_info);
					notified = true;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug
					 */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (dev_info);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, dev_info, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success or a negative CONTROLVM_RESP_ERROR_* code for failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}

/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to the parahotplug sysfs
 * attributes (devicedisabled/deviceenabled), which causes
 * parahotplug_request_complete to be called, at which point the
 * appropriate CONTROLVM message is retrieved from the list and
 * responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate a unique int to match an outstanding CONTROLVM message with a
 * udev script response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	     env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
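
/*
 * Illustrative sketch (not part of the driver): the shift and mask used
 * above treat dev_no as a PCI-style devfn, so the device/function split
 * can be recovered the same way.  The helper name is hypothetical.
 */
#if 0
static void example_decode_devfn(u32 dev_no)
{
	u32 device = dev_no >> 3;	/* upper bits: PCI device number */
	u32 function = dev_no & 0x7;	/* low 3 bits: PCI function */

	pr_info("parahotplug target %u.%u\n", device, function);
}
#endif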

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the sysfs devicedisabled/deviceenabled handlers, which
 * means the user script has finished the enable/disable.  Find the
 * matching identifier, and respond to the CONTROLVM message with
 * success.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}
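
/*
 * Illustrative sketch (not part of the driver): a store handler such as
 * deviceenabled_store() would parse the id the udev script writes back
 * and complete the request roughly like this.  The real handler lives
 * later in this file (beyond this excerpt); the body shown here is an
 * assumption.
 */
#if 0
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);	/* 1 == device now enabled */
	return count;
}
#endif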
1746
1747 /*
1748 * Enables or disables a PCI device by kicking off a udev script
1749 */
1750 static void
1751 parahotplug_process_message(struct controlvm_message *inmsg)
1752 {
1753 struct parahotplug_request *req;
1754
1755 req = parahotplug_request_create(inmsg);
1756
1757 if (!req)
1758 return;
1759
1760 if (inmsg->cmd.device_change_state.state.active) {
1761 /* For enable messages, just respond with success
1762 * right away. This is a bit of a hack, but there are
1763 * issues with the early enable messages we get (with
1764 * either the udev script not detecting that the device
1765 * is up, or not getting called at all). Fortunately
1766 * the messages that get lost don't matter anyway, as
1767 * devices are automatically enabled at
1768 * initialization.
1769 */
1770 parahotplug_request_kickoff(req);
1771 controlvm_respond_physdev_changestate(&inmsg->hdr,
1772 CONTROLVM_RESP_SUCCESS,
1773 inmsg->cmd.device_change_state.state);
1774 parahotplug_request_destroy(req);
1775 } else {
1776 /* For disable messages, add the request to the
1777 * request list before kicking off the udev script. It
1778 * won't get responded to until the script has
1779 * indicated it's done.
1780 */
1781 spin_lock(&parahotplug_request_list_lock);
1782 list_add_tail(&req->list, &parahotplug_request_list);
1783 spin_unlock(&parahotplug_request_list_lock);
1784
1785 parahotplug_request_kickoff(req);
1786 }
1787 }
1788
1789 /* Process a controlvm message.
1790 * Return result:
1791 * false - this function will return false only in the case where the
1792 * controlvm message was NOT processed, but processing must be
1793 * retried before reading the next controlvm message; a
1794 * scenario where this can occur is when we need to throttle
1795 * the allocation of memory in which to copy out controlvm
1796 * payload data
1797 * true - processing of the controlvm message completed,
1798 * either successfully or with an error.
1799 */
1800 static bool
1801 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1802 {
1803 struct controlvm_message_packet *cmd = &inmsg.cmd;
1804 u64 parm_addr;
1805 u32 parm_bytes;
1806 struct parser_context *parser_ctx = NULL;
1807 bool local_addr;
1808 struct controlvm_message ackmsg;
1809
1810 /* create parsing context if necessary */
1811 local_addr = (inmsg.hdr.flags.test_message == 1);
1812 if (channel_addr == 0)
1813 return true;
1814 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1815 parm_bytes = inmsg.hdr.payload_bytes;
1816
1817 /* Parameter and channel addresses within test messages actually lie
1818 * within our OS-controlled memory. We need to know that, because it
1819 * makes a difference in how we compute the virtual address.
1820 */
1821 if (parm_addr && parm_bytes) {
1822 bool retry = false;
1823
1824 parser_ctx =
1825 parser_init_byte_stream(parm_addr, parm_bytes,
1826 local_addr, &retry);
1827 if (!parser_ctx && retry)
1828 return false;
1829 }
1830
1831 if (!local_addr) {
1832 controlvm_init_response(&ackmsg, &inmsg.hdr,
1833 CONTROLVM_RESP_SUCCESS);
1834 if (controlvm_channel)
1835 visorchannel_signalinsert(controlvm_channel,
1836 CONTROLVM_QUEUE_ACK,
1837 &ackmsg);
1838 }
1839 switch (inmsg.hdr.id) {
1840 case CONTROLVM_CHIPSET_INIT:
1841 chipset_init(&inmsg);
1842 break;
1843 case CONTROLVM_BUS_CREATE:
1844 bus_create(&inmsg);
1845 break;
1846 case CONTROLVM_BUS_DESTROY:
1847 bus_destroy(&inmsg);
1848 break;
1849 case CONTROLVM_BUS_CONFIGURE:
1850 bus_configure(&inmsg, parser_ctx);
1851 break;
1852 case CONTROLVM_DEVICE_CREATE:
1853 my_device_create(&inmsg);
1854 break;
1855 case CONTROLVM_DEVICE_CHANGESTATE:
1856 if (cmd->device_change_state.flags.phys_device) {
1857 parahotplug_process_message(&inmsg);
1858 } else {
1859 /* save the hdr and cmd structures for later use
1860 * when sending back the response to Command
1861 */
1862 my_device_changestate(&inmsg);
1863 g_devicechangestate_packet = inmsg.cmd;
1864 }
1865 break;
1866 case CONTROLVM_DEVICE_DESTROY:
1867 my_device_destroy(&inmsg);
1868 break;
1869 case CONTROLVM_DEVICE_CONFIGURE:
1870 /* no op for now, just send a respond that we passed */
1871 if (inmsg.hdr.flags.response_expected)
1872 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1873 break;
1874 case CONTROLVM_CHIPSET_READY:
1875 chipset_ready(&inmsg.hdr);
1876 break;
1877 case CONTROLVM_CHIPSET_SELFTEST:
1878 chipset_selftest(&inmsg.hdr);
1879 break;
1880 case CONTROLVM_CHIPSET_STOP:
1881 chipset_notready(&inmsg.hdr);
1882 break;
1883 default:
1884 if (inmsg.hdr.flags.response_expected)
1885 controlvm_respond(&inmsg.hdr,
1886 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1887 break;
1888 }
1889
1890 if (parser_ctx) {
1891 parser_done(parser_ctx);
1892 parser_ctx = NULL;
1893 }
1894 return true;
1895 }
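/* Callers must honor handle_command()'s false return by stashing the
 * message and retrying it on the next poll rather than reading a new one.
 * A minimal sketch of that contract (this is how controlvm_periodic_work()
 * below consumes it):
 *
 *	if (handle_command(inmsg, channel_addr)) {
 *		got_command = read_controlvm_event(&inmsg);  // advance
 *	} else {
 *		controlvm_pending_msg = inmsg;               // throttled:
 *		controlvm_pending_msg_valid = true;          // retry later
 *	}
 */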
1896
1897 static inline unsigned int
1898 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1899 {
1900 struct vmcall_io_controlvm_addr_params params;
1901 int result = VMCALL_SUCCESS;
1902 u64 physaddr;
1903
1904 physaddr = virt_to_phys(&params);
1905 ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1906 if (VMCALL_SUCCESSFUL(result)) {
1907 *control_addr = params.address;
1908 *control_bytes = params.channel_bytes;
1909 }
1910 return result;
1911 }
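/* Note on the vmcall convention above: the hypervisor is handed a
 * guest-physical address of the params struct, so virt_to_phys() on an
 * on-stack variable is sufficient here (kernel stacks live in the linear
 * mapping in this driver's era). A minimal usage sketch:
 *
 *	u64 addr;
 *	u32 bytes;
 *
 *	if (VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &bytes)))
 *		use_channel(addr, bytes);  // addr/bytes describe the channel
 *
 * (use_channel() is a hypothetical consumer, shown only for shape.)
 */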
1912
1913 static u64 controlvm_get_channel_address(void)
1914 {
1915 u64 addr = 0;
1916 u32 size = 0;
1917
1918 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1919 return 0;
1920
1921 return addr;
1922 }
1923
1924 static void
1925 controlvm_periodic_work(struct work_struct *work)
1926 {
1927 struct controlvm_message inmsg;
1928 bool got_command = false;
1929 bool handle_command_failed = false;
1930 static u64 poll_count;
1931
1932 /* make sure visorbus server is registered for controlvm callbacks */
1933 if (visorchipset_visorbusregwait && !visorbusregistered)
1934 goto cleanup;
1935
1936 poll_count++;
1937 if (poll_count < 250)
1938 goto cleanup; /* don't start processing until the 250th poll */
1941
1942 /* Check events to determine if response to CHIPSET_READY
1943 * should be sent
1944 */
1945 if (visorchipset_holdchipsetready &&
1946 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1947 if (check_chipset_events() == 1) {
1948 controlvm_respond(&g_chipset_msg_hdr, 0);
1949 clear_chipset_events();
1950 memset(&g_chipset_msg_hdr, 0,
1951 sizeof(struct controlvm_message_header));
1952 }
1953 }
1954
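/* Drain (and discard) any responses queued on CONTROLVM_QUEUE_RESPONSE;
 * nothing in this driver consumes them.
 */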
1955 while (visorchannel_signalremove(controlvm_channel,
1956 CONTROLVM_QUEUE_RESPONSE,
1957 &inmsg))
1958 ;
1959 if (!got_command) {
1960 if (controlvm_pending_msg_valid) {
1961 /* we throttled processing of a prior
1962 * msg, so try to process it again
1963 * rather than reading a new one
1964 */
1965 inmsg = controlvm_pending_msg;
1966 controlvm_pending_msg_valid = false;
1967 got_command = true;
1968 } else {
1969 got_command = read_controlvm_event(&inmsg);
1970 }
1971 }
1972
1973 handle_command_failed = false;
1974 while (got_command && (!handle_command_failed)) {
1975 most_recent_message_jiffies = jiffies;
1976 if (handle_command(inmsg,
1977 visorchannel_get_physaddr
1978 (controlvm_channel)))
1979 got_command = read_controlvm_event(&inmsg);
1980 else {
1981 /* this is a scenario where throttling
1982 * is required, but probably NOT an
1983 * error; stash the current controlvm
1984 * msg so we can reprocess it on the
1985 * next loop iteration
1986 */
1987 handle_command_failed = true;
1988 controlvm_pending_msg = inmsg;
1989 controlvm_pending_msg_valid = true;
1990 }
1991 }
1992
1993 /* parahotplug_worker */
1994 parahotplug_process_list();
1995
1996 cleanup:
1997
1998 if (time_after(jiffies,
1999 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2000 /* it's been longer than MIN_IDLE_SECONDS since we
2001 * processed our last controlvm message; slow down the
2002 * polling
2003 */
2004 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2005 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2006 } else {
2007 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2008 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2009 }
2010
2011 queue_delayed_work(periodic_controlvm_workqueue,
2012 &periodic_controlvm_work, poll_jiffies);
2013 }
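/* Polling cadence, for reference: POLLJIFFIES_CONTROLVMCHANNEL_FAST is
 * 1 jiffy and the SLOW value is 100 jiffies, so on a typical HZ=250
 * kernel the worker runs every ~4ms while messages are flowing and backs
 * off to ~400ms after MIN_IDLE_SECONDS (10s) without a message. The
 * wall-clock numbers scale with CONFIG_HZ; only the 1:100 ratio is fixed
 * here.
 */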
2014
2015 static void
2016 setup_crash_devices_work_queue(struct work_struct *work)
2017 {
2018 struct controlvm_message local_crash_bus_msg;
2019 struct controlvm_message local_crash_dev_msg;
2020 struct controlvm_message msg = { 0 }; /* zeroed: only some fields are set below */
2021 u32 local_crash_msg_offset;
2022 u16 local_crash_msg_count;
2023
2024 /* make sure visorbus is registered for controlvm callbacks */
2025 if (visorchipset_visorbusregwait && !visorbusregistered)
2026 goto cleanup;
2027
2028 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2029
2030 /* send init chipset msg */
2031 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2032 msg.cmd.init_chipset.bus_count = 23;
2033 msg.cmd.init_chipset.switch_count = 0;
2034
2035 chipset_init(&msg);
2036
2037 /* get saved message count */
2038 if (visorchannel_read(controlvm_channel,
2039 offsetof(struct spar_controlvm_channel_protocol,
2040 saved_crash_message_count),
2041 &local_crash_msg_count, sizeof(u16)) < 0) {
2042 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2043 POSTCODE_SEVERITY_ERR);
2044 return;
2045 }
2046
2047 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
2048 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2049 local_crash_msg_count,
2050 POSTCODE_SEVERITY_ERR);
2051 return;
2052 }
2053
2054 /* get saved crash message offset */
2055 if (visorchannel_read(controlvm_channel,
2056 offsetof(struct spar_controlvm_channel_protocol,
2057 saved_crash_message_offset),
2058 &local_crash_msg_offset, sizeof(u32)) < 0) {
2059 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2060 POSTCODE_SEVERITY_ERR);
2061 return;
2062 }
2063
2064 /* read create device message for storage bus offset */
2065 if (visorchannel_read(controlvm_channel,
2066 local_crash_msg_offset,
2067 &local_crash_bus_msg,
2068 sizeof(struct controlvm_message)) < 0) {
2069 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2070 POSTCODE_SEVERITY_ERR);
2071 return;
2072 }
2073
2074 /* read create device message for storage device */
2075 if (visorchannel_read(controlvm_channel,
2076 local_crash_msg_offset +
2077 sizeof(struct controlvm_message),
2078 &local_crash_dev_msg,
2079 sizeof(struct controlvm_message)) < 0) {
2080 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2081 POSTCODE_SEVERITY_ERR);
2082 return;
2083 }
2084
2085 /* reuse IOVM create bus message */
2086 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
2087 bus_create(&local_crash_bus_msg);
2088 } else {
2089 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2090 POSTCODE_SEVERITY_ERR);
2091 return;
2092 }
2093
2094 /* reuse create device message for storage device */
2095 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2096 my_device_create(&local_crash_dev_msg);
2097 } else {
2098 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2099 POSTCODE_SEVERITY_ERR);
2100 return;
2101 }
2102 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2103 return;
2104
2105 cleanup:
2106
2107 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2108
2109 queue_delayed_work(periodic_controlvm_workqueue,
2110 &periodic_controlvm_work, poll_jiffies);
2111 }
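/* Layout assumed by the channel reads above: saved_crash_message_count
 * (expected to equal CONTROLVM_CRASHMSG_MAX, i.e. the two messages read
 * here) messages are stored back to back at saved_crash_message_offset:
 *
 *	offset + 0                                -> bus-create message
 *	offset + sizeof(struct controlvm_message) -> device-create message
 *
 * Both are replayed through the normal bus_create()/my_device_create()
 * paths so the crash kernel can reach its dump device.
 */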
2112
2113 static void
2114 bus_create_response(struct visorchipset_bus_info *bus_info, int response)
2115 {
2116 bus_responder(CONTROLVM_BUS_CREATE, bus_info, response);
2117 }
2118
2119 static void
2120 bus_destroy_response(struct visorchipset_bus_info *bus_info, int response)
2121 {
2122 bus_responder(CONTROLVM_BUS_DESTROY, bus_info, response);
2123 }
2124
2125 static void
2126 device_create_response(struct visorchipset_device_info *dev_info, int response)
2127 {
2128 device_responder(CONTROLVM_DEVICE_CREATE, dev_info, response);
2129 }
2130
2131 static void
2132 device_destroy_response(struct visorchipset_device_info *dev_info, int response)
2133 {
2134 device_responder(CONTROLVM_DEVICE_DESTROY, dev_info, response);
2135 }
2136
2137 static void
2138 visorchipset_device_pause_response(struct visorchipset_device_info *dev_info,
2139 int response)
2140 {
2141 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2142 dev_info, response,
2143 segment_state_standby);
2144 }
2145
2146 static void
2147 device_resume_response(struct visorchipset_device_info *dev_info, int response)
2148 {
2149 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2150 dev_info, response,
2151 segment_state_running);
2152 }
2153
2154 bool
2155 visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2156 {
2157 void *p = bus_find(&bus_info_list, bus_no);
2158
2159 if (!p)
2160 return false;
2161 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2162 return true;
2163 }
2164 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2165
2166 bool
2167 visorchipset_set_bus_context(struct visorchipset_bus_info *p, void *context)
2168 {
2169 if (!p)
2170 return false;
2171 p->bus_driver_context = context;
2172 return true;
2173 }
2174 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2175
2176 bool
2177 visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2178 struct visorchipset_device_info *dev_info)
2179 {
2180 void *p = device_find(&dev_info_list, bus_no, dev_no);
2181
2182 if (!p)
2183 return false;
2184 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2185 return true;
2186 }
2187 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2188
2189 bool
2190 visorchipset_set_device_context(struct visorchipset_device_info *p,
2191 void *context)
2192 {
2193 if (!p)
2194 return false;
2195 p->bus_driver_context = context;
2196 return true;
2197 }
2198 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
2199
2200 static ssize_t chipsetready_store(struct device *dev,
2201 struct device_attribute *attr,
2202 const char *buf, size_t count)
2203 {
2204 char msgtype[64];
2205
2206 if (sscanf(buf, "%63s", msgtype) != 1)
2207 return -EINVAL;
2208
2209 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2210 chipset_events[0] = 1;
2211 return count;
2212 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2213 chipset_events[1] = 1;
2214 return count;
2215 }
2216 return -EINVAL;
2217 }
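/* Usage sketch: a guest-side helper reports an event by writing the bare
 * token to this attribute; each token sets the chipset_events[] flag that
 * check_chipset_events() polls before releasing a held CHIPSET_READY
 * response. The sysfs path below is illustrative (the attribute group
 * registration is not shown in this excerpt); error handling elided:
 *
 *	int fd = open("/sys/devices/platform/visorchipset/guest/chipsetready",
 *		      O_WRONLY);
 *	write(fd, "MODULES_LOADED", 14);
 */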
2218
2219 /* The parahotplug/devicedisabled interface gets called by our support script
2220 * when an SR-IOV device has been shut down. The ID is passed to the script
2221 * and then passed back when the device has been removed.
2222 */
2223 static ssize_t devicedisabled_store(struct device *dev,
2224 struct device_attribute *attr,
2225 const char *buf, size_t count)
2226 {
2227 unsigned int id;
2228
2229 if (kstrtouint(buf, 10, &id))
2230 return -EINVAL;
2231
2232 parahotplug_request_complete(id, 0);
2233 return count;
2234 }
2235
2236 /* The parahotplug/deviceenabled interface gets called by our support script
2237 * when an SR-IOV device has been recovered. The ID is passed to the script
2238 * and then passed back when the device has been brought back up.
2239 */
2240 static ssize_t deviceenabled_store(struct device *dev,
2241 struct device_attribute *attr,
2242 const char *buf, size_t count)
2243 {
2244 unsigned int id;
2245
2246 if (kstrtouint(buf, 10, &id))
2247 return -EINVAL;
2248
2249 parahotplug_request_complete(id, 1);
2250 return count;
2251 }
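/* Round trip for the two stores above, sketched: for a disable,
 * parahotplug_process_message() queues the request and
 * parahotplug_request_kickoff() hands its id to the udev script through
 * the environment; the script acknowledges by writing the id back, e.g.
 * (environment variable name illustrative, not verbatim; see
 * parahotplug_request_kickoff()):
 *
 *	echo "$SPAR_PARAHOTPLUG_ID" > parahotplug/devicedisabled
 *
 * which reaches parahotplug_request_complete(id, 0) and finally sends the
 * response that was deferred when the request was queued.
 */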
2252
2253 static int
2254 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2255 {
2256 unsigned long physaddr = 0;
2257 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2258 u64 addr = 0;
2259
2261 if (offset & (PAGE_SIZE - 1))
2262 return -ENXIO; /* need aligned offsets */
2263
2264 switch (offset) {
2265 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2266 vma->vm_flags |= VM_IO;
2267 if (!*file_controlvm_channel)
2268 return -ENXIO;
2269
2270 visorchannel_read(*file_controlvm_channel,
2271 offsetof(struct spar_controlvm_channel_protocol,
2272 gp_control_channel),
2273 &addr, sizeof(addr));
2274 if (!addr)
2275 return -ENXIO;
2276
2277 physaddr = (unsigned long)addr;
2278 if (remap_pfn_range(vma, vma->vm_start,
2279 physaddr >> PAGE_SHIFT,
2280 vma->vm_end - vma->vm_start,
2281 /*pgprot_noncached */
2282 (vma->vm_page_prot))) {
2283 return -EAGAIN;
2284 }
2285 break;
2286 default:
2287 return -ENXIO;
2288 }
2289 return 0;
2290 }
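/* A hedged user-space sketch of mapping the control channel through the
 * handler above ("/dev/visorchipset" assumes a node for this char device;
 * the mapping length is caller-chosen; error handling elided):
 *
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	void *chan = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 *
 * Only page-aligned offsets are accepted, and only offset 0 (the
 * control-channel offset) is recognized.
 */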
2291
2292 static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2293 {
2294 s64 result = VMCALL_SUCCESS;
2295 u64 physaddr = 0;
2296
2297 ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2298 result);
2299 return result;
2300 }
2301
2302 static inline int issue_vmcall_update_physical_time(u64 adjustment)
2303 {
2304 int result = VMCALL_SUCCESS;
2305
2306 ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2307 return result;
2308 }
2309
2310 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2311 unsigned long arg)
2312 {
2313 s64 adjustment;
2314 s64 vrtc_offset;
2315
2316 switch (cmd) {
2317 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2318 /* get the physical rtc offset */
2319 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2320 if (copy_to_user((void __user *)arg, &vrtc_offset,
2321 sizeof(vrtc_offset))) {
2322 return -EFAULT;
2323 }
2324 return 0;
2325 case VMCALL_UPDATE_PHYSICAL_TIME:
2326 if (copy_from_user(&adjustment, (void __user *)arg,
2327 sizeof(adjustment))) {
2328 return -EFAULT;
2329 }
2330 return issue_vmcall_update_physical_time(adjustment);
2331 default:
2332 return -ENOTTY; /* unknown ioctl command */
2333 }
2334 }
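/* A hedged user-space sketch of the two ioctls above; note the command
 * numbers are the raw VMCALL_* values rather than _IOR()/_IOW()
 * encodings, and "/dev/visorchipset" assumes a node for the char device
 * registered in visorchipset_file_init(); error handling elided:
 *
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	int64_t vrtc_offset, adjustment = 1000;
 *
 *	ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &vrtc_offset);
 *	ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adjustment);
 */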
2335
2336 static const struct file_operations visorchipset_fops = {
2337 .owner = THIS_MODULE,
2338 .open = visorchipset_open,
2341 .unlocked_ioctl = visorchipset_ioctl,
2342 .release = visorchipset_release,
2343 .mmap = visorchipset_mmap,
2344 };
2345
2346 static int
2347 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2348 {
2349 int rc = 0;
2350
2351 file_controlvm_channel = controlvm_channel;
2352 cdev_init(&file_cdev, &visorchipset_fops);
2353 file_cdev.owner = THIS_MODULE;
2354 if (MAJOR(major_dev) == 0) {
2355 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2356 /* dynamic major device number registration required */
2357 if (rc < 0)
2358 return rc;
2359 } else {
2360 /* static major device number registration required */
2361 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2362 if (rc < 0)
2363 return rc;
2364 }
2365 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2366 if (rc < 0) {
2367 unregister_chrdev_region(major_dev, 1);
2368 return rc;
2369 }
2370 return 0;
2371 }
2372
2373 static int
2374 visorchipset_init(struct acpi_device *acpi_device)
2375 {
2376 int rc = 0;
2377 u64 addr;
2378 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2379 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2380
2381 addr = controlvm_get_channel_address();
2382 if (!addr)
2383 return -ENODEV;
2384
2385 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2386 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2387
2388 controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
2389 GFP_KERNEL, uuid);
2390 if (!controlvm_channel)
2391 return -ENODEV;
2392 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2393 visorchannel_get_header(controlvm_channel))) {
2394 initialize_controlvm_payload();
2395 } else {
2396 visorchannel_destroy(controlvm_channel);
2397 controlvm_channel = NULL;
2398 return -ENODEV;
2399 }
2398
2399 major_dev = MKDEV(visorchipset_major, 0);
2400 rc = visorchipset_file_init(major_dev, &controlvm_channel);
2401 if (rc < 0) {
2402 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2403 goto cleanup;
2404 }
2405
2406 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2407
2408 /* if booting in a crash kernel */
2409 if (is_kdump_kernel())
2410 INIT_DELAYED_WORK(&periodic_controlvm_work,
2411 setup_crash_devices_work_queue);
2412 else
2413 INIT_DELAYED_WORK(&periodic_controlvm_work,
2414 controlvm_periodic_work);
2415 periodic_controlvm_workqueue =
2416 create_singlethread_workqueue("visorchipset_controlvm");
2417
2418 if (!periodic_controlvm_workqueue) {
2419 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2420 DIAG_SEVERITY_ERR);
2421 rc = -ENOMEM;
2422 goto cleanup;
2423 }
2424 most_recent_message_jiffies = jiffies;
2425 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2426 /* queue_delayed_work() returns only a bool (false when the work
2427 * was already queued), so there is no error value to check
2428 */
2429 queue_delayed_work(periodic_controlvm_workqueue,
2430 &periodic_controlvm_work, poll_jiffies);
2433
2434 visorchipset_platform_device.dev.devt = major_dev;
2435 if (platform_device_register(&visorchipset_platform_device) < 0) {
2436 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2437 rc = -ENODEV;
2438 goto cleanup;
2439 }
2440 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2441
2442 rc = visorbus_init();
2443 cleanup:
2444 if (rc) {
2445 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2446 POSTCODE_SEVERITY_ERR);
2447 }
2448 return rc;
2449 }
2450
2451 static void
2452 visorchipset_file_cleanup(dev_t major_dev)
2453 {
2454 if (file_cdev.ops)
2455 cdev_del(&file_cdev);
2456 file_cdev.ops = NULL;
2457 unregister_chrdev_region(major_dev, 1);
2458 }
2459
2460 static int
2461 visorchipset_exit(struct acpi_device *acpi_device)
2462 {
2463 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2464
2465 visorbus_exit();
2466
2467 cancel_delayed_work(&periodic_controlvm_work);
2468 flush_workqueue(periodic_controlvm_workqueue);
2469 destroy_workqueue(periodic_controlvm_workqueue);
2470 periodic_controlvm_workqueue = NULL;
2471 destroy_controlvm_payload_info(&controlvm_payload_info);
2472
2473 cleanup_controlvm_structures();
2474
2475 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2476
2477 visorchannel_destroy(controlvm_channel);
2478
2479 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2480 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2481
2482 return 0;
2483 }
2484
2485 static const struct acpi_device_id unisys_device_ids[] = {
2486 {"PNP0A07", 0},
2487 {"", 0},
2488 };
2489
2490 static struct acpi_driver unisys_acpi_driver = {
2491 .name = "unisys_acpi",
2492 .class = "unisys_acpi_class",
2493 .owner = THIS_MODULE,
2494 .ids = unisys_device_ids,
2495 .ops = {
2496 .add = visorchipset_init,
2497 .remove = visorchipset_exit,
2498 },
2499 };
2500 static __init u32 visorutil_spar_detect(void)
2501 {
2502 unsigned int eax, ebx, ecx, edx;
2503
2504 if (cpu_has_hypervisor) {
2505 /* check the ID */
2506 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2507 return (ebx == UNISYS_SPAR_ID_EBX) &&
2508 (ecx == UNISYS_SPAR_ID_ECX) &&
2509 (edx == UNISYS_SPAR_ID_EDX);
2510 } else {
2511 return 0;
2512 }
2513 }
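/* The three magic constants compared above are just "UnisysSpar64" split
 * across the registers as little-endian ASCII:
 *
 *	0x73696e55 -> 'U' 'n' 'i' 's'  (ebx)
 *	0x70537379 -> 'y' 's' 'S' 'p'  (ecx)
 *	0x34367261 -> 'a' 'r' '6' '4'  (edx)
 */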
2514
2515 static int init_unisys(void)
2516 {
2517 int result;
2518 if (!visorutil_spar_detect())
2519 return -ENODEV;
2520
2521 result = acpi_bus_register_driver(&unisys_acpi_driver);
2522 if (result)
2523 return -ENODEV;
2524
2525 pr_info("Unisys Visorchipset Driver Loaded.\n");
2526 return 0;
2527 }
2528
2529 static void exit_unisys(void)
2530 {
2531 acpi_bus_unregister_driver(&unisys_acpi_driver);
2532 }
2533
2534 module_param_named(major, visorchipset_major, int, S_IRUGO);
2535 MODULE_PARM_DESC(major,
2536 "major device number to use for the device node");
2537 module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2538 MODULE_PARM_DESC(visorbusregwait,
2539 "1 to have the module wait for the visor bus to register");
2540 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2541 int, S_IRUGO);
2542 MODULE_PARM_DESC(holdchipsetready,
2543 "1 to hold response to CHIPSET_READY");
2544
2545 module_init(init_unisys);
2546 module_exit(exit_unisys);
2547
2548 MODULE_AUTHOR("Unisys");
2549 MODULE_LICENSE("GPL");
2550 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2551 VERSION);
2552 MODULE_VERSION(VERSION);