staging: unisys: visorchipset_file_{init, cleanup}(): mark static
drivers/staging/unisys/visorbus/visorchipset.c
/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "uisutils.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE 50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;	/* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

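/* File operations for /dev/visorchipset: only minor number 0 is valid,
 * and no per-open state is kept.
 */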
static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;	/* when we got our last
							 * controlvm message */
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static const uuid_le spar_diag_pool_channel_protocol_uuid =
	SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID;
/* 0xffffff is an invalid Bus/Device number */
static u32 g_diagpool_bus_no = 0xffffff;
static u32 g_diagpool_dev_no = 0xffffff;
static struct controlvm_message_packet g_devicechangestate_packet;

#define is_diagpool_channel(channel_type_guid) \
	(uuid_le_cmp(channel_type_guid,\
		     spar_diag_pool_channel_protocol_uuid) == 0)

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 __iomem *ptr;	/* pointer to base address of payload pool */
	u64 offset;		/* offset from beginning of controlvm
				 * channel to beginning of payload pool */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* Manages the info for a CONTROLVM_DUMP_CAPTURESTATE /
 * CONTROLVM_DUMP_GETTEXTDUMP / CONTROLVM_DUMP_COMPLETE conversation.
 */
struct visor_livedump_info {
	struct controlvm_message_header dumpcapture_header;
	struct controlvm_message_header gettextdump_header;
	struct controlvm_message_header dumpcomplete_header;
	bool gettextdump_outstanding;
	u32 crc32;
	unsigned long length;
	atomic_t buffers_in_use;
	unsigned long destination;
};

static struct visor_livedump_info livedump_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements. In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm message
 * in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
	struct list_head next;	/* putfile_buffer_entry list */
	struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	u8 *pnext;
	/* # bytes left from <pnext> to the end of this data buffer */
	size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation. Structs of this type are dynamically linked into
 * putfile_request_list.
 */
struct putfile_request {
	u64 sig;		/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;
	u64 file_request_number;	/* from original TransmitFile request */

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/* most-recent sequence number supplied via a controlvm message */
	u64 data_sequence_number;

	/* head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/* <0 = failed, 0 = in-progress, >0 = successful; */
	/* note that this must be set with req_list_lock, and if you set <0, */
	/* it is your responsibility to also free up all of the other objects */
	/* in this struct (like input_buffer_list, active_buf.parser_ctx) */
	/* before releasing the lock */
	int completion_status;
};

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);	/* lock for above */
static void parahotplug_process_list(void);

/* The notifiers (up-calls) registered by the visorbus driver, which we
 * use to forward bus and device controlvm messages to it.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(u32 bus_no, int response);
static void bus_destroy_response(u32 bus_no, int response);
static void device_create_response(u32 bus_no, u32 dev_no, int response);
static void device_destroy_response(u32 bus_no, u32 dev_no, int response);
static void device_resume_response(u32 bus_no, u32 dev_no, int response);

static void visorchipset_device_pause_response(u32 bus_no, u32 dev_no,
					       int response);

static struct visorchipset_busdev_responders busdev_responders = {
	.bus_create = bus_create_response,
	.bus_destroy = bus_destroy_response,
	.device_create = device_create_response,
	.device_destroy = device_destroy_response,
	.device_pause = visorchipset_device_pause_response,
	.device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
	&dev_attr_chipsetready.attr,
	NULL
};

static struct attribute_group visorchipset_guest_group = {
	.name = "guest",
	.attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_guest_group,
	&visorchipset_parahotplug_group,
	NULL
};

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
			      int response);
static void controlvm_respond_chipset_init(
		struct controlvm_message_header *msg_hdr, int response,
		enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

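/* Allocate a parser_context and buffer the controlvm message payload found
 * at <addr>: copied via __va() for local/test messages, or via
 * ioremap_cache() + memcpy_fromio() otherwise. If buffering would exceed
 * MAX_CONTROLVM_PAYLOAD_BYTES, or the allocation fails, NULL is returned
 * and *retry is set so the caller knows to try again later.
 */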
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *rc = NULL;
	struct parser_context *ctx = NULL;

	if (retry)
		*retry = false;

	/*
	 * allocate an extra byte to ensure the payload is
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	     > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		rc = NULL;
		goto cleanup;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1)) {
			rc = NULL;
			goto cleanup;
		}
		p = __va((unsigned long) (addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void __iomem *mapping;

		if (!request_mem_region(addr, bytes, "visorchipset")) {
			rc = NULL;
			goto cleanup;
		}

		mapping = ioremap_cache(addr, bytes);
		if (!mapping) {
			release_mem_region(addr, bytes);
			rc = NULL;
			goto cleanup;
		}
		memcpy_fromio(ctx->data, mapping, bytes);
		iounmap(mapping);	/* unmap, or the mapping is leaked */
		release_mem_region(addr, bytes);
	}

	ctx->byte_stream = true;
	rc = ctx;
cleanup:
	if (rc) {
		controlvm_payload_bytes_buffered += ctx->param_bytes;
	} else {
		if (ctx) {
			parser_done(ctx);
			ctx = NULL;
		}
	}
	return rc;
}

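/* Return the GUID found in the spar_controlvm_parameters_header at the
 * start of the buffered payload, or NULL_UUID_LE if there is no context.
 */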
static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}

/* Identifies which string within the controlvm parameters payload a call
 * to parser_param_start() should select.
 */

enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used? */
};

static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (ctx == NULL)
		goto Away;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}

Away:
	return;
}

static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}

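/* Return a kmalloc()ed, '\0'-terminated copy of the string most recently
 * selected by parser_param_start(), or NULL on failure; the caller must
 * kfree() the result.
 */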
static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
	if (value == NULL)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *) (value))[value_length] = '\0';
	return value;
}

static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  tool_action),
				 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  efi_spar_ind), &(efi_spar_indication),
				 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_error),
				 &error, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (ret)
		return ret;
	return count;
}

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write(controlvm_channel,
				 offsetof(struct spar_controlvm_channel_protocol,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (ret)
		return ret;
	return count;
}

static void
bus_info_clear(void *v)
{
	struct visorchipset_bus_info *p = (struct visorchipset_bus_info *) v;

	kfree(p->name);
	kfree(p->description);
	memset(p, 0, sizeof(struct visorchipset_bus_info));
}

static void
dev_info_clear(void *v)
{
	struct visorchipset_device_info *p =
		(struct visorchipset_device_info *) v;

	memset(p, 0, sizeof(struct visorchipset_device_info));
}

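/* Look up tracked bus/device bookkeeping entries by bus (and device)
 * number; both lookups return NULL when no match is found.
 */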
static struct visorchipset_bus_info *
bus_find(struct list_head *list, u32 bus_no)
{
	struct visorchipset_bus_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no)
			return p;
	}

	return NULL;
}

static struct visorchipset_device_info *
device_find(struct list_head *list, u32 bus_no, u32 dev_no)
{
	struct visorchipset_device_info *p;

	list_for_each_entry(p, list, entry) {
		if (p->bus_no == bus_no && p->dev_no == dev_no)
			return p;
	}

	return NULL;
}

static void busdevices_del(struct list_head *list, u32 bus_no)
{
	struct visorchipset_device_info *p, *tmp;

	list_for_each_entry_safe(p, tmp, list, entry) {
		if (p->bus_no == bus_no) {
			list_del(&p->entry);
			kfree(p);
		}
	}
}

static u8
check_chipset_events(void)
{
	int i;
	u8 send_msg = 1;

	/* Check events to determine if response should be sent */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		send_msg &= chipset_events[i];
	return send_msg;
}

static void
clear_chipset_events(void)
{
	int i;

	/* Clear chipset_events */
	for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
		chipset_events[i] = 0;
}

void
visorchipset_register_busdev(
			struct visorchipset_busdev_notifiers *notifiers,
			struct visorchipset_busdev_responders *responders,
			struct ultra_vbus_deviceinfo *driver_info)
{
	down(&notifier_lock);
	if (!notifiers) {
		memset(&busdev_notifiers, 0,
		       sizeof(busdev_notifiers));
		visorbusregistered = 0;	/* clear flag */
	} else {
		busdev_notifiers = *notifiers;
		visorbusregistered = 1;	/* set flag */
	}
	if (responders)
		*responders = busdev_responders;
	if (driver_info)
		bus_device_info_init(driver_info, "chipset", "visorchipset",
				     VERSION, NULL);

	up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

static void
cleanup_controlvm_structures(void)
{
	struct visorchipset_bus_info *bi, *tmp_bi;
	struct visorchipset_device_info *di, *tmp_di;

	list_for_each_entry_safe(bi, tmp_bi, &bus_info_list, entry) {
		bus_info_clear(bi);
		list_del(&bi->entry);
		kfree(bi);
	}

	list_for_each_entry_safe(di, tmp_di, &dev_info_list, entry) {
		dev_info_clear(di);
		list_del(&di->entry);
		kfree(di);
	}
}

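/* Handle the CONTROLVM_CHIPSET_INIT message that starts the protocol
 * handshake: advertise parahotplug support (when Command also supports
 * it) and the fact that this is a features-aware driver.
 */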
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/* Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/* Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
	if (rc < 0)
		cleanup_controlvm_structures();
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

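/* Build an outgoing controlvm response in <msg> by copying the header of
 * the message being responded to; a negative <response> is recorded as a
 * failure in the header's completion status.
 */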
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32) (-response);
	}
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	/* For DiagPool channel DEVICE_CHANGESTATE, we need to send
	 * back the deviceChangeState structure in the packet.
	 */
	if (msg_hdr->id == CONTROLVM_DEVICE_CHANGESTATE &&
	    g_devicechangestate_packet.device_change_state.bus_no ==
	    g_diagpool_bus_no &&
	    g_devicechangestate_packet.device_change_state.dev_no ==
	    g_diagpool_dev_no)
		outmsg.cmd = g_devicechangestate_packet;
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

void
visorchipset_save_message(struct controlvm_message *msg,
			  enum crash_obj_type type)
{
	u32 crash_msg_offset;
	u16 crash_msg_count;

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (type == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else { /* CRASH_DEV */
		if (visorchannel_write(controlvm_channel,
				       crash_msg_offset +
				       sizeof(struct controlvm_message), msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
EXPORT_SYMBOL_GPL(visorchipset_save_message);

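/* Complete a bus-level controlvm request: roll back or update the tracked
 * bus state for CREATE/DESTROY, and send any response still pending for
 * <cmd_id> back on the controlvm channel.
 */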
static void
bus_responder(enum controlvm_id cmd_id, u32 bus_no, int response)
{
	struct visorchipset_bus_info *p;
	bool need_clear = false;

	p = bus_find(&bus_info_list, bus_no);
	if (!p)
		return;

	if (response < 0) {
		if ((cmd_id == CONTROLVM_BUS_CREATE) &&
		    (response != (-CONTROLVM_RESP_ERROR_ALREADY_DONE)))
			/* undo the row we just created... */
			busdevices_del(&dev_info_list, bus_no);
	} else {
		if (cmd_id == CONTROLVM_BUS_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_BUS_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;
	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear) {
		bus_info_clear(p);
		busdevices_del(&dev_info_list, bus_no);
	}
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
			     u32 bus_no, u32 dev_no, int response,
			     struct spar_segment_state response_state)
{
	struct visorchipset_device_info *p;
	struct controlvm_message outmsg;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr.id != cmd_id)
		return;

	controlvm_init_response(&outmsg, &p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (!visorchannel_signalinsert(controlvm_channel,
				       CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;

	p->pending_msg_hdr.id = CONTROLVM_INVALID;
}

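/* Device-level counterpart of bus_responder(): update the tracked device
 * state for CREATE/DESTROY and send any pending response for <cmd_id>.
 */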
static void
device_responder(enum controlvm_id cmd_id, u32 bus_no, u32 dev_no, int response)
{
	struct visorchipset_device_info *p;
	bool need_clear = false;

	p = device_find(&dev_info_list, bus_no, dev_no);
	if (!p)
		return;
	if (response >= 0) {
		if (cmd_id == CONTROLVM_DEVICE_CREATE)
			p->state.created = 1;
		if (cmd_id == CONTROLVM_DEVICE_DESTROY)
			need_clear = true;
	}

	if (p->pending_msg_hdr.id == CONTROLVM_INVALID)
		return;		/* no controlvm response needed */

	if (p->pending_msg_hdr.id != (u32)cmd_id)
		return;

	controlvm_respond(&p->pending_msg_hdr, response);
	p->pending_msg_hdr.id = CONTROLVM_INVALID;
	if (need_clear)
		dev_info_clear(p);
}

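/* Common tail for bus messages: remember the header if a response is
 * expected, then either notify the registered visorbus driver (which will
 * answer through busdev_responders and thus bus_responder()) or respond
 * directly here.
 */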
static void
bus_epilog(u32 bus_no,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	struct visorchipset_bus_info *bus_info;
	bool notified = false;

	bus_info = bus_find(&bus_info_list, bus_no);

	if (!bus_info)
		return;

	if (need_response) {
		memcpy(&bus_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		bus_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			if (busdev_notifiers.bus_create) {
				(*busdev_notifiers.bus_create) (bus_no);
				notified = true;
			}
			break;
		case CONTROLVM_BUS_DESTROY:
			if (busdev_notifiers.bus_destroy) {
				(*busdev_notifiers.bus_destroy) (bus_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call bus_responder()
		 */
		;
	else
		bus_responder(cmd, bus_no, response);
	up(&notifier_lock);
}

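/* Common tail for device messages, analogous to bus_epilog(). For
 * CHANGESTATE, the requested segment state selects between the resume
 * notification, the pause notification, and the diagpool "lite pause"
 * uevent.
 */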
static void
device_epilog(u32 bus_no, u32 dev_no, struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct visorchipset_busdev_notifiers *notifiers;
	bool notified = false;

	struct visorchipset_device_info *dev_info =
		device_find(&dev_info_list, bus_no, dev_no);
	char *envp[] = {
		"SPARSP_DIAGPOOL_PAUSED_STATE=1",
		NULL
	};

	if (!dev_info)
		return;

	notifiers = &busdev_notifiers;

	if (need_response) {
		memcpy(&dev_info->pending_msg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
	} else {
		dev_info->pending_msg_hdr.id = CONTROLVM_INVALID;
	}

	down(&notifier_lock);
	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			if (notifiers->device_create) {
				(*notifiers->device_create) (bus_no, dev_no);
				notified = true;
			}
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
			    segment_state_running.operating) {
				if (notifiers->device_resume) {
					(*notifiers->device_resume) (bus_no,
								     dev_no);
					notified = true;
				}
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/* technically this is standby case
				 * where server is lost
				 */
				if (notifiers->device_pause) {
					(*notifiers->device_pause) (bus_no,
								    dev_no);
					notified = true;
				}
			} else if (state.alive == segment_state_paused.alive &&
				   state.operating ==
				   segment_state_paused.operating) {
				/* this is lite pause where channel is
				 * still valid just 'pause' of it
				 */
				if (bus_no == g_diagpool_bus_no &&
				    dev_no == g_diagpool_dev_no) {
					/* this will trigger the
					 * diag_shutdown.sh script in
					 * the visorchipset hotplug
					 */
					kobject_uevent_env
					    (&visorchipset_platform_device.dev.
					     kobj, KOBJ_ONLINE, envp);
				}
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			if (notifiers->device_destroy) {
				(*notifiers->device_destroy) (bus_no, dev_no);
				notified = true;
			}
			break;
		}
	}
	if (notified)
		/* The callback function just called above is responsible
		 * for calling the appropriate visorchipset_busdev_responders
		 * function, which will call device_responder()
		 */
		;
	else
		device_responder(cmd, bus_no, dev_no, response);
	up(&notifier_lock);
}

static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visorchipset_bus_info *bus_info;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&bus_info->entry);
	bus_info->bus_no = bus_no;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		bus_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;

	bus_info->flags.server = inmsg->hdr.flags.server;
	bus_info->chan_info.channel_addr = cmd->create_bus.channel_addr;
	bus_info->chan_info.n_channel_bytes = cmd->create_bus.channel_bytes;
	bus_info->chan_info.channel_type_uuid =
			cmd->create_bus.bus_data_type_uuid;
	bus_info->chan_info.channel_inst_uuid = cmd->create_bus.bus_inst_uuid;

	list_add(&bus_info->entry, &bus_info_list);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
	bus_epilog(bus_no, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_no, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;
	char s[99];

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr.id != CONTROLVM_INVALID) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		bus_info->partition_handle = cmd->configure_bus.guest_handle;
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		visorchannel_uuid_id(&bus_info->partition_uuid, s);
		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_no, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visorchipset_device_info *dev_info;
	struct visorchipset_bus_info *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto cleanup;
	}
	bus_info = bus_find(&bus_info_list, bus_no);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto cleanup;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto cleanup;
	}

	INIT_LIST_HEAD(&dev_info->entry);
	dev_info->bus_no = bus_no;
	dev_info->dev_no = dev_no;
	dev_info->dev_inst_uuid = cmd->create_device.dev_inst_uuid;
	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.test_message == 1)
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALTEST;
	else
		dev_info->chan_info.addr_type = ADDRTYPE_LOCALPHYSICAL;
	dev_info->chan_info.channel_addr = cmd->create_device.channel_addr;
	dev_info->chan_info.n_channel_bytes = cmd->create_device.channel_bytes;
	dev_info->chan_info.channel_type_uuid =
			cmd->create_device.data_type_uuid;
	dev_info->chan_info.intr = cmd->create_device.intr;
	list_add(&dev_info->entry, &dev_info_list);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
cleanup:
	/* get the bus and devNo for DiagPool channel */
	if (dev_info &&
	    is_diagpool_channel(dev_info->chan_info.channel_type_uuid)) {
		g_diagpool_bus_no = bus_no;
		g_diagpool_dev_no = dev_no;
	}
	device_epilog(bus_no, dev_no, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visorchipset_device_info *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = device_find(&dev_info_list, bus_no, dev_no);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(bus_no, dev_no, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct. Returns CONTROLVM_RESP_SUCCESS on
 * success, or a negative CONTROLVM_RESP_ERROR code on failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 __iomem *payload = NULL;
	int rc = CONTROLVM_RESP_SUCCESS;

	if (!info) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0)) {
		rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
		goto cleanup;
	}
	payload = ioremap_cache(phys_addr + offset, bytes);
	if (!payload) {
		rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
		goto cleanup;
	}

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

cleanup:
	if (rc < 0) {
		if (payload) {
			iounmap(payload);
			payload = NULL;
		}
	}
	return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		iounmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

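/* Read the request payload offset and size out of the controlvm channel
 * header and map that payload area into controlvm_payload_info.
 */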
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}

/* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_ready);

int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_selftest);

/* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 * Returns CONTROLVM_RESP_xxx code.
 */
int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
EXPORT_SYMBOL_GPL(visorchipset_chipset_notready);

static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
		controlvm_respond(msg_hdr, rc);
	if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
		/* Send CHIPSET_READY response when all modules have been loaded
		 * and disks mounted for the partition
		 */
		g_chipset_msg_hdr = *msg_hdr;
	}
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

/* This is your "one-stop" shop for grabbing the next message from the
 * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (visorchannel_signalremove(controlvm_channel,
				      CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}

/*
 * The general parahotplug flow works as follows. The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable. The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device. The udev script then writes to the parahotplug sysfs
 * attributes (deviceenabled/devicedisabled), which causes
 * parahotplug_request_complete to be called, at which point the
 * appropriate CONTROLVM message is retrieved from the list and
 * responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate a unique int to match an outstanding CONTROLVM message with a
 * udev script response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	     env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the deviceenabled/devicedisabled store handlers, which means
 * the user script has finished the enable/disable. Find the matching
 * identifier, and respond to the CONTROLVM message with success.
 */
1752 static int
1753 parahotplug_request_complete(int id, u16 active)
1754 {
1755 struct list_head *pos;
1756 struct list_head *tmp;
1757
1758 spin_lock(&parahotplug_request_list_lock);
1759
1760 /* Look for a request matching "id". */
1761 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1762 struct parahotplug_request *req =
1763 list_entry(pos, struct parahotplug_request, list);
1764 if (req->id == id) {
1765 /* Found a match. Remove it from the list and
1766 * respond.
1767 */
1768 list_del(pos);
1769 spin_unlock(&parahotplug_request_list_lock);
1770 req->msg.cmd.device_change_state.state.active = active;
1771 if (req->msg.hdr.flags.response_expected)
1772 controlvm_respond_physdev_changestate(
1773 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1774 req->msg.cmd.device_change_state.state);
1775 parahotplug_request_destroy(req);
1776 return 0;
1777 }
1778 }
1779
1780 spin_unlock(&parahotplug_request_list_lock);
1781 return -1;
1782 }
1783
1784 /*
1785 * Enables or disables a PCI device by kicking off a udev script
1786 */
1787 static void
1788 parahotplug_process_message(struct controlvm_message *inmsg)
1789 {
1790 struct parahotplug_request *req;
1791
1792 req = parahotplug_request_create(inmsg);
1793
1794 if (!req)
1795 return;
1796
1797 if (inmsg->cmd.device_change_state.state.active) {
1798 /* For enable messages, just respond with success
1799 * right away. This is a bit of a hack, but there are
1800 * issues with the early enable messages we get (with
1801 * either the udev script not detecting that the device
1802 * is up, or not getting called at all). Fortunately
1803 * the messages that get lost don't matter anyway, as
1804 * devices are automatically enabled at
1805 * initialization.
1806 */
1807 parahotplug_request_kickoff(req);
1808 controlvm_respond_physdev_changestate(&inmsg->hdr,
1809 CONTROLVM_RESP_SUCCESS,
1810 inmsg->cmd.device_change_state.state);
1811 parahotplug_request_destroy(req);
1812 } else {
1813 /* For disable messages, add the request to the
1814 * request list before kicking off the udev script. It
1815 * won't get responded to until the script has
1816 * indicated it's done.
1817 */
1818 spin_lock(&parahotplug_request_list_lock);
1819 list_add_tail(&req->list, &parahotplug_request_list);
1820 spin_unlock(&parahotplug_request_list_lock);
1821
1822 parahotplug_request_kickoff(req);
1823 }
1824 }
1825
1826 /* Process a controlvm message.
1827 * Return result:
1828 * false - this function will return false only in the case where the
1829 * controlvm message was NOT processed, but processing must be
1830 * retried before reading the next controlvm message; a
1831 * scenario where this can occur is when we need to throttle
1832 * the allocation of memory in which to copy out controlvm
1833 * payload data
1834 * true - processing of the controlvm message completed,
1835 * either successfully or with an error.
1836 */
1837 static bool
1838 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1839 {
1840 struct controlvm_message_packet *cmd = &inmsg.cmd;
1841 u64 parm_addr;
1842 u32 parm_bytes;
1843 struct parser_context *parser_ctx = NULL;
1844 bool local_addr;
1845 struct controlvm_message ackmsg;
1846
1847 /* create parsing context if necessary */
1848 local_addr = (inmsg.hdr.flags.test_message == 1);
1849 if (channel_addr == 0)
1850 return true;
1851 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1852 parm_bytes = inmsg.hdr.payload_bytes;
1853
1854 /* Parameter and channel addresses within test messages actually lie
1855 * within our OS-controlled memory. We need to know that, because it
1856 * makes a difference in how we compute the virtual address.
1857 */
1858 if (parm_addr && parm_bytes) {
1859 bool retry = false;
1860
1861 parser_ctx =
1862 parser_init_byte_stream(parm_addr, parm_bytes,
1863 local_addr, &retry);
1864 if (!parser_ctx && retry)
1865 return false;
1866 }
1867
1868 if (!local_addr) {
1869 controlvm_init_response(&ackmsg, &inmsg.hdr,
1870 CONTROLVM_RESP_SUCCESS);
1871 if (controlvm_channel)
1872 visorchannel_signalinsert(controlvm_channel,
1873 CONTROLVM_QUEUE_ACK,
1874 &ackmsg);
1875 }
1876 switch (inmsg.hdr.id) {
1877 case CONTROLVM_CHIPSET_INIT:
1878 chipset_init(&inmsg);
1879 break;
1880 case CONTROLVM_BUS_CREATE:
1881 bus_create(&inmsg);
1882 break;
1883 case CONTROLVM_BUS_DESTROY:
1884 bus_destroy(&inmsg);
1885 break;
1886 case CONTROLVM_BUS_CONFIGURE:
1887 bus_configure(&inmsg, parser_ctx);
1888 break;
1889 case CONTROLVM_DEVICE_CREATE:
1890 my_device_create(&inmsg);
1891 break;
1892 case CONTROLVM_DEVICE_CHANGESTATE:
1893 if (cmd->device_change_state.flags.phys_device) {
1894 parahotplug_process_message(&inmsg);
1895 } else {
1896 /* save the hdr and cmd structures for later use
1897  * when sending back the response to the command */
1898 my_device_changestate(&inmsg);
1899 g_devicechangestate_packet = inmsg.cmd;
1901 }
1902 break;
1903 case CONTROLVM_DEVICE_DESTROY:
1904 my_device_destroy(&inmsg);
1905 break;
1906 case CONTROLVM_DEVICE_CONFIGURE:
1907 /* no op for now, just send a respond that we passed */
1908 if (inmsg.hdr.flags.response_expected)
1909 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1910 break;
1911 case CONTROLVM_CHIPSET_READY:
1912 chipset_ready(&inmsg.hdr);
1913 break;
1914 case CONTROLVM_CHIPSET_SELFTEST:
1915 chipset_selftest(&inmsg.hdr);
1916 break;
1917 case CONTROLVM_CHIPSET_STOP:
1918 chipset_notready(&inmsg.hdr);
1919 break;
1920 default:
1921 if (inmsg.hdr.flags.response_expected)
1922 controlvm_respond(&inmsg.hdr,
1923 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1924 break;
1925 }
1926
1927 if (parser_ctx) {
1928 parser_done(parser_ctx);
1929 parser_ctx = NULL;
1930 }
1931 return true;
1932 }
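
/*
 * The retry contract above, restated as a minimal caller sketch (this
 * simply mirrors the loop in controlvm_periodic_work() below; it is
 * not additional shipped code).  A false return means "retry this same
 * message", so the caller must stash it rather than drop it:
 *
 *	struct controlvm_message msg;
 *	u64 addr = visorchannel_get_physaddr(controlvm_channel);
 *
 *	while (read_controlvm_event(&msg)) {
 *		if (!handle_command(msg, addr)) {
 *			controlvm_pending_msg = msg;	// retry next poll
 *			controlvm_pending_msg_valid = true;
 *			break;
 *		}
 *	}
 */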
1933
1934 static u64 controlvm_get_channel_address(void)
1935 {
1936 u64 addr = 0;
1937 u32 size = 0;
1938
1939 if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1940 return 0;
1941
1942 return addr;
1943 }
1944
1945 static void
1946 controlvm_periodic_work(struct work_struct *work)
1947 {
1948 struct controlvm_message inmsg;
1949 bool got_command = false;
1950 bool handle_command_failed = false;
1951 static u64 poll_count;
1952
1953 /* make sure visorbus server is registered for controlvm callbacks */
1954 if (visorchipset_visorbusregwait && !visorbusregistered)
1955 goto cleanup;
1956
1957 poll_count++;
1958 	if (poll_count < 250)
1959 		goto cleanup;	/* don't process until the 250th poll */
1962
1963 /* Check events to determine if response to CHIPSET_READY
1964 * should be sent
1965 */
1966 if (visorchipset_holdchipsetready &&
1967 (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1968 if (check_chipset_events() == 1) {
1969 controlvm_respond(&g_chipset_msg_hdr, 0);
1970 clear_chipset_events();
1971 memset(&g_chipset_msg_hdr, 0,
1972 sizeof(struct controlvm_message_header));
1973 }
1974 }
1975
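	/* drain and discard any stale messages on the response queue */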
1976 while (visorchannel_signalremove(controlvm_channel,
1977 CONTROLVM_QUEUE_RESPONSE,
1978 &inmsg))
1979 ;
1980 if (!got_command) {
1981 if (controlvm_pending_msg_valid) {
1982 /* we throttled processing of a prior
1983 * msg, so try to process it again
1984 * rather than reading a new one
1985 */
1986 inmsg = controlvm_pending_msg;
1987 controlvm_pending_msg_valid = false;
1988 got_command = true;
1989 } else {
1990 got_command = read_controlvm_event(&inmsg);
1991 }
1992 }
1993
1994 handle_command_failed = false;
1995 while (got_command && (!handle_command_failed)) {
1996 most_recent_message_jiffies = jiffies;
1997 if (handle_command(inmsg,
1998 visorchannel_get_physaddr
1999 (controlvm_channel)))
2000 got_command = read_controlvm_event(&inmsg);
2001 else {
2002 /* this is a scenario where throttling
2003 * is required, but probably NOT an
2004 * error...; we stash the current
2005 * controlvm msg so we will attempt to
2006 * reprocess it on our next loop
2007 */
2008 handle_command_failed = true;
2009 controlvm_pending_msg = inmsg;
2010 controlvm_pending_msg_valid = true;
2011 }
2012 }
2013
2014 /* parahotplug_worker */
2015 parahotplug_process_list();
2016
2017 cleanup:
2018
2019 if (time_after(jiffies,
2020 most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
2021 /* it's been longer than MIN_IDLE_SECONDS since we
2022 * processed our last controlvm message; slow down the
2023 * polling
2024 */
2025 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
2026 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2027 } else {
2028 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
2029 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2030 }
2031
2032 queue_delayed_work(periodic_controlvm_workqueue,
2033 &periodic_controlvm_work, poll_jiffies);
2034 }
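
/*
 * Worked example of the adaptive polling above, assuming HZ == 250:
 * HZ * MIN_IDLE_SECONDS == 2500 jiffies of quiet time must elapse
 * before a pass re-queues itself at POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
 * the first new message refreshes most_recent_message_jiffies, so the
 * following pass snaps back to POLLJIFFIES_CONTROLVMCHANNEL_FAST.
 */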
2035
2036 static void
2037 setup_crash_devices_work_queue(struct work_struct *work)
2038 {
2039 struct controlvm_message local_crash_bus_msg;
2040 struct controlvm_message local_crash_dev_msg;
2041 struct controlvm_message msg;
2042 u32 local_crash_msg_offset;
2043 u16 local_crash_msg_count;
2044
2045 /* make sure visorbus is registered for controlvm callbacks */
2046 if (visorchipset_visorbusregwait && !visorbusregistered)
2047 goto cleanup;
2048
2049 POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
2050
2051 /* send init chipset msg */
2052 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
2053 msg.cmd.init_chipset.bus_count = 23;
2054 msg.cmd.init_chipset.switch_count = 0;
2055
2056 chipset_init(&msg);
2057
2058 /* get saved message count */
2059 if (visorchannel_read(controlvm_channel,
2060 offsetof(struct spar_controlvm_channel_protocol,
2061 saved_crash_message_count),
2062 &local_crash_msg_count, sizeof(u16)) < 0) {
2063 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2064 POSTCODE_SEVERITY_ERR);
2065 return;
2066 }
2067
2068 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
2069 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
2070 local_crash_msg_count,
2071 POSTCODE_SEVERITY_ERR);
2072 return;
2073 }
2074
2075 /* get saved crash message offset */
2076 if (visorchannel_read(controlvm_channel,
2077 offsetof(struct spar_controlvm_channel_protocol,
2078 saved_crash_message_offset),
2079 &local_crash_msg_offset, sizeof(u32)) < 0) {
2080 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
2081 POSTCODE_SEVERITY_ERR);
2082 return;
2083 }
2084
2085 /* read create device message for storage bus offset */
2086 if (visorchannel_read(controlvm_channel,
2087 local_crash_msg_offset,
2088 &local_crash_bus_msg,
2089 sizeof(struct controlvm_message)) < 0) {
2090 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
2091 POSTCODE_SEVERITY_ERR);
2092 return;
2093 }
2094
2095 /* read create device message for storage device */
2096 if (visorchannel_read(controlvm_channel,
2097 local_crash_msg_offset +
2098 sizeof(struct controlvm_message),
2099 &local_crash_dev_msg,
2100 sizeof(struct controlvm_message)) < 0) {
2101 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
2102 POSTCODE_SEVERITY_ERR);
2103 return;
2104 }
2105
2106 /* reuse IOVM create bus message */
2107 if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
2108 bus_create(&local_crash_bus_msg);
2109 } else {
2110 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
2111 POSTCODE_SEVERITY_ERR);
2112 return;
2113 }
2114
2115 /* reuse create device message for storage device */
2116 if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2117 my_device_create(&local_crash_dev_msg);
2118 } else {
2119 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2120 POSTCODE_SEVERITY_ERR);
2121 return;
2122 }
2123 POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2124 return;
2125
2126 cleanup:
2127
2128 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2129
2130 queue_delayed_work(periodic_controlvm_workqueue,
2131 &periodic_controlvm_work, poll_jiffies);
2132 }
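
/*
 * Layout of the saved crash messages read above (the two fixed-size
 * reads imply CONTROLVM_CRASHMSG_MAX == 2):
 *
 *	saved_crash_message_offset --> +------------------------------+
 *	                               | CONTROLVM_BUS_CREATE msg     |
 *	+ sizeof(struct                +------------------------------+
 *	  controlvm_message)      ---> | CONTROLVM_DEVICE_CREATE msg  |
 *	                               +------------------------------+
 */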
2133
2134 static void
2135 bus_create_response(u32 bus_no, int response)
2136 {
2137 bus_responder(CONTROLVM_BUS_CREATE, bus_no, response);
2138 }
2139
2140 static void
2141 bus_destroy_response(u32 bus_no, int response)
2142 {
2143 bus_responder(CONTROLVM_BUS_DESTROY, bus_no, response);
2144 }
2145
2146 static void
2147 device_create_response(u32 bus_no, u32 dev_no, int response)
2148 {
2149 device_responder(CONTROLVM_DEVICE_CREATE, bus_no, dev_no, response);
2150 }
2151
2152 static void
2153 device_destroy_response(u32 bus_no, u32 dev_no, int response)
2154 {
2155 device_responder(CONTROLVM_DEVICE_DESTROY, bus_no, dev_no, response);
2156 }
2157
2158 void
2159 visorchipset_device_pause_response(u32 bus_no, u32 dev_no, int response)
2160 {
2161 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2162 bus_no, dev_no, response,
2163 segment_state_standby);
2164 }
2165 EXPORT_SYMBOL_GPL(visorchipset_device_pause_response);
2166
2167 static void
2168 device_resume_response(u32 bus_no, u32 dev_no, int response)
2169 {
2170 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2171 bus_no, dev_no, response,
2172 segment_state_running);
2173 }
2174
2175 bool
2176 visorchipset_get_bus_info(u32 bus_no, struct visorchipset_bus_info *bus_info)
2177 {
2178 void *p = bus_find(&bus_info_list, bus_no);
2179
2180 if (!p)
2181 return false;
2182 memcpy(bus_info, p, sizeof(struct visorchipset_bus_info));
2183 return true;
2184 }
2185 EXPORT_SYMBOL_GPL(visorchipset_get_bus_info);
2186
2187 bool
2188 visorchipset_set_bus_context(u32 bus_no, void *context)
2189 {
2190 struct visorchipset_bus_info *p = bus_find(&bus_info_list, bus_no);
2191
2192 if (!p)
2193 return false;
2194 p->bus_driver_context = context;
2195 return true;
2196 }
2197 EXPORT_SYMBOL_GPL(visorchipset_set_bus_context);
2198
2199 bool
2200 visorchipset_get_device_info(u32 bus_no, u32 dev_no,
2201 struct visorchipset_device_info *dev_info)
2202 {
2203 void *p = device_find(&dev_info_list, bus_no, dev_no);
2204
2205 if (!p)
2206 return false;
2207 memcpy(dev_info, p, sizeof(struct visorchipset_device_info));
2208 return true;
2209 }
2210 EXPORT_SYMBOL_GPL(visorchipset_get_device_info);
2211
2212 bool
2213 visorchipset_set_device_context(u32 bus_no, u32 dev_no, void *context)
2214 {
2215 struct visorchipset_device_info *p;
2216
2217 p = device_find(&dev_info_list, bus_no, dev_no);
2218
2219 if (!p)
2220 return false;
2221 p->bus_driver_context = context;
2222 return true;
2223 }
2224 EXPORT_SYMBOL_GPL(visorchipset_set_device_context);
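
/*
 * Illustrative only -- how a visorbus client might pair the accessors
 * above; "struct my_dev_ctx" is hypothetical and <linux/slab.h> is
 * assumed for kzalloc():
 *
 *	struct visorchipset_device_info info;
 *	struct my_dev_ctx *ctx;
 *
 *	if (!visorchipset_get_device_info(bus_no, dev_no, &info))
 *		return -ENODEV;
 *	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 *	// ... initialize ctx from info ...
 *	visorchipset_set_device_context(bus_no, dev_no, ctx);
 */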
2225
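/* The chipsetready sysfs interface is written by support scripts to
 * report guest boot milestones; each recognized string latches one
 * chipset_events[] slot, which check_chipset_events() later tests.
 */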
2226 static ssize_t chipsetready_store(struct device *dev,
2227 struct device_attribute *attr,
2228 const char *buf, size_t count)
2229 {
2230 char msgtype[64];
2231
2232 if (sscanf(buf, "%63s", msgtype) != 1)
2233 return -EINVAL;
2234
2235 if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2236 chipset_events[0] = 1;
2237 return count;
2238 } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2239 chipset_events[1] = 1;
2240 return count;
2241 }
2242 return -EINVAL;
2243 }
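
/*
 * Illustrative usage of the attribute above; the exact sysfs path is
 * an assumption based on the "visorchipset" platform device registered
 * in visorchipset_init():
 *
 *	// assuming <stdio.h>
 *	FILE *f = fopen("/sys/devices/platform/visorchipset/chipsetready",
 *			"w");
 *
 *	if (f) {
 *		fputs("MODULES_LOADED\n", f);
 *		fclose(f);
 *	}
 */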
2244
2245 /* The parahotplug/devicedisabled interface gets called by our support script
2246 * when an SR-IOV device has been shut down. The ID is passed to the script
2247 * and then passed back when the device has been removed.
2248 */
2249 static ssize_t devicedisabled_store(struct device *dev,
2250 struct device_attribute *attr,
2251 const char *buf, size_t count)
2252 {
2253 unsigned int id;
2254
2255 if (kstrtouint(buf, 10, &id))
2256 return -EINVAL;
2257
2258 parahotplug_request_complete(id, 0);
2259 return count;
2260 }
2261
2262 /* The parahotplug/deviceenabled interface gets called by our support script
2263 * when an SR-IOV device has been recovered. The ID is passed to the script
2264 * and then passed back when the device has been brought back up.
2265 */
2266 static ssize_t deviceenabled_store(struct device *dev,
2267 struct device_attribute *attr,
2268 const char *buf, size_t count)
2269 {
2270 unsigned int id;
2271
2272 if (kstrtouint(buf, 10, &id))
2273 return -EINVAL;
2274
2275 parahotplug_request_complete(id, 1);
2276 return count;
2277 }
2278
2279 static int
2280 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2281 {
2282 unsigned long physaddr = 0;
2283 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2284 u64 addr = 0;
2285
2286 /* sv_enable_dfp(); */
2287 if (offset & (PAGE_SIZE - 1))
2288 return -ENXIO; /* need aligned offsets */
2289
2290 switch (offset) {
2291 case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2292 vma->vm_flags |= VM_IO;
2293 if (!*file_controlvm_channel)
2294 return -ENXIO;
2295
2296 visorchannel_read(*file_controlvm_channel,
2297 offsetof(struct spar_controlvm_channel_protocol,
2298 gp_control_channel),
2299 &addr, sizeof(addr));
2300 if (!addr)
2301 return -ENXIO;
2302
2303 physaddr = (unsigned long)addr;
2304 if (remap_pfn_range(vma, vma->vm_start,
2305 physaddr >> PAGE_SHIFT,
2306 vma->vm_end - vma->vm_start,
2307 /*pgprot_noncached */
2308 (vma->vm_page_prot))) {
2309 return -EAGAIN;
2310 }
2311 break;
2312 default:
2313 return -ENXIO;
2314 }
2315 return 0;
2316 }
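
/*
 * Illustrative userspace sketch (not part of this driver) of mapping
 * the GP control channel through the handler above; the device node
 * name and "chan_bytes" are assumptions:
 *
 *	// assuming <fcntl.h> and <sys/mman.h>
 *	int fd = open("/dev/visorchipset", O_RDWR);
 *	void *chan = mmap(NULL, chan_bytes, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd,
 *			  VISORCHIPSET_MMAP_CONTROLCHANOFFSET);
 */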
2317
2318 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2319 unsigned long arg)
2320 {
2321 s64 adjustment;
2322 s64 vrtc_offset;
2323
2324 switch (cmd) {
2325 case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2326 /* get the physical rtc offset */
2327 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2328 if (copy_to_user((void __user *)arg, &vrtc_offset,
2329 sizeof(vrtc_offset))) {
2330 return -EFAULT;
2331 }
2332 return 0;
2333 case VMCALL_UPDATE_PHYSICAL_TIME:
2334 if (copy_from_user(&adjustment, (void __user *)arg,
2335 sizeof(adjustment))) {
2336 return -EFAULT;
2337 }
2338 return issue_vmcall_update_physical_time(adjustment);
2339 default:
2340 return -EFAULT;
2341 }
2342 }
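
/*
 * Illustrative userspace sketch of the two ioctls above (device node
 * name assumed, as in the mmap example; compute_adjustment() is
 * hypothetical):
 *
 *	// assuming <sys/ioctl.h> and <stdint.h>; fd as opened above
 *	int64_t vrtc_offset, adjustment;
 *
 *	if (ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET,
 *		  &vrtc_offset) == 0) {
 *		adjustment = compute_adjustment(vrtc_offset);
 *		ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adjustment);
 *	}
 */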
2343
2344 static const struct file_operations visorchipset_fops = {
2345 .owner = THIS_MODULE,
2346 .open = visorchipset_open,
2349 .unlocked_ioctl = visorchipset_ioctl,
2350 .release = visorchipset_release,
2351 .mmap = visorchipset_mmap,
2352 };
2353
2354 static int
2355 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2356 {
2357 int rc = 0;
2358
2359 file_controlvm_channel = controlvm_channel;
2360 cdev_init(&file_cdev, &visorchipset_fops);
2361 file_cdev.owner = THIS_MODULE;
2362 if (MAJOR(major_dev) == 0) {
2363 /* dynamic major device number registration required */
2364 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2365 if (rc < 0)
2366 return rc;
2367 } else {
2368 /* static major device number registration required */
2369 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2370 if (rc < 0)
2371 return rc;
2372 }
2373 rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2374 if (rc < 0) {
2375 unregister_chrdev_region(major_dev, 1);
2376 return rc;
2377 }
2378 return 0;
2379 }
2380
2381 static int
2382 visorchipset_init(struct acpi_device *acpi_device)
2383 {
2384 int rc = 0;
2385 u64 addr;
2386
2387 memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2388 memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2389 memset(&livedump_info, 0, sizeof(livedump_info));
2390 atomic_set(&livedump_info.buffers_in_use, 0);
2391
2392 addr = controlvm_get_channel_address();
2393 if (addr) {
2394 int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2395 uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2396 controlvm_channel =
2397 visorchannel_create_with_lock(addr, tmp_sz,
2398 GFP_KERNEL, uuid);
2399 if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2400 visorchannel_get_header(controlvm_channel))) {
2401 initialize_controlvm_payload();
2402 } else {
2403 visorchannel_destroy(controlvm_channel);
2404 controlvm_channel = NULL;
2405 return -ENODEV;
2406 }
2407 } else {
2408 return -ENODEV;
2409 }
2410
2411 major_dev = MKDEV(visorchipset_major, 0);
2412 rc = visorchipset_file_init(major_dev, &controlvm_channel);
2413 if (rc < 0) {
2414 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2415 goto cleanup;
2416 }
2417
2418 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2419
2420 /* if booting in a crash kernel */
2421 if (is_kdump_kernel())
2422 INIT_DELAYED_WORK(&periodic_controlvm_work,
2423 setup_crash_devices_work_queue);
2424 else
2425 INIT_DELAYED_WORK(&periodic_controlvm_work,
2426 controlvm_periodic_work);
2427 periodic_controlvm_workqueue =
2428 create_singlethread_workqueue("visorchipset_controlvm");
2429
2430 if (!periodic_controlvm_workqueue) {
2431 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2432 DIAG_SEVERITY_ERR);
2433 rc = -ENOMEM;
2434 goto cleanup;
2435 }
2436 most_recent_message_jiffies = jiffies;
2437 poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2438 /* queue_delayed_work() returns bool; failure to queue is fatal here */
2439 if (!queue_delayed_work(periodic_controlvm_workqueue,
2440 			&periodic_controlvm_work, poll_jiffies)) {
2441 	POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
2442 		DIAG_SEVERITY_ERR);
2443 	rc = -1;
2444 	goto cleanup;
2445 }
2445
2446 visorchipset_platform_device.dev.devt = major_dev;
2447 if (platform_device_register(&visorchipset_platform_device) < 0) {
2448 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2449 rc = -1;
2450 goto cleanup;
2451 }
2452 POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2453
2454 rc = visorbus_init();
2455 cleanup:
2456 if (rc) {
2457 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2458 POSTCODE_SEVERITY_ERR);
2459 }
2460 return rc;
2461 }
2462
2463 static void
2464 visorchipset_file_cleanup(dev_t major_dev)
2465 {
2466 if (file_cdev.ops)
2467 cdev_del(&file_cdev);
2468 file_cdev.ops = NULL;
2469 unregister_chrdev_region(major_dev, 1);
2470 }
2471
2472 static int
2473 visorchipset_exit(struct acpi_device *acpi_device)
2474 {
2475 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2476
2477 visorbus_exit();
2478
2479 cancel_delayed_work(&periodic_controlvm_work);
2480 flush_workqueue(periodic_controlvm_workqueue);
2481 destroy_workqueue(periodic_controlvm_workqueue);
2482 periodic_controlvm_workqueue = NULL;
2483 destroy_controlvm_payload_info(&controlvm_payload_info);
2484
2485 cleanup_controlvm_structures();
2486
2487 memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2488
2489 visorchannel_destroy(controlvm_channel);
2490
2491 visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2492 POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2493
2494 return 0;
2495 }
2496
2497 static const struct acpi_device_id unisys_device_ids[] = {
2498 {"PNP0A07", 0},
2499 {"", 0},
2500 };
2501
2502 static struct acpi_driver unisys_acpi_driver = {
2503 .name = "unisys_acpi",
2504 .class = "unisys_acpi_class",
2505 .owner = THIS_MODULE,
2506 .ids = unisys_device_ids,
2507 .ops = {
2508 .add = visorchipset_init,
2509 .remove = visorchipset_exit,
2510 },
2511 };
2512 static __init uint32_t visorutil_spar_detect(void)
2513 {
2514 unsigned int eax, ebx, ecx, edx;
2515
2516 if (cpu_has_hypervisor) {
2517 /* check the ID */
2518 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2519 return (ebx == UNISYS_SPAR_ID_EBX) &&
2520 (ecx == UNISYS_SPAR_ID_ECX) &&
2521 (edx == UNISYS_SPAR_ID_EDX);
2522 } else {
2523 return 0;
2524 }
2525 }
2526
2527 static int init_unisys(void)
2528 {
2529 int result;
2530 if (!visorutil_spar_detect())
2531 return -ENODEV;
2532
2533 result = acpi_bus_register_driver(&unisys_acpi_driver);
2534 if (result)
2535 return -ENODEV;
2536
2537 pr_info("Unisys Visorchipset Driver Loaded.\n");
2538 return 0;
2539 }
2540
2541 static void exit_unisys(void)
2542 {
2543 acpi_bus_unregister_driver(&unisys_acpi_driver);
2544 }
2545
2546 module_param_named(major, visorchipset_major, int, S_IRUGO);
2547 MODULE_PARM_DESC(visorchipset_major,
2548 "major device number to use for the device node");
2549 module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2550 MODULE_PARM_DESC(visorchipset_visorbusregwait,
2551 "1 to have the module wait for the visor bus to register");
2552 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2553 int, S_IRUGO);
2554 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2555 "1 to hold response to CHIPSET_READY");
2556
2557 module_init(init_unisys);
2558 module_exit(exit_unisys);
2559
2560 MODULE_AUTHOR("Unisys");
2561 MODULE_LICENSE("GPL");
2562 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2563 VERSION);
2564 MODULE_VERSION(VERSION);