staging: unisys: remove ERRDEV macros
deliverable/linux.git: drivers/staging/unisys/uislib/uislib.c
1/* uislib.c
2 *
f6d0c1e6 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
bac8a4d5
KC
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18/* @ALL_INSPECTED */
19#define EXPORT_SYMTAB
20#include <linux/kernel.h>
21#include <linux/highmem.h>
22#ifdef CONFIG_MODVERSIONS
23#include <config/modversions.h>
24#endif
25#include <linux/module.h>
28fa597f 26#include <linux/debugfs.h>
bac8a4d5 27
1d2def98
BR
28#include <linux/types.h>
29#include <linux/uuid.h>
bac8a4d5
KC
30
31#include <linux/version.h>
32#include "uniklog.h"
33#include "diagnostics/appos_subsystems.h"
34#include "uisutils.h"
35#include "vbuschannel.h"
36
37#include <linux/proc_fs.h>
38#include <linux/uaccess.h> /* for copy_from_user */
39#include <linux/ctype.h> /* for toupper */
40#include <linux/list.h>
41
42#include "sparstop.h"
43#include "visorchipset.h"
bac8a4d5
KC
44#include "version.h"
45#include "guestlinuxdebug.h"
46
47#define SET_PROC_OWNER(x, y)
48
bac8a4d5
KC
49#define POLLJIFFIES_NORMAL 1
50/* __MYFILE__ is shorter than using __FILE__ (full path name) in
51 * debug/info/error messages
52 */
55#define CURRENT_FILE_PC UISLIB_PC_uislib_c
56#define __MYFILE__ "uislib.c"
57
58/* global function pointers that act as callback functions into virtpcimod */
2df7cc62 59int (*virt_control_chan_func)(struct guest_msgs *);
bac8a4d5 60
b98ab24c
BR
61static int debug_buf_valid;
62static char *debug_buf; /* Note this MUST be global, because the
63 * contents must persist across debugfs reads */
64static unsigned int chipset_inited;
a8d7f21d 65
bac8a4d5
KC
66#define WAIT_ON_CALLBACK(handle) \
67 do { \
68 if (handle) \
69 break; \
70 UIS_THREAD_WAIT; \
71 } while (1)
72
b98ab24c
BR
73static struct bus_info *bus_list;
74static rwlock_t bus_list_lock;
75static int bus_list_count; /* number of buses in the list */
76static int max_bus_count; /* maximum number of buses expected */
77static u64 phys_data_chan;
78static int platform_no;
bac8a4d5 79
b98ab24c
BR
80static struct uisthread_info incoming_ti;
81static BOOL incoming_started = FALSE;
82static LIST_HEAD(poll_dev_chan);
a8d7f21d
KC
83static unsigned long long tot_moved_to_tail_cnt;
84static unsigned long long tot_wait_cnt;
85static unsigned long long tot_wakeup_cnt;
86static unsigned long long tot_schedule_cnt;
87static int en_smart_wakeup = 1;
b98ab24c
BR
88static DEFINE_SEMAPHORE(poll_dev_lock); /* unlocked */
89static DECLARE_WAIT_QUEUE_HEAD(poll_dev_wake_q);
90static int poll_dev_start;
bac8a4d5 91
bac8a4d5
KC
92#define CALLHOME_PROC_ENTRY_FN "callhome"
93#define CALLHOME_THROTTLED_PROC_ENTRY_FN "callhome_throttled"
b27a00de 94
28fa597f
BR
95#define DIR_DEBUGFS_ENTRY "uislib"
96static struct dentry *dir_debugfs;
97
98#define PLATFORMNUMBER_DEBUGFS_ENTRY_FN "platform"
99static struct dentry *platformnumber_debugfs_read;
100
b913a2ef
BR
101#define CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN "cycles_before_wait"
102static struct dentry *cycles_before_wait_debugfs_read;
103
81d2d7de
BR
104#define SMART_WAKEUP_DEBUGFS_ENTRY_FN "smart_wakeup"
105static struct dentry *smart_wakeup_debugfs_entry;
106
7ec96720
BR
107#define INFO_DEBUGFS_ENTRY_FN "info"
108static struct dentry *info_debugfs_entry;
109
a8d7f21d 110static unsigned long long cycles_before_wait, wait_cycles;
bac8a4d5
KC
111
112/*****************************************************/
113/* local functions */
114/*****************************************************/
115
7ec96720 116static ssize_t info_debugfs_read(struct file *file, char __user *buf,
dddfe8e6 117 size_t len, loff_t *offset);
7ec96720
BR
118static const struct file_operations debugfs_info_fops = {
119 .read = info_debugfs_read,
bac8a4d5
KC
120};
121
bac8a4d5 122static void
3ab47701 123init_msg_header(struct controlvm_message *msg, u32 id, uint rsp, uint svr)
bac8a4d5 124{
3ab47701 125 memset(msg, 0, sizeof(struct controlvm_message));
98d7b594
BR
126 msg->hdr.id = id;
127 msg->hdr.flags.response_expected = rsp;
128 msg->hdr.flags.server = svr;
bac8a4d5
KC
129}
130
eb4a1468 131static __iomem void *init_vbus_channel(u64 ch_addr, u32 ch_bytes)
bac8a4d5 132{
eb4a1468 133 void __iomem *ch = uislib_ioremap_cache(ch_addr, ch_bytes);
ddc9f84b 134
0aca7844 135 if (!ch)
eb4a1468 136 return NULL;
0aca7844 137
eb4a1468 138 if (!SPAR_VBUS_CHANNEL_OK_CLIENT(ch)) {
eb4a1468
BR
139 uislib_iounmap(ch);
140 return NULL;
bac8a4d5 141 }
eb4a1468 142 return ch;
bac8a4d5
KC
143}
144
145static int
3ab47701 146create_bus(struct controlvm_message *msg, char *buf)
bac8a4d5 147{
d0500864 148 u32 bus_no, dev_count;
bac8a4d5
KC
149 struct bus_info *tmp, *bus;
150 size_t size;
151
b98ab24c 152 if (max_bus_count == bus_list_count) {
b98ab24c 153 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, max_bus_count,
bac8a4d5
KC
154 POSTCODE_SEVERITY_ERR);
155 return CONTROLVM_RESP_ERROR_MAX_BUSES;
156 }
157
d0500864
BR
158 bus_no = msg->cmd.create_bus.bus_no;
159 dev_count = msg->cmd.create_bus.dev_count;
bac8a4d5 160
d0500864 161 POSTCODE_LINUX_4(BUS_CREATE_ENTRY_PC, bus_no, dev_count,
bac8a4d5
KC
162 POSTCODE_SEVERITY_INFO);
163
164 size =
165 sizeof(struct bus_info) +
d0500864 166 (dev_count * sizeof(struct device_info *));
60140462 167 bus = kzalloc(size, GFP_ATOMIC);
bac8a4d5 168 if (!bus) {
d0500864 169 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
bac8a4d5
KC
170 POSTCODE_SEVERITY_ERR);
171 return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
172 }
173
bac8a4d5
KC
174 /* Currently by default, the bus Number is the GuestHandle.
175 * A Configure Bus message can override this.
176 */
98d7b594 177 if (msg->hdr.flags.test_message) {
bac8a4d5 178 /* This implies we're the IOVM so set guest handle to 0... */
43ecb9fe 179 bus->guest_handle = 0;
d0500864 180 bus->bus_no = bus_no;
43ecb9fe 181 bus->local_vnic = 1;
20eca8f0 182 } else {
d0500864
BR
183 bus->bus_no = bus_no;
184 bus->guest_handle = bus_no;
20eca8f0 185 }
b41b1ad5 186 sprintf(bus->name, "%d", (int)bus->bus_no);
d0500864 187 bus->device_count = dev_count;
bac8a4d5 188 bus->device =
b41b1ad5 189 (struct device_info **)((char *)bus + sizeof(struct bus_info));
2ea5117b 190 bus->bus_inst_uuid = msg->cmd.create_bus.bus_inst_uuid;
43ecb9fe
BR
191 bus->bus_channel_bytes = 0;
192 bus->bus_channel = NULL;
bac8a4d5
KC
193
194 /* add bus to our bus list - but check for duplicates first */
b98ab24c
BR
195 read_lock(&bus_list_lock);
196 for (tmp = bus_list; tmp; tmp = tmp->next) {
43ecb9fe 197 if (tmp->bus_no == bus->bus_no)
bac8a4d5
KC
198 break;
199 }
b98ab24c 200 read_unlock(&bus_list_lock);
bac8a4d5 201 if (tmp) {
d0500864 202 /* found a bus already in the list with same bus_no -
bac8a4d5
KC
203 * reject add
204 */
43ecb9fe 205 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
bac8a4d5 206 POSTCODE_SEVERITY_ERR);
60140462 207 kfree(bus);
bac8a4d5
KC
208 return CONTROLVM_RESP_ERROR_ALREADY_DONE;
209 }
20eca8f0
BR
210 if ((msg->cmd.create_bus.channel_addr != 0) &&
211 (msg->cmd.create_bus.channel_bytes != 0)) {
2ea5117b 212 bus->bus_channel_bytes = msg->cmd.create_bus.channel_bytes;
43ecb9fe 213 bus->bus_channel =
2ea5117b
BR
214 init_vbus_channel(msg->cmd.create_bus.channel_addr,
215 msg->cmd.create_bus.channel_bytes);
bac8a4d5
KC
216 }
217 /* the msg is bound for virtpci; send guest_msgs struct to callback */
98d7b594 218 if (!msg->hdr.flags.server) {
bac8a4d5 219 struct guest_msgs cmd;
ddc9f84b 220
bac8a4d5 221 cmd.msgtype = GUEST_ADD_VBUS;
d0500864 222 cmd.add_vbus.bus_no = bus_no;
43ecb9fe 223 cmd.add_vbus.chanptr = bus->bus_channel;
d0500864 224 cmd.add_vbus.dev_count = dev_count;
2ea5117b
BR
225 cmd.add_vbus.bus_uuid = msg->cmd.create_bus.bus_data_type_uuid;
226 cmd.add_vbus.instance_uuid = msg->cmd.create_bus.bus_inst_uuid;
2df7cc62 227 if (!virt_control_chan_func) {
43ecb9fe 228 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
bac8a4d5 229 POSTCODE_SEVERITY_ERR);
d21bb450 230 kfree(bus);
bac8a4d5
KC
231 return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
232 }
2df7cc62 233 if (!virt_control_chan_func(&cmd)) {
43ecb9fe 234 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->bus_no,
bac8a4d5 235 POSTCODE_SEVERITY_ERR);
d21bb450 236 kfree(bus);
bac8a4d5
KC
237 return
238 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
239 }
240 }
bac8a4d5
KC
241
242 /* add bus at the head of our list */
b98ab24c
BR
243 write_lock(&bus_list_lock);
244 if (!bus_list) {
245 bus_list = bus;
20eca8f0 246 } else {
b98ab24c
BR
247 bus->next = bus_list;
248 bus_list = bus;
bac8a4d5 249 }
b98ab24c
BR
250 bus_list_count++;
251 write_unlock(&bus_list_lock);
bac8a4d5 252
43ecb9fe 253 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus->bus_no,
bac8a4d5
KC
254 POSTCODE_SEVERITY_INFO);
255 return CONTROLVM_RESP_SUCCESS;
256}
257
258static int
3ab47701 259destroy_bus(struct controlvm_message *msg, char *buf)
bac8a4d5
KC
260{
261 int i;
262 struct bus_info *bus, *prev = NULL;
81e4c97e 263 struct guest_msgs cmd;
e6c15b7b 264 u32 bus_no;
bac8a4d5 265
e6c15b7b 266 bus_no = msg->cmd.destroy_bus.bus_no;
bac8a4d5 267
b98ab24c 268 read_lock(&bus_list_lock);
81e4c97e 269
b98ab24c 270 bus = bus_list;
81e4c97e 271 while (bus) {
e6c15b7b 272 if (bus->bus_no == bus_no)
bac8a4d5 273 break;
81e4c97e
BR
274 prev = bus;
275 bus = bus->next;
bac8a4d5
KC
276 }
277
278 if (!bus) {
b98ab24c 279 read_unlock(&bus_list_lock);
bac8a4d5
KC
280 return CONTROLVM_RESP_ERROR_ALREADY_DONE;
281 }
81e4c97e
BR
282
283 /* verify that this bus has no devices. */
43ecb9fe 284 for (i = 0; i < bus->device_count; i++) {
81e4c97e 285 if (bus->device[i] != NULL) {
b98ab24c 286 read_unlock(&bus_list_lock);
81e4c97e
BR
287 return CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED;
288 }
289 }
b98ab24c 290 read_unlock(&bus_list_lock);
81e4c97e 291
98d7b594 292 if (msg->hdr.flags.server)
81e4c97e
BR
293 goto remove;
294
295 /* client messages require us to call the virtpci callback associated
296 with this bus. */
297 cmd.msgtype = GUEST_DEL_VBUS;
e6c15b7b 298 cmd.del_vbus.bus_no = bus_no;
0aca7844 299 if (!virt_control_chan_func)
81e4c97e 300 return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
0aca7844
BR
301
302 if (!virt_control_chan_func(&cmd))
81e4c97e 303 return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
81e4c97e
BR
304
305 /* finally, remove the bus from the list */
306remove:
b98ab24c 307 write_lock(&bus_list_lock);
81e4c97e
BR
308 if (prev) /* not at head */
309 prev->next = bus->next;
310 else
b98ab24c
BR
311 bus_list = bus->next;
312 bus_list_count--;
313 write_unlock(&bus_list_lock);
81e4c97e 314
43ecb9fe
BR
315 if (bus->bus_channel) {
316 uislib_iounmap(bus->bus_channel);
317 bus->bus_channel = NULL;
bac8a4d5
KC
318 }
319
60140462 320 kfree(bus);
bac8a4d5
KC
321 return CONTROLVM_RESP_SUCCESS;
322}
323
8a05beb1 324static int create_device(struct controlvm_message *msg, char *buf)
bac8a4d5
KC
325{
326 struct device_info *dev;
327 struct bus_info *bus;
8a05beb1
BR
328 struct guest_msgs cmd;
329 u32 bus_no, dev_no;
bac8a4d5 330 int result = CONTROLVM_RESP_SUCCESS;
8a05beb1
BR
331 u64 min_size = MIN_IO_CHANNEL_SIZE;
332 struct req_handler_info *req_handler;
bac8a4d5 333
8a05beb1
BR
334 bus_no = msg->cmd.create_device.bus_no;
335 dev_no = msg->cmd.create_device.dev_no;
bac8a4d5 336
8a05beb1 337 POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
bac8a4d5
KC
338 POSTCODE_SEVERITY_INFO);
339
c62bddfb 340 dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
bac8a4d5 341 if (!dev) {
8a05beb1 342 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
bac8a4d5
KC
343 POSTCODE_SEVERITY_ERR);
344 return CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
345 }
346
f91b9262 347 dev->channel_uuid = msg->cmd.create_device.data_type_uuid;
2ea5117b 348 dev->intr = msg->cmd.create_device.intr;
f91b9262 349 dev->channel_addr = msg->cmd.create_device.channel_addr;
8a05beb1
BR
350 dev->bus_no = bus_no;
351 dev->dev_no = dev_no;
bac8a4d5 352 sema_init(&dev->interrupt_callback_lock, 1); /* unlocked */
8a05beb1 353 sprintf(dev->devid, "vbus%u:dev%u", (unsigned)bus_no, (unsigned)dev_no);
bac8a4d5 354 /* map the channel memory for the device. */
c62bddfb 355 if (msg->hdr.flags.test_message) {
f796e84c 356 dev->chanptr = (void __iomem *)__va(dev->channel_addr);
c62bddfb 357 } else {
8a05beb1
BR
358 req_handler = req_handler_find(dev->channel_uuid);
359 if (req_handler)
bac8a4d5
KC
360 /* generic service handler registered for this
361 * channel
362 */
8a05beb1
BR
363 min_size = req_handler->min_channel_bytes;
364 if (min_size > msg->cmd.create_device.channel_bytes) {
8a05beb1
BR
365 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
366 bus_no, POSTCODE_SEVERITY_ERR);
bac8a4d5 367 result = CONTROLVM_RESP_ERROR_CHANNEL_SIZE_TOO_SMALL;
8a05beb1 368 goto cleanup;
bac8a4d5
KC
369 }
370 dev->chanptr =
f796e84c 371 uislib_ioremap_cache(dev->channel_addr,
f91b9262 372 msg->cmd.create_device.channel_bytes);
bac8a4d5 373 if (!dev->chanptr) {
bac8a4d5 374 result = CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
8a05beb1
BR
375 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
376 bus_no, POSTCODE_SEVERITY_ERR);
377 goto cleanup;
bac8a4d5
KC
378 }
379 }
f91b9262
BR
380 dev->instance_uuid = msg->cmd.create_device.dev_inst_uuid;
381 dev->channel_bytes = msg->cmd.create_device.channel_bytes;
bac8a4d5 382
b98ab24c
BR
383 read_lock(&bus_list_lock);
384 for (bus = bus_list; bus; bus = bus->next) {
8a05beb1
BR
385 if (bus->bus_no != bus_no)
386 continue;
387 /* make sure the device number is valid */
388 if (dev_no >= bus->device_count) {
8a05beb1
BR
389 result = CONTROLVM_RESP_ERROR_MAX_DEVICES;
390 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
391 bus_no, POSTCODE_SEVERITY_ERR);
392 read_unlock(&bus_list_lock);
393 goto cleanup;
394 }
395 /* make sure this device is not already set */
396 if (bus->device[dev_no]) {
8a05beb1
BR
397 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
398 dev_no, bus_no,
399 POSTCODE_SEVERITY_ERR);
400 result = CONTROLVM_RESP_ERROR_ALREADY_DONE;
401 read_unlock(&bus_list_lock);
402 goto cleanup;
403 }
404 read_unlock(&bus_list_lock);
405 /* the msg is bound for virtpci; send
406 * guest_msgs struct to callback
407 */
408 if (msg->hdr.flags.server) {
409 bus->device[dev_no] = dev;
410 POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
411 bus_no, POSTCODE_SEVERITY_INFO);
412 return CONTROLVM_RESP_SUCCESS;
413 }
414 if (uuid_le_cmp(dev->channel_uuid,
415 spar_vhba_channel_protocol_uuid) == 0) {
416 wait_for_valid_guid(&((struct channel_header __iomem *)
417 (dev->chanptr))->chtype);
418 if (!SPAR_VHBA_CHANNEL_OK_CLIENT(dev->chanptr)) {
bac8a4d5 419 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
8a05beb1 420 dev_no, bus_no,
bac8a4d5 421 POSTCODE_SEVERITY_ERR);
8a05beb1
BR
422 result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
423 goto cleanup;
bac8a4d5 424 }
8a05beb1
BR
425 cmd.msgtype = GUEST_ADD_VHBA;
426 cmd.add_vhba.chanptr = dev->chanptr;
427 cmd.add_vhba.bus_no = bus_no;
428 cmd.add_vhba.device_no = dev_no;
429 cmd.add_vhba.instance_uuid = dev->instance_uuid;
430 cmd.add_vhba.intr = dev->intr;
431 } else if (uuid_le_cmp(dev->channel_uuid,
432 spar_vnic_channel_protocol_uuid) == 0) {
433 wait_for_valid_guid(&((struct channel_header __iomem *)
434 (dev->chanptr))->chtype);
435 if (!SPAR_VNIC_CHANNEL_OK_CLIENT(dev->chanptr)) {
bac8a4d5 436 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC,
8a05beb1 437 dev_no, bus_no,
bac8a4d5 438 POSTCODE_SEVERITY_ERR);
8a05beb1
BR
439 result = CONTROLVM_RESP_ERROR_CHANNEL_INVALID;
440 goto cleanup;
bac8a4d5 441 }
8a05beb1
BR
442 cmd.msgtype = GUEST_ADD_VNIC;
443 cmd.add_vnic.chanptr = dev->chanptr;
444 cmd.add_vnic.bus_no = bus_no;
445 cmd.add_vnic.device_no = dev_no;
446 cmd.add_vnic.instance_uuid = dev->instance_uuid;
447 cmd.add_vnic.intr = dev->intr;
448 } else {
8a05beb1
BR
449 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
450 bus_no, POSTCODE_SEVERITY_ERR);
451 result = CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
452 goto cleanup;
453 }
454
455 if (!virt_control_chan_func) {
8a05beb1
BR
456 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
457 bus_no, POSTCODE_SEVERITY_ERR);
458 result = CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
459 goto cleanup;
460 }
461
462 if (!virt_control_chan_func(&cmd)) {
8a05beb1
BR
463 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no,
464 bus_no, POSTCODE_SEVERITY_ERR);
465 result =
466 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
467 goto cleanup;
bac8a4d5 468 }
8a05beb1
BR
469
470 bus->device[dev_no] = dev;
471 POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no,
472 bus_no, POSTCODE_SEVERITY_INFO);
473 return CONTROLVM_RESP_SUCCESS;
bac8a4d5 474 }
b98ab24c 475 read_unlock(&bus_list_lock);
bac8a4d5 476
8a05beb1 477 POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
bac8a4d5
KC
478 POSTCODE_SEVERITY_ERR);
479 result = CONTROLVM_RESP_ERROR_BUS_INVALID;
480
8a05beb1 481cleanup:
98d7b594 482 if (!msg->hdr.flags.test_message) {
bac8a4d5
KC
483 uislib_iounmap(dev->chanptr);
484 dev->chanptr = NULL;
485 }
486
60140462 487 kfree(dev);
bac8a4d5
KC
488 return result;
489}
490
441a29a5 491static int pause_device(struct controlvm_message *msg)
bac8a4d5 492{
441a29a5 493 u32 bus_no, dev_no;
bac8a4d5
KC
494 struct bus_info *bus;
495 struct device_info *dev;
496 struct guest_msgs cmd;
b5114432 497 int retval = CONTROLVM_RESP_SUCCESS;
bac8a4d5 498
441a29a5
BR
499 bus_no = msg->cmd.device_change_state.bus_no;
500 dev_no = msg->cmd.device_change_state.dev_no;
bac8a4d5 501
b98ab24c
BR
502 read_lock(&bus_list_lock);
503 for (bus = bus_list; bus; bus = bus->next) {
441a29a5 504 if (bus->bus_no == bus_no) {
bac8a4d5 505 /* make sure the device number is valid */
441a29a5 506 if (dev_no >= bus->device_count) {
b5114432 507 retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
bac8a4d5 508 } else {
b5114432 509 /* make sure this device exists */
441a29a5 510 dev = bus->device[dev_no];
b5114432 511 if (!dev) {
b5114432
SM
512 retval =
513 CONTROLVM_RESP_ERROR_ALREADY_DONE;
514 }
bac8a4d5
KC
515 }
516 break;
517 }
518 }
0aca7844 519 if (!bus)
b5114432 520 retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
0aca7844 521
b98ab24c 522 read_unlock(&bus_list_lock);
b5114432
SM
523 if (retval == CONTROLVM_RESP_SUCCESS) {
524 /* the msg is bound for virtpci; send
525 * guest_msgs struct to callback
526 */
441a29a5
BR
527 if (uuid_le_cmp(dev->channel_uuid,
528 spar_vhba_channel_protocol_uuid) == 0) {
b5114432
SM
529 cmd.msgtype = GUEST_PAUSE_VHBA;
530 cmd.pause_vhba.chanptr = dev->chanptr;
441a29a5
BR
531 } else if (uuid_le_cmp(dev->channel_uuid,
532 spar_vnic_channel_protocol_uuid) == 0) {
b5114432
SM
533 cmd.msgtype = GUEST_PAUSE_VNIC;
534 cmd.pause_vnic.chanptr = dev->chanptr;
535 } else {
b5114432
SM
536 return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
537 }
2df7cc62 538 if (!virt_control_chan_func) {
b5114432
SM
539 return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
540 }
2df7cc62 541 if (!virt_control_chan_func(&cmd)) {
b5114432
SM
542 return
543 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
544 }
545 }
546 return retval;
bac8a4d5
KC
547}
548
027b8c5b 549static int resume_device(struct controlvm_message *msg)
bac8a4d5 550{
027b8c5b 551 u32 bus_no, dev_no;
bac8a4d5
KC
552 struct bus_info *bus;
553 struct device_info *dev;
554 struct guest_msgs cmd;
4f01952d 555 int retval = CONTROLVM_RESP_SUCCESS;
bac8a4d5 556
027b8c5b
BR
557 bus_no = msg->cmd.device_change_state.bus_no;
558 dev_no = msg->cmd.device_change_state.dev_no;
bac8a4d5 559
b98ab24c
BR
560 read_lock(&bus_list_lock);
561 for (bus = bus_list; bus; bus = bus->next) {
027b8c5b 562 if (bus->bus_no == bus_no) {
bac8a4d5 563 /* make sure the device number is valid */
027b8c5b 564 if (dev_no >= bus->device_count) {
4f01952d 565 retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
bac8a4d5 566 } else {
4f01952d 567 /* make sure this device exists */
027b8c5b 568 dev = bus->device[dev_no];
4f01952d 569 if (!dev) {
4f01952d
SM
570 retval =
571 CONTROLVM_RESP_ERROR_ALREADY_DONE;
572 }
bac8a4d5
KC
573 }
574 break;
575 }
576 }
577
0aca7844 578 if (!bus)
4f01952d 579 retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
0aca7844 580
b98ab24c 581 read_unlock(&bus_list_lock);
4f01952d
SM
582 /* the msg is bound for virtpci; send
583 * guest_msgs struct to callback
584 */
585 if (retval == CONTROLVM_RESP_SUCCESS) {
027b8c5b
BR
586 if (uuid_le_cmp(dev->channel_uuid,
587 spar_vhba_channel_protocol_uuid) == 0) {
4f01952d
SM
588 cmd.msgtype = GUEST_RESUME_VHBA;
589 cmd.resume_vhba.chanptr = dev->chanptr;
027b8c5b
BR
590 } else if (uuid_le_cmp(dev->channel_uuid,
591 spar_vnic_channel_protocol_uuid) == 0) {
4f01952d
SM
592 cmd.msgtype = GUEST_RESUME_VNIC;
593 cmd.resume_vnic.chanptr = dev->chanptr;
594 } else {
4f01952d
SM
595 return CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
596 }
2df7cc62 597 if (!virt_control_chan_func) {
4f01952d
SM
598 return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
599 }
2df7cc62 600 if (!virt_control_chan_func(&cmd)) {
4f01952d
SM
601 return
602 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
603 }
604 }
605 return retval;
bac8a4d5
KC
606}
607
5b28f15a 608static int destroy_device(struct controlvm_message *msg, char *buf)
bac8a4d5 609{
5b28f15a 610 u32 bus_no, dev_no;
bac8a4d5
KC
611 struct bus_info *bus;
612 struct device_info *dev;
613 struct guest_msgs cmd;
3aa2ec58 614 int retval = CONTROLVM_RESP_SUCCESS;
bac8a4d5 615
5b28f15a
BR
616 bus_no = msg->cmd.destroy_device.bus_no;
617 dev_no = msg->cmd.destroy_device.dev_no;
bac8a4d5 618
b98ab24c 619 read_lock(&bus_list_lock);
b98ab24c 620 for (bus = bus_list; bus; bus = bus->next) {
5b28f15a 621 if (bus->bus_no == bus_no) {
bac8a4d5 622 /* make sure the device number is valid */
5b28f15a 623 if (dev_no >= bus->device_count) {
3aa2ec58 624 retval = CONTROLVM_RESP_ERROR_DEVICE_INVALID;
bac8a4d5 625 } else {
3aa2ec58 626 /* make sure this device exists */
5b28f15a 627 dev = bus->device[dev_no];
3aa2ec58 628 if (!dev) {
3aa2ec58
SM
629 retval =
630 CONTROLVM_RESP_ERROR_ALREADY_DONE;
631 }
bac8a4d5 632 }
bac8a4d5
KC
633 break;
634 }
635 }
636
0aca7844 637 if (!bus)
3aa2ec58 638 retval = CONTROLVM_RESP_ERROR_BUS_INVALID;
b98ab24c 639 read_unlock(&bus_list_lock);
3aa2ec58
SM
640 if (retval == CONTROLVM_RESP_SUCCESS) {
641 /* the msg is bound for virtpci; send
642 * guest_msgs struct to callback
643 */
5b28f15a
BR
644 if (uuid_le_cmp(dev->channel_uuid,
645 spar_vhba_channel_protocol_uuid) == 0) {
3aa2ec58
SM
646 cmd.msgtype = GUEST_DEL_VHBA;
647 cmd.del_vhba.chanptr = dev->chanptr;
5b28f15a
BR
648 } else if (uuid_le_cmp(dev->channel_uuid,
649 spar_vnic_channel_protocol_uuid) == 0) {
3aa2ec58
SM
650 cmd.msgtype = GUEST_DEL_VNIC;
651 cmd.del_vnic.chanptr = dev->chanptr;
652 } else {
3aa2ec58
SM
653 return
654 CONTROLVM_RESP_ERROR_CHANNEL_TYPE_UNKNOWN;
655 }
2df7cc62 656 if (!virt_control_chan_func) {
3aa2ec58
SM
657 return
658 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
659 }
2df7cc62 660 if (!virt_control_chan_func(&cmd)) {
3aa2ec58
SM
661 return
662 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
663 }
664/* you must disable channel interrupts BEFORE you unmap the channel,
665 * because if you unmap first, there may still be some activity going
666 * on which accesses the channel and you will get an "unable to handle
667 * kernel paging request"
668 */
669 if (dev->polling) {
5b28f15a 670 uislib_disable_channel_interrupts(bus_no, dev_no);
3aa2ec58
SM
671 }
672 /* unmap the channel memory for the device. */
98d7b594 673 if (!msg->hdr.flags.test_message) {
3aa2ec58
SM
674 uislib_iounmap(dev->chanptr);
675 }
676 kfree(dev);
5b28f15a 677 bus->device[dev_no] = NULL;
3aa2ec58
SM
678 }
679 return retval;
bac8a4d5
KC
680}
681
bac8a4d5 682static int
3ab47701 683init_chipset(struct controlvm_message *msg, char *buf)
bac8a4d5
KC
684{
685 POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
686
b98ab24c
BR
687 max_bus_count = msg->cmd.init_chipset.bus_count;
688 platform_no = msg->cmd.init_chipset.platform_number;
689 phys_data_chan = 0;
bac8a4d5
KC
690
691 /* We need to make sure we have our functions registered
692 * before processing messages. If we are a test vehicle the
98d7b594 693 * test_message for init_chipset will be set. We can ignore the
bac8a4d5 694 * waits for the callbacks, since this will be manually entered
98d7b594 695 * from a user. If no test_message is set, we will wait for the
bac8a4d5
KC
696 * functions.
697 */
98d7b594 698 if (!msg->hdr.flags.test_message)
2df7cc62 699 WAIT_ON_CALLBACK(virt_control_chan_func);
bac8a4d5
KC
700
701 chipset_inited = 1;
702 POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
703
704 return CONTROLVM_RESP_SUCCESS;
705}
706
3ee7441f 707static int delete_bus_glue(u32 bus_no)
bac8a4d5 708{
3ab47701 709 struct controlvm_message msg;
bac8a4d5
KC
710
711 init_msg_header(&msg, CONTROLVM_BUS_DESTROY, 0, 0);
3ee7441f 712 msg.cmd.destroy_bus.bus_no = bus_no;
0aca7844 713 if (destroy_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
bac8a4d5 714 return 0;
bac8a4d5
KC
715 return 1;
716}
717
419113c8 718static int delete_device_glue(u32 bus_no, u32 dev_no)
bac8a4d5 719{
3ab47701 720 struct controlvm_message msg;
bac8a4d5
KC
721
722 init_msg_header(&msg, CONTROLVM_DEVICE_DESTROY, 0, 0);
419113c8
BR
723 msg.cmd.destroy_device.bus_no = bus_no;
724 msg.cmd.destroy_device.dev_no = dev_no;
0aca7844 725 if (destroy_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
bac8a4d5 726 return 0;
bac8a4d5
KC
727 return 1;
728}
729
730int
e1242538
BR
731uislib_client_inject_add_bus(u32 bus_no, uuid_le inst_uuid,
732 u64 channel_addr, ulong n_channel_bytes)
bac8a4d5 733{
3ab47701 734 struct controlvm_message msg;
bac8a4d5 735
bac8a4d5 736 /* step 0: init the chipset */
e1242538 737 POSTCODE_LINUX_3(CHIPSET_INIT_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
bac8a4d5
KC
738
739 if (!chipset_inited) {
740 /* step: initialize the chipset */
741 init_msg_header(&msg, CONTROLVM_CHIPSET_INIT, 0, 0);
742 /* this change is needed so that console will come up
743 * OK even when the bus 0 create comes in late. If the
744 * bus 0 create is the first create, then the add_vnic
745 * will work fine, but if the bus 0 create arrives
746 * after number 4, then the add_vnic will fail, and the
747 * ultraboot will fail.
748 */
2ea5117b
BR
749 msg.cmd.init_chipset.bus_count = 23;
750 msg.cmd.init_chipset.switch_count = 0;
0aca7844 751 if (init_chipset(&msg, NULL) != CONTROLVM_RESP_SUCCESS)
bac8a4d5 752 return 0;
e1242538 753 POSTCODE_LINUX_3(CHIPSET_INIT_EXIT_PC, bus_no,
bac8a4d5
KC
754 POSTCODE_SEVERITY_INFO);
755 }
756
757 /* step 1: create a bus */
e1242538
BR
758 POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no,
759 POSTCODE_SEVERITY_WARNING);
bac8a4d5 760 init_msg_header(&msg, CONTROLVM_BUS_CREATE, 0, 0);
2ea5117b
BR
761 msg.cmd.create_bus.bus_no = bus_no;
762 msg.cmd.create_bus.dev_count = 23; /* devNo+1; */
763 msg.cmd.create_bus.channel_addr = channel_addr;
764 msg.cmd.create_bus.channel_bytes = n_channel_bytes;
bac8a4d5 765 if (create_bus(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
e1242538 766 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
bac8a4d5
KC
767 POSTCODE_SEVERITY_ERR);
768 return 0;
769 }
e1242538 770 POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
bac8a4d5
KC
771
772 return 1;
773}
774EXPORT_SYMBOL_GPL(uislib_client_inject_add_bus);
775
bac8a4d5 776int
ac15ba59 777uislib_client_inject_del_bus(u32 bus_no)
bac8a4d5 778{
ac15ba59 779 return delete_bus_glue(bus_no);
bac8a4d5
KC
780}
781EXPORT_SYMBOL_GPL(uislib_client_inject_del_bus);
782
783int
062d312d 784uislib_client_inject_pause_vhba(u32 bus_no, u32 dev_no)
bac8a4d5 785{
3ab47701 786 struct controlvm_message msg;
bac8a4d5
KC
787 int rc;
788
789 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
2ea5117b
BR
790 msg.cmd.device_change_state.bus_no = bus_no;
791 msg.cmd.device_change_state.dev_no = dev_no;
792 msg.cmd.device_change_state.state = segment_state_standby;
bac8a4d5 793 rc = pause_device(&msg);
0aca7844 794 if (rc != CONTROLVM_RESP_SUCCESS)
bac8a4d5 795 return rc;
bac8a4d5
KC
796 return 0;
797}
798EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vhba);
799
800int
ae47a51b 801uislib_client_inject_resume_vhba(u32 bus_no, u32 dev_no)
bac8a4d5 802{
3ab47701 803 struct controlvm_message msg;
bac8a4d5
KC
804 int rc;
805
806 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
2ea5117b
BR
807 msg.cmd.device_change_state.bus_no = bus_no;
808 msg.cmd.device_change_state.dev_no = dev_no;
809 msg.cmd.device_change_state.state = segment_state_running;
bac8a4d5 810 rc = resume_device(&msg);
0aca7844 811 if (rc != CONTROLVM_RESP_SUCCESS)
bac8a4d5 812 return rc;
bac8a4d5 813 return 0;
bac8a4d5
KC
814}
815EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vhba);
816
817int
3d3b7154 818uislib_client_inject_add_vhba(u32 bus_no, u32 dev_no,
5fc0229a 819 u64 phys_chan_addr, u32 chan_bytes,
3d3b7154 820 int is_test_addr, uuid_le inst_uuid,
4eddbf13 821 struct irq_info *intr)
bac8a4d5 822{
3ab47701 823 struct controlvm_message msg;
bac8a4d5 824
bac8a4d5
KC
825 /* the chipset has been init'ed and the bus has been previously
826 * created - verify it still exists, then (step 2) create the
827 * VHBA device on the bus
828 */
3d3b7154 829 POSTCODE_LINUX_4(VHBA_CREATE_ENTRY_PC, dev_no, bus_no,
bac8a4d5
KC
830 POSTCODE_SEVERITY_INFO);
831
832 init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
833 if (is_test_addr)
834 /* signify that the physical channel address does NOT
835 * need to be ioremap()ed
836 */
98d7b594 837 msg.hdr.flags.test_message = 1;
f91b9262
BR
838 msg.cmd.create_device.bus_no = bus_no;
839 msg.cmd.create_device.dev_no = dev_no;
840 msg.cmd.create_device.dev_inst_uuid = inst_uuid;
bac8a4d5 841 if (intr)
2ea5117b 842 msg.cmd.create_device.intr = *intr;
bac8a4d5 843 else
2ea5117b 844 memset(&msg.cmd.create_device.intr, 0,
4eddbf13 845 sizeof(struct irq_info));
f91b9262 846 msg.cmd.create_device.channel_addr = phys_chan_addr;
bac8a4d5 847 if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
bac8a4d5
KC
848 POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, chan_bytes,
849 MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
850 return 0;
851 }
f91b9262
BR
852 msg.cmd.create_device.channel_bytes = chan_bytes;
853 msg.cmd.create_device.data_type_uuid = spar_vhba_channel_protocol_uuid;
bac8a4d5 854 if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
3d3b7154 855 POSTCODE_LINUX_4(VHBA_CREATE_FAILURE_PC, dev_no, bus_no,
bac8a4d5
KC
856 POSTCODE_SEVERITY_ERR);
857 return 0;
858 }
3d3b7154 859 POSTCODE_LINUX_4(VHBA_CREATE_SUCCESS_PC, dev_no, bus_no,
bac8a4d5
KC
860 POSTCODE_SEVERITY_INFO);
861 return 1;
862}
863EXPORT_SYMBOL_GPL(uislib_client_inject_add_vhba);
864
865int
6bc962ac 866uislib_client_inject_del_vhba(u32 bus_no, u32 dev_no)
bac8a4d5 867{
6bc962ac 868 return delete_device_glue(bus_no, dev_no);
bac8a4d5
KC
869}
870EXPORT_SYMBOL_GPL(uislib_client_inject_del_vhba);
871
872int
94a887da 873uislib_client_inject_add_vnic(u32 bus_no, u32 dev_no,
5fc0229a 874 u64 phys_chan_addr, u32 chan_bytes,
94a887da 875 int is_test_addr, uuid_le inst_uuid,
4eddbf13 876 struct irq_info *intr)
bac8a4d5 877{
3ab47701 878 struct controlvm_message msg;
bac8a4d5 879
bac8a4d5
KC
880 /* the chipset has been init'ed and the bus has been previously
881 * created - verify it still exists, then (step 2) create the
882 * VNIC device on the bus
883 */
94a887da 884 POSTCODE_LINUX_4(VNIC_CREATE_ENTRY_PC, dev_no, bus_no,
bac8a4d5
KC
885 POSTCODE_SEVERITY_INFO);
886
887 init_msg_header(&msg, CONTROLVM_DEVICE_CREATE, 0, 0);
888 if (is_test_addr)
889 /* signify that the physical channel address does NOT
890 * need to be ioremap()ed
891 */
98d7b594 892 msg.hdr.flags.test_message = 1;
f91b9262
BR
893 msg.cmd.create_device.bus_no = bus_no;
894 msg.cmd.create_device.dev_no = dev_no;
895 msg.cmd.create_device.dev_inst_uuid = inst_uuid;
bac8a4d5 896 if (intr)
2ea5117b 897 msg.cmd.create_device.intr = *intr;
bac8a4d5 898 else
2ea5117b 899 memset(&msg.cmd.create_device.intr, 0,
4eddbf13 900 sizeof(struct irq_info));
f91b9262 901 msg.cmd.create_device.channel_addr = phys_chan_addr;
bac8a4d5 902 if (chan_bytes < MIN_IO_CHANNEL_SIZE) {
bac8a4d5
KC
903 POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, chan_bytes,
904 MIN_IO_CHANNEL_SIZE, POSTCODE_SEVERITY_ERR);
905 return 0;
906 }
f91b9262
BR
907 msg.cmd.create_device.channel_bytes = chan_bytes;
908 msg.cmd.create_device.data_type_uuid = spar_vnic_channel_protocol_uuid;
bac8a4d5 909 if (create_device(&msg, NULL) != CONTROLVM_RESP_SUCCESS) {
94a887da 910 POSTCODE_LINUX_4(VNIC_CREATE_FAILURE_PC, dev_no, bus_no,
bac8a4d5
KC
911 POSTCODE_SEVERITY_ERR);
912 return 0;
913 }
914
94a887da 915 POSTCODE_LINUX_4(VNIC_CREATE_SUCCESS_PC, dev_no, bus_no,
bac8a4d5
KC
916 POSTCODE_SEVERITY_INFO);
917 return 1;
918}
919EXPORT_SYMBOL_GPL(uislib_client_inject_add_vnic);
920
921int
68a4b12c 922uislib_client_inject_pause_vnic(u32 bus_no, u32 dev_no)
bac8a4d5 923{
3ab47701 924 struct controlvm_message msg;
bac8a4d5
KC
925 int rc;
926
927 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
2ea5117b
BR
928 msg.cmd.device_change_state.bus_no = bus_no;
929 msg.cmd.device_change_state.dev_no = dev_no;
930 msg.cmd.device_change_state.state = segment_state_standby;
bac8a4d5
KC
931 rc = pause_device(&msg);
932 if (rc != CONTROLVM_RESP_SUCCESS) {
bac8a4d5
KC
933 return -1;
934 }
935 return 0;
936}
937EXPORT_SYMBOL_GPL(uislib_client_inject_pause_vnic);
938
939int
3fe7cec4 940uislib_client_inject_resume_vnic(u32 bus_no, u32 dev_no)
bac8a4d5 941{
3ab47701 942 struct controlvm_message msg;
bac8a4d5
KC
943 int rc;
944
945 init_msg_header(&msg, CONTROLVM_DEVICE_CHANGESTATE, 0, 0);
2ea5117b
BR
946 msg.cmd.device_change_state.bus_no = bus_no;
947 msg.cmd.device_change_state.dev_no = dev_no;
948 msg.cmd.device_change_state.state = segment_state_running;
bac8a4d5 949 rc = resume_device(&msg);
0aca7844 950 if (rc != CONTROLVM_RESP_SUCCESS)
bac8a4d5 951 return -1;
bac8a4d5 952 return 0;
bac8a4d5
KC
953}
954EXPORT_SYMBOL_GPL(uislib_client_inject_resume_vnic);
955
956int
bdb628d0 957uislib_client_inject_del_vnic(u32 bus_no, u32 dev_no)
bac8a4d5 958{
bdb628d0 959 return delete_device_glue(bus_no, dev_no);
bac8a4d5
KC
960}
961EXPORT_SYMBOL_GPL(uislib_client_inject_del_vnic);
962
bac8a4d5
KC
963void *
964uislib_cache_alloc(struct kmem_cache *cur_pool, char *fn, int ln)
965{
966 /* __GFP_NORETRY means "ok to fail", meaning kmem_cache_alloc() can
967 * return NULL. If you do NOT specify __GFP_NORETRY, Linux
968 * will go to extreme measures to get memory for you (like,
969 * invoke oom killer), which will probably cripple the system.
970 */
971 void *p = kmem_cache_alloc(cur_pool, GFP_ATOMIC | __GFP_NORETRY);
ddc9f84b 972
0aca7844 973 if (p == NULL)
bac8a4d5 974 return NULL;
bac8a4d5
KC
975 return p;
976}
977EXPORT_SYMBOL_GPL(uislib_cache_alloc);
978
979void
980uislib_cache_free(struct kmem_cache *cur_pool, void *p, char *fn, int ln)
981{
0aca7844 982 if (p == NULL)
bac8a4d5 983 return;
bac8a4d5
KC
984 kmem_cache_free(cur_pool, p);
985}
986EXPORT_SYMBOL_GPL(uislib_cache_free);
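/* Usage sketch: how a hypothetical caller would pair these two helpers.
 * The pool, struct my_req, and the calling code below are assumed for
 * illustration only; uislib_cache_alloc()/uislib_cache_free() and
 * __MYFILE__ are the pieces defined in this file.
 *
 *	struct kmem_cache *pool;
 *	struct my_req *req;
 *
 *	pool = kmem_cache_create("my_req_pool", sizeof(struct my_req),
 *				 0, SLAB_HWCACHE_ALIGN, NULL);
 *	req = uislib_cache_alloc(pool, __MYFILE__, __LINE__);
 *	if (req)
 *		uislib_cache_free(pool, req, __MYFILE__, __LINE__);
 *	kmem_cache_destroy(pool);
 */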
987
988/*****************************************************/
989/* proc filesystem callback functions */
990/*****************************************************/
991
27dd5548
KC
992#define PLINE(...) uisutil_add_proc_line_ex(&tot, buff, \
993 buff_len, __VA_ARGS__)
bac8a4d5
KC
994
995static int
7ec96720 996info_debugfs_read_helper(char **buff, int *buff_len)
bac8a4d5
KC
997{
998 int i, tot = 0;
999 struct bus_info *bus;
1000
27dd5548
KC
1001 if (PLINE("\nBuses:\n") < 0)
1002 goto err_done;
bac8a4d5 1003
b98ab24c
BR
1004 read_lock(&bus_list_lock);
1005 for (bus = bus_list; bus; bus = bus->next) {
27dd5548 1006 if (PLINE(" bus=0x%p, busNo=%d, deviceCount=%d\n",
43ecb9fe 1007 bus, bus->bus_no, bus->device_count) < 0)
27dd5548 1008 goto err_done_unlock;
bac8a4d5 1009
27dd5548
KC
1010 if (PLINE(" Devices:\n") < 0)
1011 goto err_done_unlock;
bac8a4d5 1012
43ecb9fe 1013 for (i = 0; i < bus->device_count; i++) {
bac8a4d5 1014 if (bus->device[i]) {
27dd5548 1015 if (PLINE(" busNo %d, device[%i]: 0x%p, chanptr=0x%p, swtch=0x%p\n",
43ecb9fe 1016 bus->bus_no, i, bus->device[i],
27dd5548
KC
1017 bus->device[i]->chanptr,
1018 bus->device[i]->swtch) < 0)
1019 goto err_done_unlock;
1020
1021 if (PLINE(" first_busy_cnt=%llu, moved_to_tail_cnt=%llu, last_on_list_cnt=%llu\n",
1022 bus->device[i]->first_busy_cnt,
1023 bus->device[i]->moved_to_tail_cnt,
1024 bus->device[i]->last_on_list_cnt) < 0)
1025 goto err_done_unlock;
bac8a4d5
KC
1026 }
1027 }
1028 }
b98ab24c 1029 read_unlock(&bus_list_lock);
bac8a4d5 1030
27dd5548 1031 if (PLINE("UisUtils_Registered_Services: %d\n",
e9b9262a 1032 atomic_read(&uisutils_registered_services)) < 0)
27dd5548
KC
1033 goto err_done;
1034 if (PLINE("cycles_before_wait %llu wait_cycles:%llu\n",
1035 cycles_before_wait, wait_cycles) < 0)
1036 goto err_done;
1037 if (PLINE("tot_wakeup_cnt %llu:tot_wait_cnt %llu:tot_schedule_cnt %llu\n",
1038 tot_wakeup_cnt, tot_wait_cnt, tot_schedule_cnt) < 0)
1039 goto err_done;
1040 if (PLINE("en_smart_wakeup %d\n", en_smart_wakeup) < 0)
1041 goto err_done;
1042 if (PLINE("tot_moved_to_tail_cnt %llu\n", tot_moved_to_tail_cnt) < 0)
1043 goto err_done;
bac8a4d5
KC
1044
1045 return tot;
bac8a4d5 1046
27dd5548 1047err_done_unlock:
b98ab24c 1048 read_unlock(&bus_list_lock);
27dd5548 1049err_done:
bac8a4d5
KC
1050 return -1;
1051}
1052
2b6040c5
BR
1053static ssize_t info_debugfs_read(struct file *file, char __user *buf,
1054 size_t len, loff_t *offset)
bac8a4d5
KC
1055{
1056 char *temp;
2b6040c5 1057 int total_bytes = 0;
bac8a4d5
KC
1058 int remaining_bytes = PROC_READ_BUFFER_SIZE;
1059
1060/* *start = buf; */
b98ab24c 1061 if (debug_buf == NULL) {
b98ab24c 1062 debug_buf = vmalloc(PROC_READ_BUFFER_SIZE);
bac8a4d5 1063
0aca7844 1064 if (debug_buf == NULL)
bac8a4d5 1065 return -ENOMEM;
bac8a4d5
KC
1066 }
1067
b98ab24c 1068 temp = debug_buf;
bac8a4d5 1069
b98ab24c 1070 if ((*offset == 0) || (!debug_buf_valid)) {
bac8a4d5 1071 /* if the read fails, then -1 will be returned */
2b6040c5 1072 total_bytes = info_debugfs_read_helper(&temp, &remaining_bytes);
b98ab24c 1073 debug_buf_valid = 1;
2dcca2f7 1074 } else {
2b6040c5 1075 total_bytes = strlen(debug_buf);
2dcca2f7 1076 }
bac8a4d5
KC
1077
1078 return simple_read_from_buffer(buf, len, offset,
2b6040c5 1079 debug_buf, total_bytes);
bac8a4d5
KC
1080}
1081
ab12d8a0 1082static struct device_info *find_dev(u32 bus_no, u32 dev_no)
bac8a4d5
KC
1083{
1084 struct bus_info *bus;
1085 struct device_info *dev = NULL;
1086
b98ab24c
BR
1087 read_lock(&bus_list_lock);
1088 for (bus = bus_list; bus; bus = bus->next) {
ab12d8a0 1089 if (bus->bus_no == bus_no) {
bac8a4d5 1090 /* make sure the device number is valid */
0aca7844 1091 if (dev_no >= bus->device_count)
ab12d8a0 1092 break;
ab12d8a0 1093 dev = bus->device[dev_no];
ab12d8a0 1094 break;
bac8a4d5
KC
1095 }
1096 }
b98ab24c 1097 read_unlock(&bus_list_lock);
bac8a4d5
KC
1098 return dev;
1099}
1100
1101/* This thread calls the "interrupt" function for each device that has
1102 * enabled polling via uislib_enable_channel_interrupts(). The "interrupt"
1103 * function typically reads and processes the device's channel input
1104 * queue. This thread repeatedly does this, until the thread is told to stop
1105 * (via uisthread_stop()). Sleeping rules:
1106 * - If we have called the "interrupt" function for all devices, and all of
1107 * them have reported "nothing processed" (returned 0), then we will go to
1108 * sleep for a maximum of POLLJIFFIES_NORMAL jiffies.
1109 * - If anyone calls uislib_force_channel_interrupt(), the above jiffy
1110 * sleep will be interrupted, and we will resume calling the "interrupt"
1111 * function for all devices.
1112 * - The list of devices is dynamically re-ordered in order to
1113 * attempt to preserve fairness. Whenever we spin thru the list of
1114 * devices and call the dev->interrupt() function, if we find
1115 * devices which report that there is still more work to do,
1116 * the first such device we find is moved to the end of the device
1117 * list. This ensures that extremely busy devices don't starve out
1118 * less-busy ones.
1119 *
1120 */
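/* Usage sketch: how a device driver typically hooks into this polling
 * thread. The bus_no/dev_no values and my_isr()/my_ctx are hypothetical;
 * the three uislib_*_channel_interrupt* calls are the ones exported
 * later in this file.
 *
 *	static int my_isr(void *ctx)
 *	{
 *		// drain the channel; return nonzero if more work remains
 *		return 0;
 *	}
 *
 *	uislib_enable_channel_interrupts(bus_no, dev_no, my_isr, my_ctx);
 *	...
 *	uislib_force_channel_interrupt(bus_no, dev_no); // hint: new requests
 *	...
 *	uislib_disable_channel_interrupts(bus_no, dev_no);
 */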
3ec66a4b 1121static int process_incoming(void *v)
bac8a4d5
KC
1122{
1123 unsigned long long cur_cycles, old_cycles, idle_cycles, delta_cycles;
1124 struct list_head *new_tail = NULL;
1125 int i;
ddc9f84b 1126
bac8a4d5
KC
1127 UIS_DAEMONIZE("dev_incoming");
1128 for (i = 0; i < 16; i++) {
1129 old_cycles = get_cycles();
b98ab24c 1130 wait_event_timeout(poll_dev_wake_q,
bac8a4d5
KC
1131 0, POLLJIFFIES_NORMAL);
1132 cur_cycles = get_cycles();
1133 if (wait_cycles == 0) {
1134 wait_cycles = (cur_cycles - old_cycles);
1135 } else {
1136 if (wait_cycles < (cur_cycles - old_cycles))
1137 wait_cycles = (cur_cycles - old_cycles);
1138 }
1139 }
bac8a4d5
KC
1140 cycles_before_wait = wait_cycles;
1141 idle_cycles = 0;
b98ab24c 1142 poll_dev_start = 0;
bac8a4d5
KC
1143 while (1) {
1144 struct list_head *lelt, *tmp;
1145 struct device_info *dev = NULL;
1146
1147 /* poll each channel for input */
b98ab24c 1148 down(&poll_dev_lock);
bac8a4d5 1149 new_tail = NULL;
b98ab24c 1150 list_for_each_safe(lelt, tmp, &poll_dev_chan) {
bac8a4d5 1151 int rc = 0;
ddc9f84b 1152
bac8a4d5
KC
1153 dev = list_entry(lelt, struct device_info,
1154 list_polling_device_channels);
f2170625 1155 down(&dev->interrupt_callback_lock);
bac8a4d5
KC
1156 if (dev->interrupt)
1157 rc = dev->interrupt(dev->interrupt_context);
1158 else
1159 continue;
f2170625 1160 up(&dev->interrupt_callback_lock);
bac8a4d5
KC
1161 if (rc) {
1162 /* dev->interrupt returned, but there
1163 * is still more work to do.
1164 * Reschedule work to occur as soon as
1165 * possible. */
1166 idle_cycles = 0;
1167 if (new_tail == NULL) {
1168 dev->first_busy_cnt++;
1169 if (!
1170 (list_is_last
1171 (lelt,
b98ab24c 1172 &poll_dev_chan))) {
bac8a4d5
KC
1173 new_tail = lelt;
1174 dev->moved_to_tail_cnt++;
f0b9a0f9 1175 } else {
bac8a4d5 1176 dev->last_on_list_cnt++;
f0b9a0f9 1177 }
bac8a4d5 1178 }
bac8a4d5 1179 }
e0f2f18e 1180 if (kthread_should_stop())
bac8a4d5
KC
1181 break;
1182 }
1183 if (new_tail != NULL) {
1184 tot_moved_to_tail_cnt++;
b98ab24c 1185 list_move_tail(new_tail, &poll_dev_chan);
bac8a4d5 1186 }
b98ab24c 1187 up(&poll_dev_lock);
bac8a4d5
KC
1188 cur_cycles = get_cycles();
1189 delta_cycles = cur_cycles - old_cycles;
1190 old_cycles = cur_cycles;
1191
1192 /* At this point, we have scanned thru all of the
1193 * channels, and at least one of the following is true:
1194 * - there is no input waiting on any of the channels
1195 * - we have received a signal to stop this thread
1196 */
e0f2f18e 1197 if (kthread_should_stop())
bac8a4d5
KC
1198 break;
1199 if (en_smart_wakeup == 0xFF) {
bac8a4d5
KC
1200 break;
1201 }
1202 /* wait for POLLJIFFIES_NORMAL jiffies, or until
b98ab24c 1203 * someone wakes up poll_dev_wake_q,
bac8a4d5
KC
1204 * whichever comes first. Only do a wait when we have
1205 * been idle for cycles_before_wait cycles.
1206 */
1207 if (idle_cycles > cycles_before_wait) {
b98ab24c 1208 poll_dev_start = 0;
bac8a4d5 1209 tot_wait_cnt++;
b98ab24c
BR
1210 wait_event_timeout(poll_dev_wake_q,
1211 poll_dev_start,
bac8a4d5 1212 POLLJIFFIES_NORMAL);
b98ab24c 1213 poll_dev_start = 1;
bac8a4d5
KC
1214 } else {
1215 tot_schedule_cnt++;
1216 schedule();
1217 idle_cycles = idle_cycles + delta_cycles;
1218 }
1219 }
b98ab24c 1220 complete_and_exit(&incoming_ti.has_stopped, 0);
bac8a4d5
KC
1221}
1222
1223static BOOL
52ebd30e 1224initialize_incoming_thread(void)
bac8a4d5 1225{
b98ab24c 1226 if (incoming_started)
bac8a4d5 1227 return TRUE;
b98ab24c 1228 if (!uisthread_start(&incoming_ti,
3ec66a4b 1229 &process_incoming, NULL, "dev_incoming")) {
bac8a4d5
KC
1230 return FALSE;
1231 }
b98ab24c 1232 incoming_started = TRUE;
bac8a4d5
KC
1233 return TRUE;
1234}
1235
1236/* Add a new device/channel to the list being processed by
3ec66a4b 1237 * process_incoming().
bac8a4d5
KC
1238 * <interrupt> - indicates the function to call periodically.
1239 * <interrupt_context> - indicates the data to pass to the <interrupt>
1240 * function.
1241 */
1242void
93d1304f 1243uislib_enable_channel_interrupts(u32 bus_no, u32 dev_no,
bac8a4d5
KC
1244 int (*interrupt)(void *),
1245 void *interrupt_context)
1246{
1247 struct device_info *dev;
ddc9f84b 1248
93d1304f 1249 dev = find_dev(bus_no, dev_no);
0aca7844 1250 if (!dev)
bac8a4d5 1251 return;
0aca7844 1252
b98ab24c 1253 down(&poll_dev_lock);
52ebd30e 1254 initialize_incoming_thread();
bac8a4d5
KC
1255 dev->interrupt = interrupt;
1256 dev->interrupt_context = interrupt_context;
1257 dev->polling = TRUE;
2f652e0c 1258 list_add_tail(&dev->list_polling_device_channels,
b98ab24c
BR
1259 &poll_dev_chan);
1260 up(&poll_dev_lock);
bac8a4d5
KC
1261}
1262EXPORT_SYMBOL_GPL(uislib_enable_channel_interrupts);
1263
1264/* Remove a device/channel from the list being processed by
3ec66a4b 1265 * process_incoming().
bac8a4d5
KC
1266 */
1267void
d0dd33f3 1268uislib_disable_channel_interrupts(u32 bus_no, u32 dev_no)
bac8a4d5
KC
1269{
1270 struct device_info *dev;
ddc9f84b 1271
d0dd33f3 1272 dev = find_dev(bus_no, dev_no);
0aca7844 1273 if (!dev)
bac8a4d5 1274 return;
b98ab24c 1275 down(&poll_dev_lock);
bac8a4d5
KC
1276 list_del(&dev->list_polling_device_channels);
1277 dev->polling = FALSE;
1278 dev->interrupt = NULL;
b98ab24c 1279 up(&poll_dev_lock);
bac8a4d5
KC
1280}
1281EXPORT_SYMBOL_GPL(uislib_disable_channel_interrupts);
1282
1283static void
1284do_wakeup_polling_device_channels(struct work_struct *dummy)
1285{
b98ab24c
BR
1286 if (!poll_dev_start) {
1287 poll_dev_start = 1;
1288 wake_up(&poll_dev_wake_q);
bac8a4d5
KC
1289 }
1290}
1291
8304ada2 1292static DECLARE_WORK(work_wakeup_polling_device_channels,
a8d7f21d 1293 do_wakeup_polling_device_channels);
bac8a4d5 1294
3ec66a4b 1295/* Call this function when you want to send a hint to process_incoming() that
bac8a4d5
KC
1296 * your device might have more requests.
1297 */
1298void
f7b33ff4 1299uislib_force_channel_interrupt(u32 bus_no, u32 dev_no)
bac8a4d5
KC
1300{
1301 if (en_smart_wakeup == 0)
1302 return;
b98ab24c 1303 if (poll_dev_start)
bac8a4d5
KC
1304 return;
1305 /* The point of using schedule_work() instead of just doing
1306 * the work inline is to force a slight delay before waking up
3ec66a4b 1307 * the process_incoming() thread.
bac8a4d5
KC
1308 */
1309 tot_wakeup_cnt++;
8304ada2 1310 schedule_work(&work_wakeup_polling_device_channels);
bac8a4d5
KC
1311}
1312EXPORT_SYMBOL_GPL(uislib_force_channel_interrupt);
1313
1314/*****************************************************/
1315/* Module Init & Exit functions */
1316/*****************************************************/
1317
1318static int __init
1319uislib_mod_init(void)
1320{
fcd0157e
KC
1321 if (!unisys_spar_platform)
1322 return -ENODEV;
1323
bac8a4d5 1324 /* initialize global pointers to NULL */
b98ab24c
BR
1325 bus_list = NULL;
1326 bus_list_count = 0;
1327 max_bus_count = 0;
1328 rwlock_init(&bus_list_lock);
2df7cc62 1329 virt_control_chan_func = NULL;
bac8a4d5
KC
1330
1331 /* Issue VMCALL_GET_CONTROLVM_ADDR to get CtrlChanPhysAddr and
1332 * then map this physical address to a virtual address. */
1333 POSTCODE_LINUX_2(DRIVER_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1334
28fa597f 1335 dir_debugfs = debugfs_create_dir(DIR_DEBUGFS_ENTRY, NULL);
28fa597f 1336 if (dir_debugfs) {
7ec96720
BR
1337 info_debugfs_entry = debugfs_create_file(
1338 INFO_DEBUGFS_ENTRY_FN, 0444, dir_debugfs, NULL,
1339 &debugfs_info_fops);
1340
28fa597f
BR
1341 platformnumber_debugfs_read = debugfs_create_u32(
1342 PLATFORMNUMBER_DEBUGFS_ENTRY_FN, 0444, dir_debugfs,
b98ab24c 1343 &platform_no);
bac8a4d5 1344
b913a2ef
BR
1345 cycles_before_wait_debugfs_read = debugfs_create_u64(
1346 CYCLES_BEFORE_WAIT_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
1347 &cycles_before_wait);
bac8a4d5 1348
81d2d7de
BR
1349 smart_wakeup_debugfs_entry = debugfs_create_bool(
1350 SMART_WAKEUP_DEBUGFS_ENTRY_FN, 0666, dir_debugfs,
1351 &en_smart_wakeup);
1352 }
bac8a4d5 1353
bac8a4d5
KC
1354 POSTCODE_LINUX_3(DRIVER_EXIT_PC, 0, POSTCODE_SEVERITY_INFO);
1355 return 0;
1356}
1357
1358static void __exit
1359uislib_mod_exit(void)
1360{
b98ab24c
BR
1361 if (debug_buf) {
1362 vfree(debug_buf);
1363 debug_buf = NULL;
bac8a4d5
KC
1364 }
1365
7ec96720 1366 debugfs_remove(info_debugfs_entry);
81d2d7de 1367 debugfs_remove(smart_wakeup_debugfs_entry);
b913a2ef 1368 debugfs_remove(cycles_before_wait_debugfs_read);
28fa597f
BR
1369 debugfs_remove(platformnumber_debugfs_read);
1370 debugfs_remove(dir_debugfs);
bac8a4d5
KC
1371}
1372
1373module_init(uislib_mod_init);
1374module_exit(uislib_mod_exit);
1375
bac8a4d5
KC
1376MODULE_LICENSE("GPL");
1377MODULE_AUTHOR("Usha Srinivasan");
1378MODULE_ALIAS("uislib");
1379 /* this is extracted during depmod and kept in modules.dep */