085d78212a80f201fb3bba927c8640df95e2837b
[deliverable/linux.git] / drivers / staging / unisys / visorhba / visorhba_main.c
1 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2 * All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or (at
7 * your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more
13 * details.
14 */
15
16 #include <linux/debugfs.h>
17 #include <linux/skbuff.h>
18 #include <linux/kthread.h>
19 #include <scsi/scsi.h>
20 #include <scsi/scsi_host.h>
21 #include <scsi/scsi_cmnd.h>
22 #include <scsi/scsi_device.h>
23
24 #include "visorbus.h"
25 #include "iochannel.h"
26
27 /* The Send and Receive Buffers of the IO Queue may both be full */
28
29 #define IOS_ERROR_THRESHOLD 1000
/* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
 * = 4800 bytes, rounded up to the next power of two (8192 = 2^13)
 */
33 #define MAX_BUF 8192
34 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
35 #define VISORHBA_ERROR_COUNT 30
36 #define VISORHBA_OPEN_MAX 1
37
38 static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
39 void (*visorhba_cmnd_done)
40 (struct scsi_cmnd *));
41 #ifdef DEF_SCSI_QCMD
42 static DEF_SCSI_QCMD(visorhba_queue_command)
43 #else
44 #define visorhba_queue_command visorhba_queue_command_lck
45 #endif
46 static int visorhba_probe(struct visor_device *dev);
47 static void visorhba_remove(struct visor_device *dev);
48 static int visorhba_pause(struct visor_device *dev,
49 visorbus_state_complete_func complete_func);
50 static int visorhba_resume(struct visor_device *dev,
51 visorbus_state_complete_func complete_func);
52
53 static ssize_t info_debugfs_read(struct file *file, char __user *buf,
54 size_t len, loff_t *offset);
55 static int set_no_disk_inquiry_result(unsigned char *buf,
56 size_t len, bool is_lun0);
57 static struct dentry *visorhba_debugfs_dir;
58 static const struct file_operations debugfs_info_fops = {
59 .read = info_debugfs_read,
60 };
61
/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the SPAR_VHBA channel.
	 */
	{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
	{ NULL_UUID_LE, NULL }	/* list terminator */
};

/* This is used to tell the visor bus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* no irq handler: responses are polled by a kthread, see
	 * process_incoming_rsps()
	 */
	.channel_interrupt = NULL,
};
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
87
/* Per-disk tracking node, linked off visorhba_devdata.head */
struct visordisk_info {
	u32 valid;
	u32 channel, id, lun;	/* Disk Path */
	/* number of good IOs still needed before error_count is reset
	 * (decremented in do_scsi_nolinuxstat)
	 */
	atomic_t ios_threshold;
	/* saturates at VISORHBA_ERROR_COUNT (see the eh_* handlers) */
	atomic_t error_count;
	struct visordisk_info *next;	/* singly linked list of vdisks */
};

/* One slot of the pending-request array */
struct scsipending {
	struct uiscmdrsp cmdrsp;
	void *sent;		/* The Data being tracked */
	char cmdtype;		/* Type of pointer that is being stored */
};

/* Work Data for dar_work_queue */
struct diskaddremove {
	u8 add;			/* 0-remove, 1-add */
	struct Scsi_Host *shost; /* Scsi Host for this visorhba instance */
	u32 channel, id, lun;	/* Disk Path */
	struct diskaddremove *next;
};
109
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	spinlock_t privlock; /* lock to protect data in devdata */
	bool serverdown;		/* IOVM down: new IOs are refused */
	bool serverchangingstate;	/* pause/resume transition in flight */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;
	wait_queue_head_t rsp_queue;	/* wakes process_incoming_rsps() */
	struct visordisk_info head;	/* sentinel head of the vdisk list */
	unsigned int max_buff_len;	/* largest IO buffer seen (debugfs) */
	int devnum;
	struct task_struct *thread;	/* response-processing kthread */
	int thread_wait_ms;	/* poll period for the response thread */
};

struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};

/* Table of open visorhba instances, walked by info_debugfs_read() */
static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
143
/* Walk @list's singly linked vdisk chain, executing the attached body
 * for each node whose channel/id/lun triple equals @match's.
 * NOTE(review): the comparison is applied to @iter itself, starting at
 * the zero-initialized sentinel head (which therefore aliases a 0:0:0
 * device), and the loop condition (iter->next) means the final node is
 * never tested -- confirm these semantics are intended before reuse.
 */
#define for_each_vdisk_match(iter, list, match) \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) && \
		    (iter->id == match->id) && \
		    (iter->lun == match->lun))
149 /**
150 * visor_thread_start - starts a thread for the device
151 * @threadfn: Function the thread starts
152 * @thrcontext: Context to pass to the thread, i.e. devdata
153 * @name: string describing name of thread
154 *
155 * Starts a thread for the device.
156 *
157 * Return the task_struct * denoting the thread on success,
158 * or NULL on failure
159 */
160 static struct task_struct *visor_thread_start
161 (int (*threadfn)(void *), void *thrcontext, char *name)
162 {
163 struct task_struct *task;
164
165 task = kthread_run(threadfn, thrcontext, "%s", name);
166 if (IS_ERR(task)) {
167 pr_err("visorbus failed to start thread\n");
168 return NULL;
169 }
170 return task;
171 }
172
/**
 * visor_thread_stop - stops the thread if it is running
 * @task: thread to stop; NULL means no thread is running
 */
static void visor_thread_stop(struct task_struct *task)
{
	if (task)
		kthread_stop(task);
}
182
/**
 * add_scsipending_entry - save off io command that is pending in
 *			   Service Partition
 * @devdata: Pointer to devdata
 * @cmdtype: Specifies the type of command pending
 * @new: The command to be saved
 *
 * Saves off the io command that is being handled by the Service
 * Partition so that it can be handled when it completes. If new is
 * NULL it is assumed the entry refers only to the cmdrsp.
 * Returns the insert_location where the entry was added on success,
 * or -1 when every slot in the pending array is in use.
 */
static int add_scsipending_entry(struct visorhba_devdata *devdata,
				 char cmdtype, void *new)
{
	unsigned long flags;
	struct scsipending *entry;
	int insert_location;

	spin_lock_irqsave(&devdata->privlock, flags);
	/* linear probe from nextinsert for a free (sent == NULL) slot */
	insert_location = devdata->nextinsert;
	while (devdata->pending[insert_location].sent) {
		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
		/* wrapped all the way around: the array is full */
		if (insert_location == (int)devdata->nextinsert) {
			spin_unlock_irqrestore(&devdata->privlock, flags);
			return -1;
		}
	}

	entry = &devdata->pending[insert_location];
	memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
	entry->cmdtype = cmdtype;
	if (new)
		entry->sent = new;
	else /* wants to send cmdrsp */
		entry->sent = &entry->cmdrsp;
	devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
	spin_unlock_irqrestore(&devdata->privlock, flags);

	return insert_location;
}
225
226 /**
227 * del_scsipending_enty - removes an entry from the pending array
228 * @devdata: Device holding the pending array
229 * @del: Entry to remove
230 *
231 * Removes the entry pointed at by del and returns it.
232 * Returns the scsipending entry pointed at
233 */
234 static void *del_scsipending_ent(struct visorhba_devdata *devdata,
235 int del)
236 {
237 unsigned long flags;
238 void *sent;
239
240 if (del >= MAX_PENDING_REQUESTS)
241 return NULL;
242
243 spin_lock_irqsave(&devdata->privlock, flags);
244 sent = devdata->pending[del].sent;
245
246 devdata->pending[del].cmdtype = 0;
247 devdata->pending[del].sent = NULL;
248 spin_unlock_irqrestore(&devdata->privlock, flags);
249
250 return sent;
251 }
252
/**
 * get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
 * @ddata: Device holding the pending array
 * @ent: Entry that stores the cmdrsp
 *
 * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
 * if the "sent" field is not NULL.
 * Returns a pointer to the cmdrsp, or NULL when the slot is unused.
 */
static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
						int ent)
{
	if (ddata->pending[ent].sent)
		return &ddata->pending[ent].cmdrsp;

	return NULL;
}
270
/**
 * forward_taskmgmt_command - send taskmgmt command to the Service
 *			      Partition
 * @tasktype: Type of taskmgmt command
 * @scsicmd: scsi command whose device the taskmgmt targets
 *
 * Create a cmdrsp packet and send it to the Service Partition
 * that will service this request.
 * Returns SUCCESS when the Service Partition reported completion,
 * FAILED otherwise (server down, no free slot, insert failed, or
 * 45-second timeout).
 */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_cmnd *scsicmd)
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	int notifyresult = 0xffff;	/* sentinel meaning "no result yet" */
	wait_queue_head_t notifyevent;
	int scsicmd_id = 0;

	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending slot; its embedded cmdrsp carries the request */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* specify the event that has to be triggered when this */
	/* cmd is complete */
	/* NOTE(review): these are addresses of on-stack locals; if the
	 * Service Partition responds after the timeout below, the
	 * completion path would write through stale stack pointers --
	 * confirm the lifetime contract with the response handlers.
	 */
	cmdrsp->scsitaskmgmt.notify_handle = (u64)&notifyevent;
	cmdrsp->scsitaskmgmt.notifyresult_handle = (u64)&notifyresult;

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	if (tasktype == TASK_MGMT_ABORT_TASK)
		scsicmd->result = DID_ABORT << 16;
	else
		scsicmd->result = DID_RESET << 16;

	scsicmd->scsi_done(scsicmd);

	return SUCCESS;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, scsicmd_id);
	return FAILED;
}
343
344 /**
345 * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
346 * @scsicmd: The scsicmd that needs aborted
347 *
348 * Returns SUCCESS if inserted, failure otherwise
349 *
350 */
351 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
352 {
353 /* issue TASK_MGMT_ABORT_TASK */
354 struct scsi_device *scsidev;
355 struct visordisk_info *vdisk;
356 struct visorhba_devdata *devdata;
357
358 scsidev = scsicmd->device;
359 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
360 for_each_vdisk_match(vdisk, devdata, scsidev) {
361 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
362 atomic_inc(&vdisk->error_count);
363 else
364 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
365 }
366 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
367 }
368
369 /**
370 * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
371 * @scsicmd: The scsicmd that needs aborted
372 *
373 * Returns SUCCESS if inserted, failure otherwise
374 */
375 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
376 {
377 /* issue TASK_MGMT_LUN_RESET */
378 struct scsi_device *scsidev;
379 struct visordisk_info *vdisk;
380 struct visorhba_devdata *devdata;
381
382 scsidev = scsicmd->device;
383 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
384 for_each_vdisk_match(vdisk, devdata, scsidev) {
385 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
386 atomic_inc(&vdisk->error_count);
387 else
388 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
389 }
390 return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
391 }
392
393 /**
394 * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
395 * target on the bus
396 * @scsicmd: The scsicmd that needs aborted
397 *
398 * Returns SUCCESS
399 */
400 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
401 {
402 struct scsi_device *scsidev;
403 struct visordisk_info *vdisk;
404 struct visorhba_devdata *devdata;
405
406 scsidev = scsicmd->device;
407 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
408 for_each_vdisk_match(vdisk, devdata, scsidev) {
409 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
410 atomic_inc(&vdisk->error_count);
411 else
412 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
413 }
414 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
415 }
416
417 /**
418 * visorhba_host_reset_handler - Not supported
419 * @scsicmd: The scsicmd that needs aborted
420 *
421 * Not supported, return SUCCESS
422 * Returns SUCCESS
423 */
424 static int
425 visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
426 {
427 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
428 return SUCCESS;
429 }
430
/**
 * visorhba_get_info
 * @shp: Scsi host that is requesting information
 *
 * Returns a fixed identification string for this driver.
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	static const char *const info = "visorhba";

	return info;
}
442
/**
 * visorhba_queue_command_lck -- queues command to the Service Partition
 * @scsicmd: Command to be queued
 * @visorhba_cmnd_done: Done command to call when scsicmd is returned
 *
 * Queues the scsicmd to the Service Partition after converting it to a
 * uiscmdrsp structure.
 *
 * Returns 0 if queued to the Service Partition, otherwise
 * SCSI_MLQUEUE_DEVICE_BUSY.
 */
static int
visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
			   void (*visorhba_cmnd_done)(struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* push back while the IO partition is down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);

	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);

	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
	/* NOTE(review): always copies MAX_CMND_SIZE bytes regardless of
	 * the actual CDB length -- confirm scsicmd->cmnd is guaranteed
	 * to be at least that large
	 */
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);

	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* more sg entries than the channel message can describe */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
				       IOCHAN_TO_IOPART,
				       cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
527
/**
 * visorhba_slave_alloc - called when new disk is discovered
 * @scsidev: New disk
 *
 * Create a new visordisk_info structure and add it to our
 * list of vdisks.
 *
 * Returns 0 on success or when nothing needed doing, -ENOMEM when
 * the tracking structure could not be allocated.
 */
static int visorhba_slave_alloc(struct scsi_device *scsidev)
{
	/* this is called by the midlayer before scan for new devices --
	 * LLD can alloc any struct & do init if needed.
	 */
	struct visordisk_info *vdisk;
	struct visordisk_info *tmpvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	if (!devdata)
		return 0; /* even though we errored, treat as success */

	/* if a vdisk already tracks this channel/id/lun, nothing to do */
	for_each_vdisk_match(vdisk, devdata, scsidev)
		return 0; /* already allocated return success */

	/* after the loop, vdisk points at the last list node; the new
	 * entry is appended there.
	 * NOTE(review): the list is walked and extended without taking
	 * devdata->privlock -- confirm concurrent alloc/destroy cannot
	 * race here.
	 */
	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
	if (!tmpvdisk)
		return -ENOMEM;

	tmpvdisk->channel = scsidev->channel;
	tmpvdisk->id = scsidev->id;
	tmpvdisk->lun = scsidev->lun;
	vdisk->next = tmpvdisk;
	return 0;
}
564
/**
 * visorhba_slave_destroy - disk is going away
 * @scsidev: scsi device going away
 *
 * Disk is going away, clean up resources.
 * Returns void.
 */
static void visorhba_slave_destroy(struct scsi_device *scsidev)
{
	/* midlevel calls this after device has been quiesced and
	 * before it is to be deleted.
	 */
	struct visordisk_info *vdisk, *delvdisk;
	struct visorhba_devdata *devdata;
	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;

	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	for_each_vdisk_match(vdisk, devdata, scsidev) {
		/* NOTE(review): this unlinks and frees vdisk->next, the
		 * node AFTER the one the macro matched, and runs without
		 * devdata->privlock -- confirm it agrees with the
		 * matching semantics of for_each_vdisk_match (which
		 * compares the iterator node itself, not its successor).
		 */
		delvdisk = vdisk->next;
		vdisk->next = delvdisk->next;
		kfree(delvdisk);
		return;
	}
}
589
/* SCSI midlayer operations for this driver; the eh_* callbacks all
 * funnel into forward_taskmgmt_command()
 */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
	.use_clustering = ENABLE_CLUSTERING,
};
607
/**
 * info_debugfs_read - debugfs interface to dump visorhba states
 * @file: Debug file
 * @buf: userspace buffer to copy the formatted text into
 * @len: max bytes that can be written to buf
 * @offset: offset into the formatted text (managed by the VFS)
 *
 * Dumps information about the visorhba driver and devices
 * TODO: Make this per vhba
 * Returns bytes_read
 */
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
				 size_t len, loff_t *offset)
{
	ssize_t bytes_read = 0;
	int str_pos = 0;
	u64 phys_flags_addr;
	int i;
	struct visorhba_devdata *devdata;
	char *vbuf;

	/* clamp the scratch buffer to MAX_BUF bytes */
	if (len > MAX_BUF)
		len = MAX_BUF;
	vbuf = kzalloc(len, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	/* format the stats of every open visorhba instance */
	for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
		if (!visorhbas_open[i].devdata)
			continue;

		devdata = visorhbas_open[i].devdata;

		str_pos += scnprintf(vbuf + str_pos,
				     len - str_pos, "max_buff_len:%u\n",
				     devdata->max_buff_len);

		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
				     devdata->interrupts_rcvd,
				     devdata->interrupts_disabled);
		str_pos += scnprintf(vbuf + str_pos,
				     len - str_pos, "\ninterrupts_notme = %llu,\n",
				     devdata->interrupts_notme);
		phys_flags_addr = virt_to_phys((__force void *)
					       devdata->flags_addr);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
				     "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
				     devdata->flags_addr, phys_flags_addr,
				     (__le64)readq(devdata->flags_addr));
		str_pos += scnprintf(vbuf + str_pos,
				     len - str_pos, "acquire_failed_cnt:%llu\n",
				     devdata->acquire_failed_cnt);
		str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
	}

	/* simple_read_from_buffer handles the offset/len bookkeeping */
	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
	kfree(vbuf);
	return bytes_read;
}
668
669 /**
670 * visorhba_serverdown_complete - Called when we are done cleaning up
671 * from serverdown
672 * @work: work structure for this serverdown request
673 *
674 * Called when we are done cleanning up from serverdown, stop processing
675 * queue, fail pending IOs.
676 * Returns void when finished cleaning up
677 */
678 static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
679 {
680 int i;
681 struct scsipending *pendingdel = NULL;
682 struct scsi_cmnd *scsicmd = NULL;
683 struct uiscmdrsp *cmdrsp;
684 unsigned long flags;
685
686 /* Stop using the IOVM response queue (queue should be drained
687 * by the end)
688 */
689 visor_thread_stop(devdata->thread);
690
691 /* Fail commands that weren't completed */
692 spin_lock_irqsave(&devdata->privlock, flags);
693 for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
694 pendingdel = &devdata->pending[i];
695 switch (pendingdel->cmdtype) {
696 case CMD_SCSI_TYPE:
697 scsicmd = pendingdel->sent;
698 scsicmd->result = DID_RESET << 16;
699 if (scsicmd->scsi_done)
700 scsicmd->scsi_done(scsicmd);
701 break;
702 case CMD_SCSITASKMGMT_TYPE:
703 cmdrsp = pendingdel->sent;
704 cmdrsp->scsitaskmgmt.notifyresult_handle
705 = TASK_MGMT_FAILED;
706 wake_up_all((wait_queue_head_t *)
707 cmdrsp->scsitaskmgmt.notify_handle);
708 break;
709 default:
710 break;
711 }
712 pendingdel->cmdtype = 0;
713 pendingdel->sent = NULL;
714 }
715 spin_unlock_irqrestore(&devdata->privlock, flags);
716
717 devdata->serverdown = true;
718 devdata->serverchangingstate = false;
719 }
720
721 /**
722 * visorhba_serverdown - Got notified that the IOVM is down
723 * @devdata: visorhba that is being serviced by downed IOVM.
724 *
725 * Something happened to the IOVM, return immediately and
726 * schedule work cleanup work.
727 * Return SUCCESS or EINVAL
728 */
729 static int visorhba_serverdown(struct visorhba_devdata *devdata)
730 {
731 if (!devdata->serverdown && !devdata->serverchangingstate) {
732 devdata->serverchangingstate = true;
733 visorhba_serverdown_complete(devdata);
734 } else if (devdata->serverchangingstate) {
735 return -EINVAL;
736 }
737 return 0;
738 }
739
740 /**
741 * do_scsi_linuxstat - scsi command returned linuxstat
742 * @cmdrsp: response from IOVM
743 * @scsicmd: Command issued.
744 *
745 * Don't log errors for disk-not-present inquiries
746 * Returns void
747 */
748 static void
749 do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
750 {
751 struct visorhba_devdata *devdata;
752 struct visordisk_info *vdisk;
753 struct scsi_device *scsidev;
754
755 scsidev = scsicmd->device;
756 memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
757
758 /* Do not log errors for disk-not-present inquiries */
759 if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
760 (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
761 (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
762 return;
763 /* Okay see what our error_count is here.... */
764 devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
765 for_each_vdisk_match(vdisk, devdata, scsidev) {
766 if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
767 atomic_inc(&vdisk->error_count);
768 atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
769 }
770 }
771 }
772
773 static int set_no_disk_inquiry_result(unsigned char *buf,
774 size_t len, bool is_lun0)
775 {
776 if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
777 return -EINVAL;
778 memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
779 buf[2] = SCSI_SPC2_VER;
780 if (is_lun0) {
781 buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
782 buf[3] = DEV_HISUPPORT;
783 } else {
784 buf[0] = DEV_NOT_CAPABLE;
785 }
786 buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
787 strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
788 return 0;
789 }
790
/**
 * do_scsi_nolinuxstat - scsi command didn't have linuxstat
 * @cmdrsp: response from IOVM
 * @scsicmd: Command issued.
 *
 * Handle response when no linuxstat was returned
 * Returns void
 */
static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
	struct scsi_device *scsidev;
	unsigned char buf[36];
	struct scatterlist *sg;
	unsigned int i;
	char *this_page;
	char *this_page_orig;
	int bufind = 0;
	struct visordisk_info *vdisk;
	struct visorhba_devdata *devdata;

	scsidev = scsicmd->device;
	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
	    (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
		if (cmdrsp->scsi.no_disk_result == 0)
			return;

		/* Linux scsi code wants a device at Lun 0
		 * to issue report luns, but we don't want
		 * a disk there so we'll present a processor
		 * there.
		 */
		set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
					   scsidev->lun == 0);

		if (scsi_sg_count(scsicmd) == 0) {
			/* NOTE(review): with no sg entries this copies
			 * cmdrsp->scsi.bufflen bytes from the 36-byte
			 * local buf into whatever scsi_sglist() returns
			 * here; confirm both destination and length are
			 * what is intended.
			 */
			memcpy(scsi_sglist(scsicmd), buf,
			       cmdrsp->scsi.bufflen);
			return;
		}

		/* NOTE(review): bufind is never advanced, so every sg
		 * segment is filled from the start of buf, sg[i].length
		 * may exceed the 36 bytes available, and sg + i assumes
		 * a flat (unchained) sg table -- confirm all three.
		 */
		sg = scsi_sglist(scsicmd);
		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
			this_page_orig = kmap_atomic(sg_page(sg + i));
			this_page = (void *)((unsigned long)this_page_orig |
					     sg[i].offset);
			memcpy(this_page, buf + bufind, sg[i].length);
			kunmap_atomic(this_page_orig);
		}
	} else {
		/* successful non-INQUIRY completion: wind down the error
		 * throttle for this disk
		 */
		devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
		for_each_vdisk_match(vdisk, devdata, scsidev) {
			if (atomic_read(&vdisk->ios_threshold) > 0) {
				atomic_dec(&vdisk->ios_threshold);
				if (atomic_read(&vdisk->ios_threshold) == 0)
					atomic_set(&vdisk->error_count, 0);
			}
		}
	}
}
851
852 /**
853 * complete_scsi_command - complete a scsi command
854 * @uiscmdrsp: Response from Service Partition
855 * @scsicmd: The scsi command
856 *
857 * Response returned by the Service Partition, finish it and send
858 * completion to the scsi midlayer.
859 * Returns void.
860 */
861 static void
862 complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
863 {
864 /* take what we need out of cmdrsp and complete the scsicmd */
865 scsicmd->result = cmdrsp->scsi.linuxstat;
866 if (cmdrsp->scsi.linuxstat)
867 do_scsi_linuxstat(cmdrsp, scsicmd);
868 else
869 do_scsi_nolinuxstat(cmdrsp, scsicmd);
870
871 scsicmd->scsi_done(scsicmd);
872 }
873
874 /**
875 * complete_taskmgmt_command - complete task management
876 * @cmdrsp: Response from the IOVM
877 *
878 * Service Partition returned the result of the task management
879 * command. Wake up anyone waiting for it.
880 * Returns void
881 */
882 static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
883 {
884 /* copy the result of the taskgmgt and
885 * wake up the error handler that is waiting for this
886 */
887 cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
888 wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify_handle);
889 }
890
/* Deferred disk add/remove machinery: notifications from the IO
 * partition are pushed onto this list and handled from a work item.
 */
static struct work_struct dar_work_queue;
static struct diskaddremove *dar_work_queue_head;
static spinlock_t dar_work_queue_lock; /* Lock to protect dar_work_queue_head */
static unsigned short dar_work_queue_sched;
895
896 /**
897 * queue_disk_add_remove - IOSP has sent us a add/remove request
898 * @dar: disk add/remove request
899 *
900 * Queue the work needed to add/remove a disk.
901 * Returns void
902 */
903 static inline void queue_disk_add_remove(struct diskaddremove *dar)
904 {
905 unsigned long flags;
906
907 spin_lock_irqsave(&dar_work_queue_lock, flags);
908 if (!dar_work_queue_head) {
909 dar_work_queue_head = dar;
910 dar->next = NULL;
911 } else {
912 dar->next = dar_work_queue_head;
913 dar_work_queue_head = dar;
914 }
915 if (!dar_work_queue_sched) {
916 schedule_work(&dar_work_queue);
917 dar_work_queue_sched = 1;
918 }
919 spin_unlock_irqrestore(&dar_work_queue_lock, flags);
920 }
921
922 /**
923 * process_disk_notify - IOSP has sent a process disk notify event
924 * @shost: Scsi hot
925 * @cmdrsp: Response from the IOSP
926 *
927 * Queue it to the work queue.
928 * Return void.
929 */
930 static void process_disk_notify(struct Scsi_Host *shost,
931 struct uiscmdrsp *cmdrsp)
932 {
933 struct diskaddremove *dar;
934
935 dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
936 if (!dar)
937 return;
938
939 dar->add = cmdrsp->disknotify.add;
940 dar->shost = shost;
941 dar->channel = cmdrsp->disknotify.channel;
942 dar->id = cmdrsp->disknotify.id;
943 dar->lun = cmdrsp->disknotify.lun;
944 queue_disk_add_remove(dar);
945 }
946
/**
 * drain_queue - pull responses out of iochannel
 * @cmdrsp: reusable buffer each response is read into
 * @devdata: device that owns this iochannel
 *
 * Pulls responses out of the iochannel and processes them.
 * Returns void
 */
static void
drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
{
	struct scsi_cmnd *scsicmd;
	struct Scsi_Host *shost = devdata->scsihost;

	while (1) {
		if (!visorchannel_signalremove(devdata->dev->visorchannel,
					       IOCHAN_FROM_IOPART,
					       cmdrsp))
			break; /* queue empty */

		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
			/* scsicmd location is returned by the
			 * deletion
			 */
			scsicmd = del_scsipending_ent(devdata,
						      cmdrsp->scsi.handle);
			/* NOTE(review): an unknown handle stops the whole
			 * drain (break, not continue) -- confirm intended
			 */
			if (!scsicmd)
				break;
			/* complete the orig cmd */
			complete_scsi_command(cmdrsp, scsicmd);
		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->scsitaskmgmt.handle))
				break;
			complete_taskmgmt_command(cmdrsp);
		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
			/* The vHba pointer has no meaning in a
			 * guest partition. Let's be safe and set it
			 * to NULL now. Do not use it here!
			 */
			cmdrsp->disknotify.v_hba = NULL;
			process_disk_notify(shost, cmdrsp);
		}
		/* cmdrsp is now available for reuse */
	}
}
993
/**
 * process_incoming_rsps - Process responses from IOSP
 * @v: void pointer to visorhba_devdata
 *
 * Main function for the thread that processes the responses
 * from the IO Service Partition. When the queue is empty, wait
 * to check to see if it is full again.
 * Returns 0 on normal thread exit, -ENOMEM if the response buffer
 * could not be allocated.
 */
static int process_incoming_rsps(void *v)
{
	struct visorhba_devdata *devdata = v;
	struct uiscmdrsp *cmdrsp = NULL;
	const int size = sizeof(*cmdrsp);

	/* one reusable buffer for every response pulled off the channel */
	cmdrsp = kmalloc(size, GFP_ATOMIC);
	if (!cmdrsp)
		return -ENOMEM;

	while (1) {
		if (kthread_should_stop())
			break;
		/* wake on an interrupt notification, or poll again after
		 * thread_wait_ms milliseconds
		 */
		wait_event_interruptible_timeout(
			devdata->rsp_queue, (atomic_read(
					     &devdata->interrupt_rcvd) == 1),
			msecs_to_jiffies(devdata->thread_wait_ms));
		/* drain queue */
		drain_queue(cmdrsp, devdata);
	}
	kfree(cmdrsp);
	return 0;
}
1025
1026 /**
1027 * visorhba_pause - function to handle visorbus pause messages
1028 * @dev: device that is pausing.
1029 * @complete_func: function to call when finished
1030 *
1031 * Something has happened to the IO Service Partition that is
1032 * handling this device. Quiet this device and reset commands
1033 * so that the Service Partition can be corrected.
1034 * Returns SUCCESS
1035 */
1036 static int visorhba_pause(struct visor_device *dev,
1037 visorbus_state_complete_func complete_func)
1038 {
1039 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1040
1041 visorhba_serverdown(devdata);
1042 complete_func(dev, 0);
1043 return 0;
1044 }
1045
1046 /**
1047 * visorhba_resume - function called when the IO Service Partition is back
1048 * @dev: device that is pausing.
1049 * @complete_func: function to call when finished
1050 *
1051 * Yay! The IO Service Partition is back, the channel has been wiped
1052 * so lets re-establish connection and start processing responses.
1053 * Returns 0 on success, error on failure.
1054 */
1055 static int visorhba_resume(struct visor_device *dev,
1056 visorbus_state_complete_func complete_func)
1057 {
1058 struct visorhba_devdata *devdata;
1059
1060 devdata = dev_get_drvdata(&dev->device);
1061 if (!devdata)
1062 return -EINVAL;
1063
1064 if (devdata->serverdown && !devdata->serverchangingstate)
1065 devdata->serverchangingstate = true;
1066
1067 devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1068 "vhba_incming");
1069
1070 devdata->serverdown = false;
1071 devdata->serverchangingstate = false;
1072
1073 return 0;
1074 }
1075
/**
 * visorhba_probe - device has been discovered, do acquire
 * @dev: visor_device that was discovered
 *
 * A new HBA was discovered: allocate a SCSI host for it, read its
 * limits from the IO channel, switch the channel into polling mode
 * and start the response-processing thread.
 * Return 0 on success, otherwise error.
 */
static int visorhba_probe(struct visor_device *dev)
{
	struct Scsi_Host *scsihost;
	struct vhba_config_max max;
	struct visorhba_devdata *devdata = NULL;
	int i, err, channel_offset;
	u64 features;

	/* Per-device data is carved out of the Scsi_Host's hostdata area. */
	scsihost = scsi_host_alloc(&visorhba_driver_template,
				   sizeof(*devdata));
	if (!scsihost)
		return -ENODEV;

	/* Read the limits the IO Service Partition advertises in the
	 * channel (max id/lun, commands per lun, max I/O size).
	 */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vhba.max);
	err = visorbus_read_channel(dev, channel_offset, &max,
				    sizeof(struct vhba_config_max));
	if (err < 0)
		goto err_scsi_host_put;

	scsihost->max_id = (unsigned)max.max_id;
	scsihost->max_lun = (unsigned)max.max_lun;
	scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
	/* >> 9 converts the byte limit into 512-byte sectors */
	scsihost->max_sectors =
	    (unsigned short)(max.max_io_size >> 9);
	scsihost->sg_tablesize =
	    (unsigned short)(max.max_io_size / PAGE_SIZE);
	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
		scsihost->sg_tablesize = MAX_PHYS_INFO;
	err = scsi_add_host(scsihost, &dev->device);
	if (err < 0)
		goto err_scsi_host_put;

	/* Register devdata in the first free global slot.
	 * NOTE(review): a full table is silently ignored, and the slot is
	 * never released in visorhba_remove() -- confirm intended behavior.
	 */
	devdata = (struct visorhba_devdata *)scsihost->hostdata;
	for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
		if (!visorhbas_open[i].devdata) {
			visorhbas_open[i].devdata = devdata;
			break;
		}
	}

	devdata->dev = dev;
	dev_set_drvdata(&dev->device, devdata);

	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->privlock);
	devdata->serverdown = false;
	devdata->serverchangingstate = false;
	devdata->scsihost = scsihost;

	/* Set the polling flag in the channel header so responses are
	 * fetched by our polling thread rather than signalled.
	 */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_scsi_remove_host;
	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err)
		goto err_scsi_remove_host;

	/* Poll the channel for responses every 2 ms. */
	devdata->thread_wait_ms = 2;
	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
					     "vhba_incoming");

	scsi_scan_host(scsihost);

	return 0;

err_scsi_remove_host:
	scsi_remove_host(scsihost);

err_scsi_host_put:
	scsi_host_put(scsihost);
	return err;
}
1158
1159 /**
1160 * visorhba_remove - remove a visorhba device
1161 * @dev: Device to remove
1162 *
1163 * Removes the visorhba device.
1164 * Returns void.
1165 */
1166 static void visorhba_remove(struct visor_device *dev)
1167 {
1168 struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1169 struct Scsi_Host *scsihost = NULL;
1170
1171 if (!devdata)
1172 return;
1173
1174 scsihost = devdata->scsihost;
1175 visor_thread_stop(devdata->thread);
1176 scsi_remove_host(scsihost);
1177 scsi_host_put(scsihost);
1178
1179 dev_set_drvdata(&dev->device, NULL);
1180 }
1181
1182 /**
1183 * visorhba_init - driver init routine
1184 *
1185 * Initialize the visorhba driver and register it with visorbus
1186 * to handle s-Par virtual host bus adapter.
1187 */
1188 static int visorhba_init(void)
1189 {
1190 struct dentry *ret;
1191 int rc = -ENOMEM;
1192
1193 visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1194 if (!visorhba_debugfs_dir)
1195 return -ENOMEM;
1196
1197 ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
1198 &debugfs_info_fops);
1199
1200 if (!ret) {
1201 rc = -EIO;
1202 goto cleanup_debugfs;
1203 }
1204
1205 rc = visorbus_register_visor_driver(&visorhba_driver);
1206 if (rc)
1207 goto cleanup_debugfs;
1208
1209 return rc;
1210
1211 cleanup_debugfs:
1212 debugfs_remove_recursive(visorhba_debugfs_dir);
1213
1214 return rc;
1215 }
1216
/**
 * visorhba_exit - driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visorhba_exit(void)
{
	visorbus_unregister_visor_driver(&visorhba_driver);
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1227
module_init(visorhba_init);
module_exit(visorhba_exit);

/* Module metadata */
MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par hba driver");
This page took 0.067946 seconds and 4 git commands to generate.