/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "visorbus.h"
#include "iochannel.h"
/* The Send and Receive Buffers of the IO Queue may both be full */
#define IOS_ERROR_THRESHOLD	1000
/* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
 *         = 4800 bytes ~ 2^13 = 8192 bytes
 */
#define MAX_BUF			8192
#define MAX_PENDING_REQUESTS	(MIN_NUMSIGNALS * 2)
#define VISORHBA_ERROR_COUNT	30
#define VISORHBA_OPEN_MAX	1
/* Forward declaration of the locked queuecommand implementation; the
 * unlocked wrapper is generated by DEF_SCSI_QCMD when available.
 */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
					   (struct scsi_cmnd *));

#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
46 static int visorhba_probe(struct visor_device
*dev
);
47 static void visorhba_remove(struct visor_device
*dev
);
48 static int visorhba_pause(struct visor_device
*dev
,
49 visorbus_state_complete_func complete_func
);
50 static int visorhba_resume(struct visor_device
*dev
,
51 visorbus_state_complete_func complete_func
);
53 static ssize_t
info_debugfs_read(struct file
*file
, char __user
*buf
,
54 size_t len
, loff_t
*offset
);
55 static int set_no_disk_inquiry_result(unsigned char *buf
,
56 size_t len
, bool is_lun0
);
57 static struct dentry
*visorhba_debugfs_dir
;
58 static const struct file_operations debugfs_info_fops
= {
59 .read
= info_debugfs_read
,
62 /* GUIDS for HBA channel type supported by this driver */
63 static struct visor_channeltype_descriptor visorhba_channel_types
[] = {
64 /* Note that the only channel type we expect to be reported by the
65 * bus driver is the SPAR_VHBA channel.
67 { SPAR_VHBA_CHANNEL_PROTOCOL_UUID
, "sparvhba" },
68 { NULL_UUID_LE
, NULL
}
71 /* This is used to tell the visor bus driver which types of visor devices
72 * we support, and what functions to call when a visor device that we support
73 * is attached or removed.
75 static struct visor_driver visorhba_driver
= {
78 .channel_types
= visorhba_channel_types
,
79 .probe
= visorhba_probe
,
80 .remove
= visorhba_remove
,
81 .pause
= visorhba_pause
,
82 .resume
= visorhba_resume
,
83 .channel_interrupt
= NULL
,
85 MODULE_DEVICE_TABLE(visorbus
, visorhba_channel_types
);
86 MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR
);
88 struct visordisk_info
{
90 u32 channel
, id
, lun
; /* Disk Path */
91 atomic_t ios_threshold
;
93 struct visordisk_info
*next
;
97 struct uiscmdrsp cmdrsp
;
98 void *sent
; /* The Data being tracked */
99 char cmdtype
; /* Type of pointer that is being stored */
102 /* Work Data for dar_work_queue */
103 struct diskaddremove
{
104 u8 add
; /* 0-remove, 1-add */
105 struct Scsi_Host
*shost
; /* Scsi Host for this visorhba instance */
106 u32 channel
, id
, lun
; /* Disk Path */
107 struct diskaddremove
*next
;
110 /* Each scsi_host has a host_data area that contains this struct. */
111 struct visorhba_devdata
{
112 struct Scsi_Host
*scsihost
;
113 struct visor_device
*dev
;
114 struct list_head dev_info_list
;
115 /* Tracks the requests that have been forwarded to
116 * the IOVM and haven't returned yet
118 struct scsipending pending
[MAX_PENDING_REQUESTS
];
119 /* Start search for next pending free slot here */
120 unsigned int nextinsert
;
121 spinlock_t privlock
; /* lock to protect data in devdata */
123 bool serverchangingstate
;
124 unsigned long long acquire_failed_cnt
;
125 unsigned long long interrupts_rcvd
;
126 unsigned long long interrupts_notme
;
127 unsigned long long interrupts_disabled
;
128 u64 __iomem
*flags_addr
;
129 atomic_t interrupt_rcvd
;
130 wait_queue_head_t rsp_queue
;
131 struct visordisk_info head
;
132 unsigned int max_buff_len
;
134 struct task_struct
*thread
;
138 struct visorhba_devices_open
{
139 struct visorhba_devdata
*devdata
;
142 static struct visorhba_devices_open visorhbas_open
[VISORHBA_OPEN_MAX
];
/* Iterate the per-LUN list, executing the following statement only for
 * entries matching the channel/id/lun of @match.  NOTE: expands to a bare
 * `for ... if` — callers must not attach a dangling `else` to it.
 */
#define for_each_vdisk_match(iter, list, match)			  \
	for (iter = &list->head; iter->next; iter = iter->next) \
		if ((iter->channel == match->channel) &&	  \
		    (iter->id == match->id) &&			  \
		    (iter->lun == match->lun))
150 * visor_thread_start - starts a thread for the device
151 * @threadfn: Function the thread starts
152 * @thrcontext: Context to pass to the thread, i.e. devdata
153 * @name: string describing name of thread
155 * Starts a thread for the device.
157 * Return the task_struct * denoting the thread on success,
160 static struct task_struct
*visor_thread_start
161 (int (*threadfn
)(void *), void *thrcontext
, char *name
)
163 struct task_struct
*task
;
165 task
= kthread_run(threadfn
, thrcontext
, "%s", name
);
167 pr_err("visorbus failed to start thread\n");
/**
 * visor_thread_stop - stops the thread if it is running
 * @task: thread to stop, may be NULL if no thread was started
 */
static void visor_thread_stop(struct task_struct *task)
{
	if (!task)
		return; /* no thread running */
	kthread_stop(task);
}
184 * add_scsipending_entry - save off io command that is pending in
186 * @devdata: Pointer to devdata
187 * @cmdtype: Specifies the type of command pending
188 * @new: The command to be saved
190 * Saves off the io command that is being handled by the Service
191 * Partition so that it can be handled when it completes. If new is
192 * NULL it is assumed the entry refers only to the cmdrsp.
193 * Returns insert_location where entry was added,
194 * SCSI_MLQUEUE_DEVICE_BUSY if it can't
196 static int add_scsipending_entry(struct visorhba_devdata
*devdata
,
197 char cmdtype
, void *new)
200 struct scsipending
*entry
;
203 spin_lock_irqsave(&devdata
->privlock
, flags
);
204 insert_location
= devdata
->nextinsert
;
205 while (devdata
->pending
[insert_location
].sent
) {
206 insert_location
= (insert_location
+ 1) % MAX_PENDING_REQUESTS
;
207 if (insert_location
== (int)devdata
->nextinsert
) {
208 spin_unlock_irqrestore(&devdata
->privlock
, flags
);
213 entry
= &devdata
->pending
[insert_location
];
214 memset(&entry
->cmdrsp
, 0, sizeof(entry
->cmdrsp
));
215 entry
->cmdtype
= cmdtype
;
218 else /* wants to send cmdrsp */
219 entry
->sent
= &entry
->cmdrsp
;
220 devdata
->nextinsert
= (insert_location
+ 1) % MAX_PENDING_REQUESTS
;
221 spin_unlock_irqrestore(&devdata
->privlock
, flags
);
223 return insert_location
;
227 * del_scsipending_enty - removes an entry from the pending array
228 * @devdata: Device holding the pending array
229 * @del: Entry to remove
231 * Removes the entry pointed at by del and returns it.
232 * Returns the scsipending entry pointed at
234 static void *del_scsipending_ent(struct visorhba_devdata
*devdata
,
240 if (del
>= MAX_PENDING_REQUESTS
)
243 spin_lock_irqsave(&devdata
->privlock
, flags
);
244 sent
= devdata
->pending
[del
].sent
;
246 devdata
->pending
[del
].cmdtype
= 0;
247 devdata
->pending
[del
].sent
= NULL
;
248 spin_unlock_irqrestore(&devdata
->privlock
, flags
);
254 * get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
255 * #ddata: Device holding the pending array
256 * @ent: Entry that stores the cmdrsp
258 * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
259 * if the "sent" field is not NULL
260 * Returns a pointer to the cmdrsp.
262 static struct uiscmdrsp
*get_scsipending_cmdrsp(struct visorhba_devdata
*ddata
,
265 if (ddata
->pending
[ent
].sent
)
266 return &ddata
->pending
[ent
].cmdrsp
;
272 * forward_taskmgmt_command - send taskmegmt command to the Service
274 * @tasktype: Type of taskmgmt command
275 * @scsidev: Scsidev that issued command
277 * Create a cmdrsp packet and send it to the Serivce Partition
278 * that will service this request.
279 * Returns whether the command was queued successfully or not.
281 static int forward_taskmgmt_command(enum task_mgmt_types tasktype
,
282 struct scsi_cmnd
*scsicmd
)
284 struct uiscmdrsp
*cmdrsp
;
285 struct scsi_device
*scsidev
= scsicmd
->device
;
286 struct visorhba_devdata
*devdata
=
287 (struct visorhba_devdata
*)scsidev
->host
->hostdata
;
288 int notifyresult
= 0xffff;
289 wait_queue_head_t notifyevent
;
292 if (devdata
->serverdown
|| devdata
->serverchangingstate
)
295 scsicmd_id
= add_scsipending_entry(devdata
, CMD_SCSITASKMGMT_TYPE
,
300 cmdrsp
= get_scsipending_cmdrsp(devdata
, scsicmd_id
);
302 init_waitqueue_head(¬ifyevent
);
304 /* issue TASK_MGMT_ABORT_TASK */
305 cmdrsp
->cmdtype
= CMD_SCSITASKMGMT_TYPE
;
306 /* specify the event that has to be triggered when this */
307 /* cmd is complete */
308 cmdrsp
->scsitaskmgmt
.notify_handle
= (u64
)¬ifyevent
;
309 cmdrsp
->scsitaskmgmt
.notifyresult_handle
= (u64
)¬ifyresult
;
311 /* save destination */
312 cmdrsp
->scsitaskmgmt
.tasktype
= tasktype
;
313 cmdrsp
->scsitaskmgmt
.vdest
.channel
= scsidev
->channel
;
314 cmdrsp
->scsitaskmgmt
.vdest
.id
= scsidev
->id
;
315 cmdrsp
->scsitaskmgmt
.vdest
.lun
= scsidev
->lun
;
316 cmdrsp
->scsitaskmgmt
.handle
= scsicmd_id
;
318 if (!visorchannel_signalinsert(devdata
->dev
->visorchannel
,
321 goto err_del_scsipending_ent
;
323 /* It can take the Service Partition up to 35 seconds to complete
324 * an IO in some cases, so wait 45 seconds and error out
326 if (!wait_event_timeout(notifyevent
, notifyresult
!= 0xffff,
327 msecs_to_jiffies(45000)))
328 goto err_del_scsipending_ent
;
330 if (tasktype
== TASK_MGMT_ABORT_TASK
)
331 scsicmd
->result
= DID_ABORT
<< 16;
333 scsicmd
->result
= DID_RESET
<< 16;
335 scsicmd
->scsi_done(scsicmd
);
339 err_del_scsipending_ent
:
340 del_scsipending_ent(devdata
, scsicmd_id
);
345 * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
346 * @scsicmd: The scsicmd that needs aborted
348 * Returns SUCCESS if inserted, failure otherwise
351 static int visorhba_abort_handler(struct scsi_cmnd
*scsicmd
)
353 /* issue TASK_MGMT_ABORT_TASK */
354 struct scsi_device
*scsidev
;
355 struct visordisk_info
*vdisk
;
356 struct visorhba_devdata
*devdata
;
358 scsidev
= scsicmd
->device
;
359 devdata
= (struct visorhba_devdata
*)scsidev
->host
->hostdata
;
360 for_each_vdisk_match(vdisk
, devdata
, scsidev
) {
361 if (atomic_read(&vdisk
->error_count
) < VISORHBA_ERROR_COUNT
)
362 atomic_inc(&vdisk
->error_count
);
364 atomic_set(&vdisk
->ios_threshold
, IOS_ERROR_THRESHOLD
);
366 return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK
, scsicmd
);
370 * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
371 * @scsicmd: The scsicmd that needs aborted
373 * Returns SUCCESS if inserted, failure otherwise
375 static int visorhba_device_reset_handler(struct scsi_cmnd
*scsicmd
)
377 /* issue TASK_MGMT_LUN_RESET */
378 struct scsi_device
*scsidev
;
379 struct visordisk_info
*vdisk
;
380 struct visorhba_devdata
*devdata
;
382 scsidev
= scsicmd
->device
;
383 devdata
= (struct visorhba_devdata
*)scsidev
->host
->hostdata
;
384 for_each_vdisk_match(vdisk
, devdata
, scsidev
) {
385 if (atomic_read(&vdisk
->error_count
) < VISORHBA_ERROR_COUNT
)
386 atomic_inc(&vdisk
->error_count
);
388 atomic_set(&vdisk
->ios_threshold
, IOS_ERROR_THRESHOLD
);
390 return forward_taskmgmt_command(TASK_MGMT_LUN_RESET
, scsicmd
);
394 * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
396 * @scsicmd: The scsicmd that needs aborted
400 static int visorhba_bus_reset_handler(struct scsi_cmnd
*scsicmd
)
402 struct scsi_device
*scsidev
;
403 struct visordisk_info
*vdisk
;
404 struct visorhba_devdata
*devdata
;
406 scsidev
= scsicmd
->device
;
407 devdata
= (struct visorhba_devdata
*)scsidev
->host
->hostdata
;
408 for_each_vdisk_match(vdisk
, devdata
, scsidev
) {
409 if (atomic_read(&vdisk
->error_count
) < VISORHBA_ERROR_COUNT
)
410 atomic_inc(&vdisk
->error_count
);
412 atomic_set(&vdisk
->ios_threshold
, IOS_ERROR_THRESHOLD
);
414 return forward_taskmgmt_command(TASK_MGMT_BUS_RESET
, scsicmd
);
418 * visorhba_host_reset_handler - Not supported
419 * @scsicmd: The scsicmd that needs aborted
421 * Not supported, return SUCCESS
425 visorhba_host_reset_handler(struct scsi_cmnd
*scsicmd
)
427 /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
/**
 * visorhba_get_info - Get information about SCSI device
 * @shp: Scsi host that is requesting information
 *
 * Returns string with info
 */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string */
	return "visorhba";
}
444 * visorhba_queue_command_lck -- queues command to the Service Partition
445 * @scsicmd: Command to be queued
446 * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned
448 * Queues to scsicmd to the ServicePartition after converting it to a
449 * uiscmdrsp structure.
451 * Returns success if queued to the Service Partition, otherwise
455 visorhba_queue_command_lck(struct scsi_cmnd
*scsicmd
,
456 void (*visorhba_cmnd_done
)(struct scsi_cmnd
*))
458 struct uiscmdrsp
*cmdrsp
;
459 struct scsi_device
*scsidev
= scsicmd
->device
;
461 unsigned char *cdb
= scsicmd
->cmnd
;
462 struct Scsi_Host
*scsihost
= scsidev
->host
;
464 struct visorhba_devdata
*devdata
=
465 (struct visorhba_devdata
*)scsihost
->hostdata
;
466 struct scatterlist
*sg
= NULL
;
467 struct scatterlist
*sglist
= NULL
;
469 if (devdata
->serverdown
|| devdata
->serverchangingstate
)
470 return SCSI_MLQUEUE_DEVICE_BUSY
;
472 insert_location
= add_scsipending_entry(devdata
, CMD_SCSI_TYPE
,
475 if (insert_location
< 0)
476 return SCSI_MLQUEUE_DEVICE_BUSY
;
478 cmdrsp
= get_scsipending_cmdrsp(devdata
, insert_location
);
480 cmdrsp
->cmdtype
= CMD_SCSI_TYPE
;
481 /* save the pending insertion location. Deletion from pending
482 * will return the scsicmd pointer for completion
484 cmdrsp
->scsi
.handle
= insert_location
;
486 /* save done function that we have call when cmd is complete */
487 scsicmd
->scsi_done
= visorhba_cmnd_done
;
488 /* save destination */
489 cmdrsp
->scsi
.vdest
.channel
= scsidev
->channel
;
490 cmdrsp
->scsi
.vdest
.id
= scsidev
->id
;
491 cmdrsp
->scsi
.vdest
.lun
= scsidev
->lun
;
493 cmdrsp
->scsi
.data_dir
= scsicmd
->sc_data_direction
;
494 memcpy(cmdrsp
->scsi
.cmnd
, cdb
, MAX_CMND_SIZE
);
496 cmdrsp
->scsi
.bufflen
= scsi_bufflen(scsicmd
);
498 /* keep track of the max buffer length so far. */
499 if (cmdrsp
->scsi
.bufflen
> devdata
->max_buff_len
)
500 devdata
->max_buff_len
= cmdrsp
->scsi
.bufflen
;
502 if (scsi_sg_count(scsicmd
) > MAX_PHYS_INFO
)
503 goto err_del_scsipending_ent
;
505 /* convert buffer to phys information */
506 /* buffer is scatterlist - copy it out */
507 sglist
= scsi_sglist(scsicmd
);
509 for_each_sg(sglist
, sg
, scsi_sg_count(scsicmd
), i
) {
510 cmdrsp
->scsi
.gpi_list
[i
].address
= sg_phys(sg
);
511 cmdrsp
->scsi
.gpi_list
[i
].length
= sg
->length
;
513 cmdrsp
->scsi
.guest_phys_entries
= scsi_sg_count(scsicmd
);
515 if (!visorchannel_signalinsert(devdata
->dev
->visorchannel
,
518 /* queue must be full and we aren't going to wait */
519 goto err_del_scsipending_ent
;
523 err_del_scsipending_ent
:
524 del_scsipending_ent(devdata
, insert_location
);
525 return SCSI_MLQUEUE_DEVICE_BUSY
;
529 * visorhba_slave_alloc - called when new disk is discovered
532 * Create a new visordisk_info structure and add it to our
535 * Returns success when created, otherwise error.
537 static int visorhba_slave_alloc(struct scsi_device
*scsidev
)
539 /* this is called by the midlayer before scan for new devices --
540 * LLD can alloc any struct & do init if needed.
542 struct visordisk_info
*vdisk
;
543 struct visordisk_info
*tmpvdisk
;
544 struct visorhba_devdata
*devdata
;
545 struct Scsi_Host
*scsihost
= (struct Scsi_Host
*)scsidev
->host
;
547 devdata
= (struct visorhba_devdata
*)scsihost
->hostdata
;
549 return 0; /* even though we errored, treat as success */
551 for_each_vdisk_match(vdisk
, devdata
, scsidev
)
552 return 0; /* already allocated return success */
554 tmpvdisk
= kzalloc(sizeof(*tmpvdisk
), GFP_ATOMIC
);
558 tmpvdisk
->channel
= scsidev
->channel
;
559 tmpvdisk
->id
= scsidev
->id
;
560 tmpvdisk
->lun
= scsidev
->lun
;
561 vdisk
->next
= tmpvdisk
;
566 * visorhba_slave_destroy - disk is going away
567 * @scsidev: scsi device going away
569 * Disk is going away, clean up resources.
572 static void visorhba_slave_destroy(struct scsi_device
*scsidev
)
574 /* midlevel calls this after device has been quiesced and
575 * before it is to be deleted.
577 struct visordisk_info
*vdisk
, *delvdisk
;
578 struct visorhba_devdata
*devdata
;
579 struct Scsi_Host
*scsihost
= (struct Scsi_Host
*)scsidev
->host
;
581 devdata
= (struct visorhba_devdata
*)scsihost
->hostdata
;
582 for_each_vdisk_match(vdisk
, devdata
, scsidev
) {
583 delvdisk
= vdisk
->next
;
584 vdisk
->next
= delvdisk
->next
;
590 static struct scsi_host_template visorhba_driver_template
= {
591 .name
= "Unisys Visor HBA",
592 .info
= visorhba_get_info
,
593 .queuecommand
= visorhba_queue_command
,
594 .eh_abort_handler
= visorhba_abort_handler
,
595 .eh_device_reset_handler
= visorhba_device_reset_handler
,
596 .eh_bus_reset_handler
= visorhba_bus_reset_handler
,
597 .eh_host_reset_handler
= visorhba_host_reset_handler
,
599 #define visorhba_MAX_CMNDS 128
600 .can_queue
= visorhba_MAX_CMNDS
,
603 .slave_alloc
= visorhba_slave_alloc
,
604 .slave_destroy
= visorhba_slave_destroy
,
605 .use_clustering
= ENABLE_CLUSTERING
,
609 * info_debugfs_read - debugfs interface to dump visorhba states
611 * @buf: buffer to send back to user
612 * @len: len that can be written to buf
613 * @offset: offset into buf
615 * Dumps information about the visorhba driver and devices
616 * TODO: Make this per vhba
619 static ssize_t
info_debugfs_read(struct file
*file
, char __user
*buf
,
620 size_t len
, loff_t
*offset
)
622 ssize_t bytes_read
= 0;
626 struct visorhba_devdata
*devdata
;
631 vbuf
= kzalloc(len
, GFP_KERNEL
);
635 for (i
= 0; i
< VISORHBA_OPEN_MAX
; i
++) {
636 if (!visorhbas_open
[i
].devdata
)
639 devdata
= visorhbas_open
[i
].devdata
;
641 str_pos
+= scnprintf(vbuf
+ str_pos
,
642 len
- str_pos
, "max_buff_len:%u\n",
643 devdata
->max_buff_len
);
645 str_pos
+= scnprintf(vbuf
+ str_pos
, len
- str_pos
,
646 "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
647 devdata
->interrupts_rcvd
,
648 devdata
->interrupts_disabled
);
649 str_pos
+= scnprintf(vbuf
+ str_pos
,
650 len
- str_pos
, "\ninterrupts_notme = %llu,\n",
651 devdata
->interrupts_notme
);
652 phys_flags_addr
= virt_to_phys((__force
void *)
653 devdata
->flags_addr
);
654 str_pos
+= scnprintf(vbuf
+ str_pos
, len
- str_pos
,
655 "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
656 devdata
->flags_addr
, phys_flags_addr
,
657 (__le64
)readq(devdata
->flags_addr
));
658 str_pos
+= scnprintf(vbuf
+ str_pos
,
659 len
- str_pos
, "acquire_failed_cnt:%llu\n",
660 devdata
->acquire_failed_cnt
);
661 str_pos
+= scnprintf(vbuf
+ str_pos
, len
- str_pos
, "\n");
664 bytes_read
= simple_read_from_buffer(buf
, len
, offset
, vbuf
, str_pos
);
670 * visorhba_serverdown_complete - Called when we are done cleaning up
672 * @work: work structure for this serverdown request
674 * Called when we are done cleanning up from serverdown, stop processing
675 * queue, fail pending IOs.
676 * Returns void when finished cleaning up
678 static void visorhba_serverdown_complete(struct visorhba_devdata
*devdata
)
681 struct scsipending
*pendingdel
= NULL
;
682 struct scsi_cmnd
*scsicmd
= NULL
;
683 struct uiscmdrsp
*cmdrsp
;
686 /* Stop using the IOVM response queue (queue should be drained
689 visor_thread_stop(devdata
->thread
);
691 /* Fail commands that weren't completed */
692 spin_lock_irqsave(&devdata
->privlock
, flags
);
693 for (i
= 0; i
< MAX_PENDING_REQUESTS
; i
++) {
694 pendingdel
= &devdata
->pending
[i
];
695 switch (pendingdel
->cmdtype
) {
697 scsicmd
= pendingdel
->sent
;
698 scsicmd
->result
= DID_RESET
<< 16;
699 if (scsicmd
->scsi_done
)
700 scsicmd
->scsi_done(scsicmd
);
702 case CMD_SCSITASKMGMT_TYPE
:
703 cmdrsp
= pendingdel
->sent
;
704 cmdrsp
->scsitaskmgmt
.notifyresult_handle
706 wake_up_all((wait_queue_head_t
*)
707 cmdrsp
->scsitaskmgmt
.notify_handle
);
712 pendingdel
->cmdtype
= 0;
713 pendingdel
->sent
= NULL
;
715 spin_unlock_irqrestore(&devdata
->privlock
, flags
);
717 devdata
->serverdown
= true;
718 devdata
->serverchangingstate
= false;
722 * visorhba_serverdown - Got notified that the IOVM is down
723 * @devdata: visorhba that is being serviced by downed IOVM.
725 * Something happened to the IOVM, return immediately and
726 * schedule work cleanup work.
727 * Return SUCCESS or EINVAL
729 static int visorhba_serverdown(struct visorhba_devdata
*devdata
)
731 if (!devdata
->serverdown
&& !devdata
->serverchangingstate
) {
732 devdata
->serverchangingstate
= true;
733 visorhba_serverdown_complete(devdata
);
734 } else if (devdata
->serverchangingstate
) {
741 * do_scsi_linuxstat - scsi command returned linuxstat
742 * @cmdrsp: response from IOVM
743 * @scsicmd: Command issued.
745 * Don't log errors for disk-not-present inquiries
749 do_scsi_linuxstat(struct uiscmdrsp
*cmdrsp
, struct scsi_cmnd
*scsicmd
)
751 struct visorhba_devdata
*devdata
;
752 struct visordisk_info
*vdisk
;
753 struct scsi_device
*scsidev
;
755 scsidev
= scsicmd
->device
;
756 memcpy(scsicmd
->sense_buffer
, cmdrsp
->scsi
.sensebuf
, MAX_SENSE_SIZE
);
758 /* Do not log errors for disk-not-present inquiries */
759 if ((cmdrsp
->scsi
.cmnd
[0] == INQUIRY
) &&
760 (host_byte(cmdrsp
->scsi
.linuxstat
) == DID_NO_CONNECT
) &&
761 (cmdrsp
->scsi
.addlstat
== ADDL_SEL_TIMEOUT
))
763 /* Okay see what our error_count is here.... */
764 devdata
= (struct visorhba_devdata
*)scsidev
->host
->hostdata
;
765 for_each_vdisk_match(vdisk
, devdata
, scsidev
) {
766 if (atomic_read(&vdisk
->error_count
) < VISORHBA_ERROR_COUNT
) {
767 atomic_inc(&vdisk
->error_count
);
768 atomic_set(&vdisk
->ios_threshold
, IOS_ERROR_THRESHOLD
);
773 static int set_no_disk_inquiry_result(unsigned char *buf
,
774 size_t len
, bool is_lun0
)
776 if (!buf
|| len
< NO_DISK_INQUIRY_RESULT_LEN
)
778 memset(buf
, 0, NO_DISK_INQUIRY_RESULT_LEN
);
779 buf
[2] = SCSI_SPC2_VER
;
781 buf
[0] = DEV_DISK_CAPABLE_NOT_PRESENT
;
782 buf
[3] = DEV_HISUPPORT
;
784 buf
[0] = DEV_NOT_CAPABLE
;
786 buf
[4] = NO_DISK_INQUIRY_RESULT_LEN
- 5;
787 strncpy(buf
+ 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN
- 8);
792 * do_scsi_nolinuxstat - scsi command didn't have linuxstat
793 * @cmdrsp: response from IOVM
794 * @scsicmd: Command issued.
796 * Handle response when no linuxstat was returned
800 do_scsi_nolinuxstat(struct uiscmdrsp
*cmdrsp
, struct scsi_cmnd
*scsicmd
)
802 struct scsi_device
*scsidev
;
803 unsigned char buf
[36];
804 struct scatterlist
*sg
;
807 char *this_page_orig
;
809 struct visordisk_info
*vdisk
;
810 struct visorhba_devdata
*devdata
;
812 scsidev
= scsicmd
->device
;
813 if ((cmdrsp
->scsi
.cmnd
[0] == INQUIRY
) &&
814 (cmdrsp
->scsi
.bufflen
>= MIN_INQUIRY_RESULT_LEN
)) {
815 if (cmdrsp
->scsi
.no_disk_result
== 0)
818 /* Linux scsi code wants a device at Lun 0
819 * to issue report luns, but we don't want
820 * a disk there so we'll present a processor
823 set_no_disk_inquiry_result(buf
, (size_t)cmdrsp
->scsi
.bufflen
,
826 if (scsi_sg_count(scsicmd
) == 0) {
827 memcpy(scsi_sglist(scsicmd
), buf
,
828 cmdrsp
->scsi
.bufflen
);
832 sg
= scsi_sglist(scsicmd
);
833 for (i
= 0; i
< scsi_sg_count(scsicmd
); i
++) {
834 this_page_orig
= kmap_atomic(sg_page(sg
+ i
));
835 this_page
= (void *)((unsigned long)this_page_orig
|
837 memcpy(this_page
, buf
+ bufind
, sg
[i
].length
);
838 kunmap_atomic(this_page_orig
);
841 devdata
= (struct visorhba_devdata
*)scsidev
->host
->hostdata
;
842 for_each_vdisk_match(vdisk
, devdata
, scsidev
) {
843 if (atomic_read(&vdisk
->ios_threshold
) > 0) {
844 atomic_dec(&vdisk
->ios_threshold
);
845 if (atomic_read(&vdisk
->ios_threshold
) == 0)
846 atomic_set(&vdisk
->error_count
, 0);
853 * complete_scsi_command - complete a scsi command
854 * @uiscmdrsp: Response from Service Partition
855 * @scsicmd: The scsi command
857 * Response returned by the Service Partition, finish it and send
858 * completion to the scsi midlayer.
862 complete_scsi_command(struct uiscmdrsp
*cmdrsp
, struct scsi_cmnd
*scsicmd
)
864 /* take what we need out of cmdrsp and complete the scsicmd */
865 scsicmd
->result
= cmdrsp
->scsi
.linuxstat
;
866 if (cmdrsp
->scsi
.linuxstat
)
867 do_scsi_linuxstat(cmdrsp
, scsicmd
);
869 do_scsi_nolinuxstat(cmdrsp
, scsicmd
);
871 scsicmd
->scsi_done(scsicmd
);
875 * complete_taskmgmt_command - complete task management
876 * @cmdrsp: Response from the IOVM
878 * Service Partition returned the result of the task management
879 * command. Wake up anyone waiting for it.
882 static inline void complete_taskmgmt_command(struct uiscmdrsp
*cmdrsp
)
884 /* copy the result of the taskgmgt and
885 * wake up the error handler that is waiting for this
887 cmdrsp
->vdiskmgmt
.notifyresult_handle
= cmdrsp
->vdiskmgmt
.result
;
888 wake_up_all((wait_queue_head_t
*)cmdrsp
->scsitaskmgmt
.notify_handle
);
891 static struct work_struct dar_work_queue
;
892 static struct diskaddremove
*dar_work_queue_head
;
893 static spinlock_t dar_work_queue_lock
; /* Lock to protet dar_work_queue_head */
894 static unsigned short dar_work_queue_sched
;
897 * queue_disk_add_remove - IOSP has sent us a add/remove request
898 * @dar: disk add/remove request
900 * Queue the work needed to add/remove a disk.
903 static inline void queue_disk_add_remove(struct diskaddremove
*dar
)
907 spin_lock_irqsave(&dar_work_queue_lock
, flags
);
908 if (!dar_work_queue_head
) {
909 dar_work_queue_head
= dar
;
912 dar
->next
= dar_work_queue_head
;
913 dar_work_queue_head
= dar
;
915 if (!dar_work_queue_sched
) {
916 schedule_work(&dar_work_queue
);
917 dar_work_queue_sched
= 1;
919 spin_unlock_irqrestore(&dar_work_queue_lock
, flags
);
923 * process_disk_notify - IOSP has sent a process disk notify event
925 * @cmdrsp: Response from the IOSP
927 * Queue it to the work queue.
930 static void process_disk_notify(struct Scsi_Host
*shost
,
931 struct uiscmdrsp
*cmdrsp
)
933 struct diskaddremove
*dar
;
935 dar
= kzalloc(sizeof(*dar
), GFP_ATOMIC
);
939 dar
->add
= cmdrsp
->disknotify
.add
;
941 dar
->channel
= cmdrsp
->disknotify
.channel
;
942 dar
->id
= cmdrsp
->disknotify
.id
;
943 dar
->lun
= cmdrsp
->disknotify
.lun
;
944 queue_disk_add_remove(dar
);
948 * drain_queue - pull responses out of iochannel
949 * @cmdrsp: Response from the IOSP
950 * @devdata: device that owns this iochannel
952 * Pulls responses out of the iochannel and process the responses.
956 drain_queue(struct uiscmdrsp
*cmdrsp
, struct visorhba_devdata
*devdata
)
958 struct scsi_cmnd
*scsicmd
;
959 struct Scsi_Host
*shost
= devdata
->scsihost
;
962 if (!visorchannel_signalremove(devdata
->dev
->visorchannel
,
965 break; /* queue empty */
967 if (cmdrsp
->cmdtype
== CMD_SCSI_TYPE
) {
968 /* scsicmd location is returned by the
971 scsicmd
= del_scsipending_ent(devdata
,
972 cmdrsp
->scsi
.handle
);
975 /* complete the orig cmd */
976 complete_scsi_command(cmdrsp
, scsicmd
);
977 } else if (cmdrsp
->cmdtype
== CMD_SCSITASKMGMT_TYPE
) {
978 if (!del_scsipending_ent(devdata
,
979 cmdrsp
->scsitaskmgmt
.handle
))
981 complete_taskmgmt_command(cmdrsp
);
982 } else if (cmdrsp
->cmdtype
== CMD_NOTIFYGUEST_TYPE
) {
983 /* The vHba pointer has no meaning in a
984 * guest partition. Let's be safe and set it
985 * to NULL now. Do not use it here!
987 cmdrsp
->disknotify
.v_hba
= NULL
;
988 process_disk_notify(shost
, cmdrsp
);
990 /* cmdrsp is now available for resuse */
995 * process_incoming_rsps - Process responses from IOSP
996 * @v: void pointer to visorhba_devdata
998 * Main function for the thread that processes the responses
999 * from the IO Service Partition. When the queue is empty, wait
1000 * to check to see if it is full again.
1002 static int process_incoming_rsps(void *v
)
1004 struct visorhba_devdata
*devdata
= v
;
1005 struct uiscmdrsp
*cmdrsp
= NULL
;
1006 const int size
= sizeof(*cmdrsp
);
1008 cmdrsp
= kmalloc(size
, GFP_ATOMIC
);
1013 if (kthread_should_stop())
1015 wait_event_interruptible_timeout(
1016 devdata
->rsp_queue
, (atomic_read(
1017 &devdata
->interrupt_rcvd
) == 1),
1018 msecs_to_jiffies(devdata
->thread_wait_ms
));
1020 drain_queue(cmdrsp
, devdata
);
1027 * visorhba_pause - function to handle visorbus pause messages
1028 * @dev: device that is pausing.
1029 * @complete_func: function to call when finished
1031 * Something has happened to the IO Service Partition that is
1032 * handling this device. Quiet this device and reset commands
1033 * so that the Service Partition can be corrected.
1036 static int visorhba_pause(struct visor_device
*dev
,
1037 visorbus_state_complete_func complete_func
)
1039 struct visorhba_devdata
*devdata
= dev_get_drvdata(&dev
->device
);
1041 visorhba_serverdown(devdata
);
1042 complete_func(dev
, 0);
1047 * visorhba_resume - function called when the IO Service Partition is back
1048 * @dev: device that is pausing.
1049 * @complete_func: function to call when finished
1051 * Yay! The IO Service Partition is back, the channel has been wiped
1052 * so lets re-establish connection and start processing responses.
1053 * Returns 0 on success, error on failure.
1055 static int visorhba_resume(struct visor_device
*dev
,
1056 visorbus_state_complete_func complete_func
)
1058 struct visorhba_devdata
*devdata
;
1060 devdata
= dev_get_drvdata(&dev
->device
);
1064 if (devdata
->serverdown
&& !devdata
->serverchangingstate
)
1065 devdata
->serverchangingstate
= true;
1067 devdata
->thread
= visor_thread_start(process_incoming_rsps
, devdata
,
1070 devdata
->serverdown
= false;
1071 devdata
->serverchangingstate
= false;
1077 * visorhba_probe - device has been discovered, do acquire
1078 * @dev: visor_device that was discovered
1080 * A new HBA was discovered, do the initial connections of it.
1081 * Return 0 on success, otherwise error.
1083 static int visorhba_probe(struct visor_device
*dev
)
1085 struct Scsi_Host
*scsihost
;
1086 struct vhba_config_max max
;
1087 struct visorhba_devdata
*devdata
= NULL
;
1088 int i
, err
, channel_offset
;
1091 scsihost
= scsi_host_alloc(&visorhba_driver_template
,
1096 channel_offset
= offsetof(struct spar_io_channel_protocol
,
1098 err
= visorbus_read_channel(dev
, channel_offset
, &max
,
1099 sizeof(struct vhba_config_max
));
1101 goto err_scsi_host_put
;
1103 scsihost
->max_id
= (unsigned)max
.max_id
;
1104 scsihost
->max_lun
= (unsigned)max
.max_lun
;
1105 scsihost
->cmd_per_lun
= (unsigned)max
.cmd_per_lun
;
1106 scsihost
->max_sectors
=
1107 (unsigned short)(max
.max_io_size
>> 9);
1108 scsihost
->sg_tablesize
=
1109 (unsigned short)(max
.max_io_size
/ PAGE_SIZE
);
1110 if (scsihost
->sg_tablesize
> MAX_PHYS_INFO
)
1111 scsihost
->sg_tablesize
= MAX_PHYS_INFO
;
1112 err
= scsi_add_host(scsihost
, &dev
->device
);
1114 goto err_scsi_host_put
;
1116 devdata
= (struct visorhba_devdata
*)scsihost
->hostdata
;
1117 for (i
= 0; i
< VISORHBA_OPEN_MAX
; i
++) {
1118 if (!visorhbas_open
[i
].devdata
) {
1119 visorhbas_open
[i
].devdata
= devdata
;
1125 dev_set_drvdata(&dev
->device
, devdata
);
1127 init_waitqueue_head(&devdata
->rsp_queue
);
1128 spin_lock_init(&devdata
->privlock
);
1129 devdata
->serverdown
= false;
1130 devdata
->serverchangingstate
= false;
1131 devdata
->scsihost
= scsihost
;
1133 channel_offset
= offsetof(struct spar_io_channel_protocol
,
1134 channel_header
.features
);
1135 err
= visorbus_read_channel(dev
, channel_offset
, &features
, 8);
1137 goto err_scsi_remove_host
;
1138 features
|= ULTRA_IO_CHANNEL_IS_POLLING
;
1139 err
= visorbus_write_channel(dev
, channel_offset
, &features
, 8);
1141 goto err_scsi_remove_host
;
1143 devdata
->thread_wait_ms
= 2;
1144 devdata
->thread
= visor_thread_start(process_incoming_rsps
, devdata
,
1147 scsi_scan_host(scsihost
);
1151 err_scsi_remove_host
:
1152 scsi_remove_host(scsihost
);
1155 scsi_host_put(scsihost
);
1160 * visorhba_remove - remove a visorhba device
1161 * @dev: Device to remove
1163 * Removes the visorhba device.
1166 static void visorhba_remove(struct visor_device
*dev
)
1168 struct visorhba_devdata
*devdata
= dev_get_drvdata(&dev
->device
);
1169 struct Scsi_Host
*scsihost
= NULL
;
1174 scsihost
= devdata
->scsihost
;
1175 visor_thread_stop(devdata
->thread
);
1176 scsi_remove_host(scsihost
);
1177 scsi_host_put(scsihost
);
1179 dev_set_drvdata(&dev
->device
, NULL
);
1183 * visorhba_init - driver init routine
1185 * Initialize the visorhba driver and register it with visorbus
1186 * to handle s-Par virtual host bus adapter.
1188 static int visorhba_init(void)
1193 visorhba_debugfs_dir
= debugfs_create_dir("visorhba", NULL
);
1194 if (!visorhba_debugfs_dir
)
1197 ret
= debugfs_create_file("info", S_IRUSR
, visorhba_debugfs_dir
, NULL
,
1198 &debugfs_info_fops
);
1202 goto cleanup_debugfs
;
1205 rc
= visorbus_register_visor_driver(&visorhba_driver
);
1207 goto cleanup_debugfs
;
1212 debugfs_remove_recursive(visorhba_debugfs_dir
);
1218 * visorhba_cleanup - driver exit routine
1220 * Unregister driver from the bus and free up memory.
1222 static void visorhba_exit(void)
1224 visorbus_unregister_visor_driver(&visorhba_driver
);
1225 debugfs_remove_recursive(visorhba_debugfs_dir
);
1228 module_init(visorhba_init
);
1229 module_exit(visorhba_exit
);
1231 MODULE_AUTHOR("Unisys");
1232 MODULE_LICENSE("GPL");
1233 MODULE_DESCRIPTION("s-Par hba driver");