1 /*
2 * scsi_error.c Copyright (C) 1997 Eric Youngdale
3 *
4 * SCSI error/timeout handling
5 * Initial versions: Eric Youngdale. Based upon conversations with
6 * Leonard Zubkoff and David Miller at Linux Expo,
7 * ideas originating from all over the place.
8 *
9 * Restructured scsi_unjam_host and associated functions.
10 * September 04, 2002 Mike Anderson (andmike@us.ibm.com)
11 *
12 * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
13 * minor cleanups.
14 * September 30, 2002 Mike Anderson (andmike@us.ibm.com)
15 */
16
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/timer.h>
20 #include <linux/string.h>
21 #include <linux/kernel.h>
22 #include <linux/freezer.h>
23 #include <linux/kthread.h>
24 #include <linux/interrupt.h>
25 #include <linux/blkdev.h>
26 #include <linux/delay.h>
27 #include <linux/scatterlist.h>
28
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_dbg.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_transport.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_ioctl.h>
37
38 #include "scsi_priv.h"
39 #include "scsi_logging.h"
40 #include "scsi_transport_api.h"
41
42 #define SENSE_TIMEOUT (10*HZ)
43
44 /*
45 * These should *probably* be handled by the host itself.
46 * Since it is allowed to sleep, it probably should.
47 */
48 #define BUS_RESET_SETTLE_TIME (10)
49 #define HOST_RESET_SETTLE_TIME (10)
50
51 /* called with shost->host_lock held */
52 void scsi_eh_wakeup(struct Scsi_Host *shost)
53 {
54 if (shost->host_busy == shost->host_failed) {
55 wake_up_process(shost->ehandler);
56 SCSI_LOG_ERROR_RECOVERY(5,
57 printk("Waking error handler thread\n"));
58 }
59 }
60
61 /**
62 * scsi_schedule_eh - schedule EH for SCSI host
63 * @shost: SCSI host to invoke error handling on.
64 *
65 * Schedule SCSI EH without scmd.
66 **/
67 void scsi_schedule_eh(struct Scsi_Host *shost)
68 {
69 unsigned long flags;
70
71 spin_lock_irqsave(shost->host_lock, flags);
72
73 if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
74 scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
75 shost->host_eh_scheduled++;
76 scsi_eh_wakeup(shost);
77 }
78
79 spin_unlock_irqrestore(shost->host_lock, flags);
80 }
81 EXPORT_SYMBOL_GPL(scsi_schedule_eh);
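/*
 * Illustrative sketch (not part of the original file): a transport class
 * that notices its link needs recovery, but has no failed scmd to hand
 * over, can simply call
 *
 *	scsi_schedule_eh(shost);
 *
 * which bumps host_eh_scheduled and wakes the error handler thread.
 */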
82
83 /**
84 * scsi_eh_scmd_add - add scsi cmd to error handling.
85 * @scmd: scmd to run eh on.
86 * @eh_flag: optional SCSI_EH flag.
87 *
88 * Return value:
89 * 0 on failure.
90 **/
91 int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
92 {
93 struct Scsi_Host *shost = scmd->device->host;
94 unsigned long flags;
95 int ret = 0;
96
97 if (!shost->ehandler)
98 return 0;
99
100 spin_lock_irqsave(shost->host_lock, flags);
101 if (scsi_host_set_state(shost, SHOST_RECOVERY))
102 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
103 goto out_unlock;
104
105 ret = 1;
106 scmd->eh_eflags |= eh_flag;
107 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
108 shost->host_failed++;
109 scsi_eh_wakeup(shost);
110 out_unlock:
111 spin_unlock_irqrestore(shost->host_lock, flags);
112 return ret;
113 }
114
115 /**
116 * scsi_add_timer - Start timeout timer for a single scsi command.
117 * @scmd: scsi command that is about to start running.
118 * @timeout: amount of time to allow this command to run.
119 * @complete: timeout function to call if timer isn't canceled.
120 *
121 * Notes:
122 * This should be turned into an inline function. Each scsi command
123 * has its own timer, and as it is added to the queue, we set up the
124 * timer. When the command completes, we cancel the timer.
125 **/
126 void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
127 void (*complete)(struct scsi_cmnd *))
128 {
129
130 /*
131 * If the clock was already running for this command, then
132 * first delete the timer. The timer handling code gets rather
133 * confused if we don't do this.
134 */
135 if (scmd->eh_timeout.function)
136 del_timer(&scmd->eh_timeout);
137
138 scmd->eh_timeout.data = (unsigned long)scmd;
139 scmd->eh_timeout.expires = jiffies + timeout;
140 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
141
142 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
143 " %d, (%p)\n", __FUNCTION__,
144 scmd, timeout, complete));
145
146 add_timer(&scmd->eh_timeout);
147 }
148
149 /**
150 * scsi_delete_timer - Delete/cancel timer for a given function.
151 * @scmd: Cmd that we are canceling timer for
152 *
153 * Notes:
154 * This should be turned into an inline function.
155 *
156 * Return value:
157 * 1 if we were able to detach the timer. 0 if we blew it, and the
158 * timer function has already started to run.
159 **/
160 int scsi_delete_timer(struct scsi_cmnd *scmd)
161 {
162 int rtn;
163
164 rtn = del_timer(&scmd->eh_timeout);
165
166 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
167 " rtn: %d\n", __FUNCTION__,
168 scmd, rtn));
169
170 scmd->eh_timeout.data = (unsigned long)NULL;
171 scmd->eh_timeout.function = NULL;
172
173 return rtn;
174 }
175
176 /**
177 * scsi_times_out - Timeout function for normal scsi commands.
178 * @scmd: Cmd that is timing out.
179 *
180 * Notes:
181 * We do not need to lock this. There is the potential for a race
182 * only in that the normal completion handling might run, but if the
183 * normal completion function determines that the timer has already
184 * fired, then it mustn't do anything.
185 **/
186 void scsi_times_out(struct scsi_cmnd *scmd)
187 {
188 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
189
190 scsi_log_completion(scmd, TIMEOUT_ERROR);
191
192 if (scmd->device->host->transportt->eh_timed_out)
193 eh_timed_out = scmd->device->host->transportt->eh_timed_out;
194 else if (scmd->device->host->hostt->eh_timed_out)
195 eh_timed_out = scmd->device->host->hostt->eh_timed_out;
196 else
197 eh_timed_out = NULL;
198
199 if (eh_timed_out)
200 switch (eh_timed_out(scmd)) {
201 case EH_HANDLED:
202 __scsi_done(scmd);
203 return;
204 case EH_RESET_TIMER:
205 scsi_add_timer(scmd, scmd->timeout_per_command,
206 scsi_times_out);
207 return;
208 case EH_NOT_HANDLED:
209 break;
210 }
211
212 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
213 scmd->result |= DID_TIME_OUT << 16;
214 __scsi_done(scmd);
215 }
216 }
217
218 /**
219 * scsi_block_when_processing_errors - Prevent cmds from being queued.
220 * @sdev: Device on which we are performing recovery.
221 *
222 * Description:
223 * We block until the host is out of error recovery, and then check to
224 * see whether the host or the device is offline.
225 *
226 * Return value:
227 * 0 when dev was taken offline by error recovery. 1 OK to proceed.
228 **/
229 int scsi_block_when_processing_errors(struct scsi_device *sdev)
230 {
231 int online;
232
233 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
234
235 online = scsi_device_online(sdev);
236
237 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__,
238 online));
239
240 return online;
241 }
242 EXPORT_SYMBOL(scsi_block_when_processing_errors);
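/*
 * Illustrative sketch (not from this file): an upper-level driver would
 * typically guard a user-initiated command path with this helper:
 *
 *	if (!scsi_block_when_processing_errors(sdev))
 *		return -ENODEV;
 *
 * so the request sleeps until recovery finishes and is rejected if the
 * device was taken offline in the process.
 */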
243
244 #ifdef CONFIG_SCSI_LOGGING
245 /**
246 * scsi_eh_prt_fail_stats - Log info on failures.
247 * @shost: scsi host being recovered.
248 * @work_q: Queue of scsi cmds to process.
249 **/
250 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
251 struct list_head *work_q)
252 {
253 struct scsi_cmnd *scmd;
254 struct scsi_device *sdev;
255 int total_failures = 0;
256 int cmd_failed = 0;
257 int cmd_cancel = 0;
258 int devices_failed = 0;
259
260 shost_for_each_device(sdev, shost) {
261 list_for_each_entry(scmd, work_q, eh_entry) {
262 if (scmd->device == sdev) {
263 ++total_failures;
264 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
265 ++cmd_cancel;
266 else
267 ++cmd_failed;
268 }
269 }
270
271 if (cmd_cancel || cmd_failed) {
272 SCSI_LOG_ERROR_RECOVERY(3,
273 sdev_printk(KERN_INFO, sdev,
274 "%s: cmds failed: %d, cancel: %d\n",
275 __FUNCTION__, cmd_failed,
276 cmd_cancel));
277 cmd_cancel = 0;
278 cmd_failed = 0;
279 ++devices_failed;
280 }
281 }
282
283 SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
284 " devices require eh work\n",
285 total_failures, devices_failed));
286 }
287 #endif
288
289 /**
290 * scsi_check_sense - Examine scsi cmd sense
291 * @scmd: Cmd to have sense checked.
292 *
293 * Return value:
294 * SUCCESS or FAILED or NEEDS_RETRY
295 *
296 * Notes:
297 * When a deferred error is detected the current command has
298 * not been executed and needs retrying.
299 **/
300 static int scsi_check_sense(struct scsi_cmnd *scmd)
301 {
302 struct scsi_sense_hdr sshdr;
303
304 if (! scsi_command_normalize_sense(scmd, &sshdr))
305 return FAILED; /* no valid sense data */
306
307 if (scsi_sense_is_deferred(&sshdr))
308 return NEEDS_RETRY;
309
310 /*
311 * Previous logic looked for FILEMARK, EOM or ILI which are
312 * mainly associated with tapes and returned SUCCESS.
313 */
314 if (sshdr.response_code == 0x70) {
315 /* fixed format */
316 if (scmd->sense_buffer[2] & 0xe0)
317 return SUCCESS;
318 } else {
319 /*
320 * descriptor format: look for "stream commands sense data
321 * descriptor" (see SSC-3). Assume single sense data
322 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
323 */
324 if ((sshdr.additional_length > 3) &&
325 (scmd->sense_buffer[8] == 0x4) &&
326 (scmd->sense_buffer[11] & 0xe0))
327 return SUCCESS;
328 }
329
330 switch (sshdr.sense_key) {
331 case NO_SENSE:
332 return SUCCESS;
333 case RECOVERED_ERROR:
334 return /* soft_error */ SUCCESS;
335
336 case ABORTED_COMMAND:
337 return NEEDS_RETRY;
338 case NOT_READY:
339 case UNIT_ATTENTION:
340 /*
341 * if we are expecting a cc/ua because of a bus reset that we
342 * performed, treat this just as a retry. otherwise this is
343 * information that we should pass up to the upper-level driver
344 * so that we can deal with it there.
345 */
346 if (scmd->device->expecting_cc_ua) {
347 scmd->device->expecting_cc_ua = 0;
348 return NEEDS_RETRY;
349 }
350 /*
351 * if the device is in the process of becoming ready, we
352 * should retry.
353 */
354 if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
355 return NEEDS_RETRY;
356 /*
357 * if the device is not started, we need to wake
358 * the error handler to start the motor
359 */
360 if (scmd->device->allow_restart &&
361 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
362 return FAILED;
363 return SUCCESS;
364
365 /* these three are not supported */
366 case COPY_ABORTED:
367 case VOLUME_OVERFLOW:
368 case MISCOMPARE:
369 return SUCCESS;
370
371 case MEDIUM_ERROR:
372 if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
373 sshdr.asc == 0x13 || /* AMNF DATA FIELD */
374 sshdr.asc == 0x14) { /* RECORD NOT FOUND */
375 return SUCCESS;
376 }
377 return NEEDS_RETRY;
378
379 case HARDWARE_ERROR:
380 if (scmd->device->retry_hwerror)
381 return NEEDS_RETRY;
382 else
383 return SUCCESS;
384
385 case ILLEGAL_REQUEST:
386 case BLANK_CHECK:
387 case DATA_PROTECT:
388 default:
389 return SUCCESS;
390 }
391 }
392
393 /**
394 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
395 * @scmd: SCSI cmd to examine.
396 *
397 * Notes:
398 * This is *only* called when we are examining the status of commands
399 * queued during error recovery. the main difference here is that we
400 * don't allow for the possibility of retries here, and we are a lot
401 * more restrictive about what we consider acceptable.
402 **/
403 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
404 {
405 /*
406 * first check the host byte, to see if there is anything in there
407 * that would indicate what we need to do.
408 */
409 if (host_byte(scmd->result) == DID_RESET) {
410 /*
411 * rats. we are already in the error handler, so we now
412 * get to try and figure out what to do next. if the sense
413 * is valid, we have a pretty good idea of what to do.
414 * if not, we mark it as FAILED.
415 */
416 return scsi_check_sense(scmd);
417 }
418 if (host_byte(scmd->result) != DID_OK)
419 return FAILED;
420
421 /*
422 * next, check the message byte.
423 */
424 if (msg_byte(scmd->result) != COMMAND_COMPLETE)
425 return FAILED;
426
427 /*
428 * now, check the status byte to see if this indicates
429 * anything special.
430 */
431 switch (status_byte(scmd->result)) {
432 case GOOD:
433 case COMMAND_TERMINATED:
434 return SUCCESS;
435 case CHECK_CONDITION:
436 return scsi_check_sense(scmd);
437 case CONDITION_GOOD:
438 case INTERMEDIATE_GOOD:
439 case INTERMEDIATE_C_GOOD:
440 /*
441 * who knows? FIXME(eric)
442 */
443 return SUCCESS;
444 case BUSY:
445 case QUEUE_FULL:
446 case RESERVATION_CONFLICT:
447 default:
448 return FAILED;
449 }
450 return FAILED;
451 }
452
453 /**
454 * scsi_eh_done - Completion function for error handling.
455 * @scmd: Cmd that is done.
456 **/
457 static void scsi_eh_done(struct scsi_cmnd *scmd)
458 {
459 struct completion *eh_action;
460
461 SCSI_LOG_ERROR_RECOVERY(3,
462 printk("%s scmd: %p result: %x\n",
463 __FUNCTION__, scmd, scmd->result));
464
465 eh_action = scmd->device->host->eh_action;
466 if (eh_action)
467 complete(eh_action);
468 }
469
470 /**
471 * scsi_try_host_reset - ask host adapter to reset itself
472  * @scmd: SCSI cmd to send host reset.
473 **/
474 static int scsi_try_host_reset(struct scsi_cmnd *scmd)
475 {
476 unsigned long flags;
477 int rtn;
478
479 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
480 __FUNCTION__));
481
482 if (!scmd->device->host->hostt->eh_host_reset_handler)
483 return FAILED;
484
485 rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
486
487 if (rtn == SUCCESS) {
488 if (!scmd->device->host->hostt->skip_settle_delay)
489 ssleep(HOST_RESET_SETTLE_TIME);
490 spin_lock_irqsave(scmd->device->host->host_lock, flags);
491 scsi_report_bus_reset(scmd->device->host,
492 scmd_channel(scmd));
493 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
494 }
495
496 return rtn;
497 }
498
499 /**
500 * scsi_try_bus_reset - ask host to perform a bus reset
501 * @scmd: SCSI cmd to send bus reset.
502 **/
503 static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
504 {
505 unsigned long flags;
506 int rtn;
507
508 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
509 __FUNCTION__));
510
511 if (!scmd->device->host->hostt->eh_bus_reset_handler)
512 return FAILED;
513
514 rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
515
516 if (rtn == SUCCESS) {
517 if (!scmd->device->host->hostt->skip_settle_delay)
518 ssleep(BUS_RESET_SETTLE_TIME);
519 spin_lock_irqsave(scmd->device->host->host_lock, flags);
520 scsi_report_bus_reset(scmd->device->host,
521 scmd_channel(scmd));
522 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
523 }
524
525 return rtn;
526 }
527
528 /**
529 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
530 * @scmd: SCSI cmd used to send BDR
531 *
532 * Notes:
533 * There is no timeout for this operation. if this operation is
534 * unreliable for a given host, then the host itself needs to put a
535 * timer on it, and set the host back to a consistent state prior to
536 * returning.
537 **/
538 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
539 {
540 int rtn;
541
542 if (!scmd->device->host->hostt->eh_device_reset_handler)
543 return FAILED;
544
545 rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
546 if (rtn == SUCCESS) {
547 scmd->device->was_reset = 1;
548 scmd->device->expecting_cc_ua = 1;
549 }
550
551 return rtn;
552 }
553
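/*
 * __scsi_try_to_abort_cmd - ask the low-level driver to abort @scmd.
 * Returns FAILED if the host template provides no eh_abort_handler,
 * otherwise whatever the handler reports.
 */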
554 static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
555 {
556 if (!scmd->device->host->hostt->eh_abort_handler)
557 return FAILED;
558
559 return scmd->device->host->hostt->eh_abort_handler(scmd);
560 }
561
562 /**
563 * scsi_try_to_abort_cmd - Ask host to abort a running command.
564 * @scmd: SCSI cmd to abort from Lower Level.
565 *
566 * Notes:
567 * This function will not return until the user's completion function
568 * has been called. there is no timeout on this operation. if the
569 * author of the low-level driver wishes this operation to be timed,
570 * they can provide this facility themselves. helper functions in
571 * scsi_error.c can be supplied to make this easier to do.
572 **/
573 static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
574 {
575 /*
576 * scsi_done was called just after the command timed out and before
577 * we had a chance to process it. (db)
578 */
579 if (scmd->serial_number == 0)
580 return SUCCESS;
581 return __scsi_try_to_abort_cmd(scmd);
582 }
583
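/*
 * scsi_abort_eh_cmnd - abort a command issued by the error handler itself.
 * Escalates through the recovery ladder: abort, then bus device reset,
 * then bus reset, then host reset, stopping at the first success.
 */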
584 static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
585 {
586 if (__scsi_try_to_abort_cmd(scmd) != SUCCESS)
587 if (scsi_try_bus_device_reset(scmd) != SUCCESS)
588 if (scsi_try_bus_reset(scmd) != SUCCESS)
589 scsi_try_host_reset(scmd);
590 }
591
592 /**
593  * scsi_eh_prep_cmnd - Save a scsi command info as part of error recovery
594 * @scmd: SCSI command structure to hijack
595 * @ses: structure to save restore information
596 * @cmnd: CDB to send. Can be NULL if no new cmnd is needed
597 * @cmnd_size: size in bytes of @cmnd
598  * @sense_bytes: size of sense data to copy, or 0 (if != 0 @cmnd is ignored)
599 *
600  * This function is used to save scsi command information before re-execution
601 * as part of the error recovery process. If @sense_bytes is 0 the command
602 * sent must be one that does not transfer any data. If @sense_bytes != 0
603  * @cmnd is ignored and this function sets up a REQUEST_SENSE command
604 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
605 **/
606 void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
607 unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
608 {
609 struct scsi_device *sdev = scmd->device;
610
611 /*
612 * We need saved copies of a number of fields - this is because
613 * error handling may need to overwrite these with different values
614 * to run different commands, and once error handling is complete,
615 * we will need to restore these values prior to running the actual
616 * command.
617 */
618 ses->cmd_len = scmd->cmd_len;
619 memcpy(ses->cmnd, scmd->cmnd, sizeof(scmd->cmnd));
620 ses->data_direction = scmd->sc_data_direction;
621 ses->bufflen = scmd->request_bufflen;
622 ses->buffer = scmd->request_buffer;
623 ses->use_sg = scmd->use_sg;
624 ses->resid = scmd->resid;
625 ses->result = scmd->result;
626
627 if (sense_bytes) {
628 scmd->request_bufflen = min_t(unsigned,
629 sizeof(scmd->sense_buffer), sense_bytes);
630 sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
631 scmd->request_bufflen);
632 scmd->request_buffer = &ses->sense_sgl;
633 scmd->sc_data_direction = DMA_FROM_DEVICE;
634 scmd->use_sg = 1;
635 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
636 scmd->cmnd[0] = REQUEST_SENSE;
637 scmd->cmnd[4] = scmd->request_bufflen;
638 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
639 } else {
640 scmd->request_buffer = NULL;
641 scmd->request_bufflen = 0;
642 scmd->sc_data_direction = DMA_NONE;
643 scmd->use_sg = 0;
644 if (cmnd) {
645 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
646 memcpy(scmd->cmnd, cmnd, cmnd_size);
647 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
648 }
649 }
650
651 scmd->underflow = 0;
652
653 if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
654 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
655 (sdev->lun << 5 & 0xe0);
656
657 /*
658 * Zero the sense buffer. The scsi spec mandates that any
659 * untransferred sense data should be interpreted as being zero.
660 */
661 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
662 }
663 EXPORT_SYMBOL(scsi_eh_prep_cmnd);
664
665 /**
666  * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recovery
667 * @scmd: SCSI command structure to restore
668  * @ses: saved information from a corresponding call to scsi_eh_prep_cmnd()
669  *
670  * Undo any damage done by above scsi_eh_prep_cmnd().
671 **/
672 void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
673 {
674 /*
675 * Restore original data
676 */
677 scmd->cmd_len = ses->cmd_len;
678 memcpy(scmd->cmnd, ses->cmnd, sizeof(scmd->cmnd));
679 scmd->sc_data_direction = ses->data_direction;
680 scmd->request_bufflen = ses->bufflen;
681 scmd->request_buffer = ses->buffer;
682 scmd->use_sg = ses->use_sg;
683 scmd->resid = ses->resid;
684 scmd->result = ses->result;
685 }
686 EXPORT_SYMBOL(scsi_eh_restore_cmnd);
687
688 /**
689  * scsi_send_eh_cmnd - submit a scsi command as part of error recovery
690 * @scmd: SCSI command structure to hijack
691 * @cmnd: CDB to send
692 * @cmnd_size: size in bytes of @cmnd
693 * @timeout: timeout for this request
694 * @sense_bytes: size of sense data to copy or 0
695 *
696 * This function is used to send a scsi command down to a target device
697 * as part of the error recovery process. See also scsi_eh_prep_cmnd() above.
698 *
699 * Return value:
700 * SUCCESS or FAILED or NEEDS_RETRY
701 **/
702 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
703 int cmnd_size, int timeout, unsigned sense_bytes)
704 {
705 struct scsi_device *sdev = scmd->device;
706 struct Scsi_Host *shost = sdev->host;
707 DECLARE_COMPLETION_ONSTACK(done);
708 unsigned long timeleft;
709 unsigned long flags;
710 struct scsi_eh_save ses;
711 int rtn;
712
713 scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
714 shost->eh_action = &done;
715
716 spin_lock_irqsave(shost->host_lock, flags);
717 scsi_log_send(scmd);
718 shost->hostt->queuecommand(scmd, scsi_eh_done);
719 spin_unlock_irqrestore(shost->host_lock, flags);
720
721 timeleft = wait_for_completion_timeout(&done, timeout);
722
723 shost->eh_action = NULL;
724
725 scsi_log_completion(scmd, SUCCESS);
726
727 SCSI_LOG_ERROR_RECOVERY(3,
728 printk("%s: scmd: %p, timeleft: %ld\n",
729 __FUNCTION__, scmd, timeleft));
730
731 /*
732 * If there is time left scsi_eh_done got called, and we will
733 * examine the actual status codes to see whether the command
734 * actually did complete normally, else tell the host to forget
735 * about this command.
736 */
737 if (timeleft) {
738 rtn = scsi_eh_completed_normally(scmd);
739 SCSI_LOG_ERROR_RECOVERY(3,
740 printk("%s: scsi_eh_completed_normally %x\n",
741 __FUNCTION__, rtn));
742
743 switch (rtn) {
744 case SUCCESS:
745 case NEEDS_RETRY:
746 case FAILED:
747 break;
748 default:
749 rtn = FAILED;
750 break;
751 }
752 } else {
753 scsi_abort_eh_cmnd(scmd);
754 rtn = FAILED;
755 }
756
757 scsi_eh_restore_cmnd(scmd, &ses);
758 return rtn;
759 }
760
761 /**
762 * scsi_request_sense - Request sense data from a particular target.
763 * @scmd: SCSI cmd for request sense.
764 *
765 * Notes:
766 * Some hosts automatically obtain this information, others require
767 * that we obtain it on our own. This function will *not* return until
768 * the command either times out, or it completes.
769 **/
770 static int scsi_request_sense(struct scsi_cmnd *scmd)
771 {
772 return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0);
773 }
774
775 /**
776 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
777 * @scmd: Original SCSI cmd that eh has finished.
778 * @done_q: Queue for processed commands.
779 *
780 * Notes:
781  *    We don't want to use the normal command completion while we are
782 * still handling errors - it may cause other commands to be queued,
783 * and that would disturb what we are doing. thus we really want to
784 * keep a list of pending commands for final completion, and once we
785 * are ready to leave error handling we handle completion for real.
786 **/
787 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
788 {
789 scmd->device->host->host_failed--;
790 scmd->eh_eflags = 0;
791 list_move_tail(&scmd->eh_entry, done_q);
792 }
793 EXPORT_SYMBOL(scsi_eh_finish_cmd);
794
795 /**
796 * scsi_eh_get_sense - Get device sense data.
797 * @work_q: Queue of commands to process.
798  * @done_q: Queue of processed commands.
799 *
800 * Description:
801 * See if we need to request sense information. if so, then get it
802 * now, so we have a better idea of what to do.
803 *
804 * Notes:
805 * This has the unfortunate side effect that if a shost adapter does
806  *    not automatically request sense information, we end up shutting
807 * it down before we request it.
808 *
809 * All drivers should request sense information internally these days,
810 * so for now all I have to say is tough noogies if you end up in here.
811 *
812 * XXX: Long term this code should go away, but that needs an audit of
813 * all LLDDs first.
814 **/
815 int scsi_eh_get_sense(struct list_head *work_q,
816 struct list_head *done_q)
817 {
818 struct scsi_cmnd *scmd, *next;
819 int rtn;
820
821 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
822 if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
823 SCSI_SENSE_VALID(scmd))
824 continue;
825
826 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
827 "%s: requesting sense\n",
828 current->comm));
829 rtn = scsi_request_sense(scmd);
830 if (rtn != SUCCESS)
831 continue;
832
833 SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
834 " result %x\n", scmd,
835 scmd->result));
836 SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));
837
838 rtn = scsi_decide_disposition(scmd);
839
840 /*
841 * if the result was normal, then just pass it along to the
842 * upper level.
843 */
844 if (rtn == SUCCESS)
845 /* we don't want this command reissued, just
846 * finished with the sense data, so set
847 * retries to the max allowed to ensure it
848 * won't get reissued */
849 scmd->retries = scmd->allowed;
850 else if (rtn != NEEDS_RETRY)
851 continue;
852
853 scsi_eh_finish_cmd(scmd, done_q);
854 }
855
856 return list_empty(work_q);
857 }
858 EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
859
860 /**
861 * scsi_eh_tur - Send TUR to device.
862 * @scmd: Scsi cmd to send TUR
863 *
864 * Return value:
865 * 0 - Device is ready. 1 - Device NOT ready.
866 **/
867 static int scsi_eh_tur(struct scsi_cmnd *scmd)
868 {
869 static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
870 int retry_cnt = 1, rtn;
871
872 retry_tur:
873 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
874
875 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
876 __FUNCTION__, scmd, rtn));
877
878 switch (rtn) {
879 case NEEDS_RETRY:
880 if (retry_cnt--)
881 goto retry_tur;
882 /*FALLTHRU*/
883 case SUCCESS:
884 return 0;
885 default:
886 return 1;
887 }
888 }
889
890 /**
891 * scsi_eh_abort_cmds - abort canceled commands.
892  * @work_q: list_head of failed commands.
893  * @done_q: list_head for processed commands.
894 *
895  * Description:
896 * Try and see whether or not it makes sense to try and abort the
897 * running command. this only works out to be the case if we have one
898 * command that has timed out. if the command simply failed, it makes
899 * no sense to try and abort the command, since as far as the shost
900 * adapter is concerned, it isn't running.
901 **/
902 static int scsi_eh_abort_cmds(struct list_head *work_q,
903 struct list_head *done_q)
904 {
905 struct scsi_cmnd *scmd, *next;
906 int rtn;
907
908 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
909 if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
910 continue;
911 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
912 "0x%p\n", current->comm,
913 scmd));
914 rtn = scsi_try_to_abort_cmd(scmd);
915 if (rtn == SUCCESS) {
916 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
917 if (!scsi_device_online(scmd->device) ||
918 !scsi_eh_tur(scmd)) {
919 scsi_eh_finish_cmd(scmd, done_q);
920 }
921
922 } else
923 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
924 " cmd failed:"
925 "0x%p\n",
926 current->comm,
927 scmd));
928 }
929
930 return list_empty(work_q);
931 }
932
933 /**
934 * scsi_eh_try_stu - Send START_UNIT to device.
935 * @scmd: Scsi cmd to send START_UNIT
936 *
937 * Return value:
938 * 0 - Device is ready. 1 - Device NOT ready.
939 **/
940 static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
941 {
942 static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
943
944 if (scmd->device->allow_restart) {
945 int i, rtn = NEEDS_RETRY;
946
947 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
948 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
949 scmd->device->timeout, 0);
950
951 if (rtn == SUCCESS)
952 return 0;
953 }
954
955 return 1;
956 }
957
958 /**
959 * scsi_eh_stu - send START_UNIT if needed
960 * @shost: scsi host being recovered.
961  * @done_q: list_head for processed commands.
962 *
963 * Notes:
964 * If commands are failing due to not ready, initializing command required,
965 * try revalidating the device, which will end up sending a start unit.
966 **/
967 static int scsi_eh_stu(struct Scsi_Host *shost,
968 struct list_head *work_q,
969 struct list_head *done_q)
970 {
971 struct scsi_cmnd *scmd, *stu_scmd, *next;
972 struct scsi_device *sdev;
973
974 shost_for_each_device(sdev, shost) {
975 stu_scmd = NULL;
976 list_for_each_entry(scmd, work_q, eh_entry)
977 if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
978 scsi_check_sense(scmd) == FAILED ) {
979 stu_scmd = scmd;
980 break;
981 }
982
983 if (!stu_scmd)
984 continue;
985
986 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
987 " 0x%p\n", current->comm, sdev));
988
989 if (!scsi_eh_try_stu(stu_scmd)) {
990 if (!scsi_device_online(sdev) ||
991 !scsi_eh_tur(stu_scmd)) {
992 list_for_each_entry_safe(scmd, next,
993 work_q, eh_entry) {
994 if (scmd->device == sdev)
995 scsi_eh_finish_cmd(scmd, done_q);
996 }
997 }
998 } else {
999 SCSI_LOG_ERROR_RECOVERY(3,
1000 printk("%s: START_UNIT failed to sdev:"
1001 " 0x%p\n", current->comm, sdev));
1002 }
1003 }
1004
1005 return list_empty(work_q);
1006 }
1007
1008
1009 /**
1010 * scsi_eh_bus_device_reset - send bdr if needed
1011 * @shost: scsi host being recovered.
1012  * @done_q: list_head for processed commands.
1013 *
1014 * Notes:
1015 * Try a bus device reset. still, look to see whether we have multiple
1016 * devices that are jammed or not - if we have multiple devices, it
1017 * makes no sense to try bus_device_reset - we really would need to try
1018 * a bus_reset instead.
1019 **/
1020 static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1021 struct list_head *work_q,
1022 struct list_head *done_q)
1023 {
1024 struct scsi_cmnd *scmd, *bdr_scmd, *next;
1025 struct scsi_device *sdev;
1026 int rtn;
1027
1028 shost_for_each_device(sdev, shost) {
1029 bdr_scmd = NULL;
1030 list_for_each_entry(scmd, work_q, eh_entry)
1031 if (scmd->device == sdev) {
1032 bdr_scmd = scmd;
1033 break;
1034 }
1035
1036 if (!bdr_scmd)
1037 continue;
1038
1039 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
1040 " 0x%p\n", current->comm,
1041 sdev));
1042 rtn = scsi_try_bus_device_reset(bdr_scmd);
1043 if (rtn == SUCCESS) {
1044 if (!scsi_device_online(sdev) ||
1045 !scsi_eh_tur(bdr_scmd)) {
1046 list_for_each_entry_safe(scmd, next,
1047 work_q, eh_entry) {
1048 if (scmd->device == sdev)
1049 scsi_eh_finish_cmd(scmd,
1050 done_q);
1051 }
1052 }
1053 } else {
1054 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
1055 " failed sdev:"
1056 "0x%p\n",
1057 current->comm,
1058 sdev));
1059 }
1060 }
1061
1062 return list_empty(work_q);
1063 }
1064
1065 /**
1066 * scsi_eh_bus_reset - send a bus reset
1067 * @shost: scsi host being recovered.
1068  * @done_q: list_head for processed commands.
1069 **/
1070 static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1071 struct list_head *work_q,
1072 struct list_head *done_q)
1073 {
1074 struct scsi_cmnd *scmd, *chan_scmd, *next;
1075 unsigned int channel;
1076 int rtn;
1077
1078 /*
1079 * we really want to loop over the various channels, and do this on
1080 * a channel by channel basis. we should also check to see if any
1081 * of the failed commands are on soft_reset devices, and if so, skip
1082 * the reset.
1083 */
1084
1085 for (channel = 0; channel <= shost->max_channel; channel++) {
1086 chan_scmd = NULL;
1087 list_for_each_entry(scmd, work_q, eh_entry) {
1088 if (channel == scmd_channel(scmd)) {
1089 chan_scmd = scmd;
1090 break;
1091 /*
1092 * FIXME add back in some support for
1093 * soft_reset devices.
1094 */
1095 }
1096 }
1097
1098 if (!chan_scmd)
1099 continue;
1100 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
1101 " %d\n", current->comm,
1102 channel));
1103 rtn = scsi_try_bus_reset(chan_scmd);
1104 if (rtn == SUCCESS) {
1105 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1106 if (channel == scmd_channel(scmd))
1107 if (!scsi_device_online(scmd->device) ||
1108 !scsi_eh_tur(scmd))
1109 scsi_eh_finish_cmd(scmd,
1110 done_q);
1111 }
1112 } else {
1113 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
1114 " failed chan: %d\n",
1115 current->comm,
1116 channel));
1117 }
1118 }
1119 return list_empty(work_q);
1120 }
1121
1122 /**
1123 * scsi_eh_host_reset - send a host reset
1124  * @work_q: list_head of failed commands.
1125 * @done_q: list_head for processed commands.
1126 **/
1127 static int scsi_eh_host_reset(struct list_head *work_q,
1128 struct list_head *done_q)
1129 {
1130 struct scsi_cmnd *scmd, *next;
1131 int rtn;
1132
1133 if (!list_empty(work_q)) {
1134 scmd = list_entry(work_q->next,
1135 struct scsi_cmnd, eh_entry);
1136
1137 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
1138 , current->comm));
1139
1140 rtn = scsi_try_host_reset(scmd);
1141 if (rtn == SUCCESS) {
1142 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1143 if (!scsi_device_online(scmd->device) ||
1144 (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
1145 !scsi_eh_tur(scmd))
1146 scsi_eh_finish_cmd(scmd, done_q);
1147 }
1148 } else {
1149 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
1150 " failed\n",
1151 current->comm));
1152 }
1153 }
1154 return list_empty(work_q);
1155 }
1156
1157 /**
1158 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1159  * @work_q: list_head of failed commands.
1160 * @done_q: list_head for processed commands.
1161 *
1162 **/
1163 static void scsi_eh_offline_sdevs(struct list_head *work_q,
1164 struct list_head *done_q)
1165 {
1166 struct scsi_cmnd *scmd, *next;
1167
1168 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1169 sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
1170 "not ready after error recovery\n");
1171 scsi_device_set_state(scmd->device, SDEV_OFFLINE);
1172 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
1173 /*
1174 * FIXME: Handle lost cmds.
1175 */
1176 }
1177 scsi_eh_finish_cmd(scmd, done_q);
1178 }
1179 return;
1180 }
1181
1182 /**
1183 * scsi_decide_disposition - Disposition a cmd on return from LLD.
1184 * @scmd: SCSI cmd to examine.
1185 *
1186 * Notes:
1187 * This is *only* called when we are examining the status after sending
1188 * out the actual data command. any commands that are queued for error
1189 * recovery (e.g. test_unit_ready) do *not* come through here.
1190 *
1191 * When this routine returns failed, it means the error handler thread
1192 * is woken. In cases where the error code indicates an error that
1193  *    doesn't require the error handler's intervention (i.e. we don't need to
1194 * abort/reset), this function should return SUCCESS.
1195 **/
1196 int scsi_decide_disposition(struct scsi_cmnd *scmd)
1197 {
1198 int rtn;
1199
1200 /*
1201 * if the device is offline, then we clearly just pass the result back
1202 * up to the top level.
1203 */
1204 if (!scsi_device_online(scmd->device)) {
1205 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
1206 " as SUCCESS\n",
1207 __FUNCTION__));
1208 return SUCCESS;
1209 }
1210
1211 /*
1212 * first check the host byte, to see if there is anything in there
1213 * that would indicate what we need to do.
1214 */
1215 switch (host_byte(scmd->result)) {
1216 case DID_PASSTHROUGH:
1217 /*
1218 * no matter what, pass this through to the upper layer.
1219 * nuke this special code so that it looks like we are saying
1220 * did_ok.
1221 */
1222 scmd->result &= 0xff00ffff;
1223 return SUCCESS;
1224 case DID_OK:
1225 /*
1226 * looks good. drop through, and check the next byte.
1227 */
1228 break;
1229 case DID_NO_CONNECT:
1230 case DID_BAD_TARGET:
1231 case DID_ABORT:
1232 /*
1233 * note - this means that we just report the status back
1234 * to the top level driver, not that we actually think
1235 * that it indicates SUCCESS.
1236 */
1237 return SUCCESS;
1238 /*
1239 * when the low level driver returns did_soft_error,
1240 * it is responsible for keeping an internal retry counter
1241 * in order to avoid endless loops (db)
1242 *
1243 * actually this is a bug in this function here. we should
1244 * be mindful of the maximum number of retries specified
1245 * and not get stuck in a loop.
1246 */
1247 case DID_SOFT_ERROR:
1248 goto maybe_retry;
1249 case DID_IMM_RETRY:
1250 return NEEDS_RETRY;
1251
1252 case DID_REQUEUE:
1253 return ADD_TO_MLQUEUE;
1254
1255 case DID_ERROR:
1256 if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1257 status_byte(scmd->result) == RESERVATION_CONFLICT)
1258 /*
1259 * execute reservation conflict processing code
1260 * lower down
1261 */
1262 break;
1263 /* fallthrough */
1264
1265 case DID_BUS_BUSY:
1266 case DID_PARITY:
1267 goto maybe_retry;
1268 case DID_TIME_OUT:
1269 /*
1270 * when we scan the bus, we get timeout messages for
1271 * these commands if there is no device available.
1272 * other hosts report did_no_connect for the same thing.
1273 */
1274 if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1275 scmd->cmnd[0] == INQUIRY)) {
1276 return SUCCESS;
1277 } else {
1278 return FAILED;
1279 }
1280 case DID_RESET:
1281 return SUCCESS;
1282 default:
1283 return FAILED;
1284 }
1285
1286 /*
1287 * next, check the message byte.
1288 */
1289 if (msg_byte(scmd->result) != COMMAND_COMPLETE)
1290 return FAILED;
1291
1292 /*
1293 * check the status byte to see if this indicates anything special.
1294 */
1295 switch (status_byte(scmd->result)) {
1296 case QUEUE_FULL:
1297 /*
1298 * the case of trying to send too many commands to a
1299 * tagged queueing device.
1300 */
1301 case BUSY:
1302 /*
1303 * device can't talk to us at the moment. Should only
1304 * occur (SAM-3) when the task queue is empty, so will cause
1305 * the empty queue handling to trigger a stall in the
1306 * device.
1307 */
1308 return ADD_TO_MLQUEUE;
1309 case GOOD:
1310 case COMMAND_TERMINATED:
1311 case TASK_ABORTED:
1312 return SUCCESS;
1313 case CHECK_CONDITION:
1314 rtn = scsi_check_sense(scmd);
1315 if (rtn == NEEDS_RETRY)
1316 goto maybe_retry;
1317 /* if rtn == FAILED, we have no sense information;
1318 * returning FAILED will wake the error handler thread
1319 * to collect the sense and redo the decide
1320 * disposition */
1321 return rtn;
1322 case CONDITION_GOOD:
1323 case INTERMEDIATE_GOOD:
1324 case INTERMEDIATE_C_GOOD:
1325 case ACA_ACTIVE:
1326 /*
1327 * who knows? FIXME(eric)
1328 */
1329 return SUCCESS;
1330
1331 case RESERVATION_CONFLICT:
1332 sdev_printk(KERN_INFO, scmd->device,
1333 "reservation conflict\n");
1334 return SUCCESS; /* causes immediate i/o error */
1335 default:
1336 return FAILED;
1337 }
1338 return FAILED;
1339
1340 maybe_retry:
1341
1342 /* we requeue for retry because the error was retryable, and
1343 * the request was not marked fast fail. Note that above,
1344 * even if the request is marked fast fail, we still requeue
1345 * for queue congestion conditions (QUEUE_FULL or BUSY) */
1346 if ((++scmd->retries) <= scmd->allowed
1347 && !blk_noretry_request(scmd->request)) {
1348 return NEEDS_RETRY;
1349 } else {
1350 /*
1351 * no more retries - report this one back to upper level.
1352 */
1353 return SUCCESS;
1354 }
1355 }
1356
1357 /**
1358 * scsi_eh_lock_door - Prevent medium removal for the specified device
1359 * @sdev: SCSI device to prevent medium removal
1360 *
1361 * Locking:
1362 * We must be called from process context; scsi_allocate_request()
1363 * may sleep.
1364 *
1365 * Notes:
1366 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1367 * head of the devices request queue, and continue.
1368 *
1369 * Bugs:
1370 * scsi_allocate_request() may sleep waiting for existing requests to
1371 * be processed. However, since we haven't kicked off any request
1372 * processing for this host, this may deadlock.
1373 *
1374 * If scsi_allocate_request() fails for what ever reason, we
1375 * completely forget to lock the door.
1376 **/
1377 static void scsi_eh_lock_door(struct scsi_device *sdev)
1378 {
1379 unsigned char cmnd[MAX_COMMAND_SIZE];
1380
1381 cmnd[0] = ALLOW_MEDIUM_REMOVAL;
1382 cmnd[1] = 0;
1383 cmnd[2] = 0;
1384 cmnd[3] = 0;
1385 cmnd[4] = SCSI_REMOVAL_PREVENT;
1386 cmnd[5] = 0;
1387
1388 scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ,
1389 5, NULL, NULL, GFP_KERNEL);
1390 }
1391
1392
1393 /**
1394 * scsi_restart_operations - restart io operations to the specified host.
1395 * @shost: Host we are restarting.
1396 *
1397 * Notes:
1398 * When we entered the error handler, we blocked all further i/o to
1399 * this device. we need to 'reverse' this process.
1400 **/
1401 static void scsi_restart_operations(struct Scsi_Host *shost)
1402 {
1403 struct scsi_device *sdev;
1404 unsigned long flags;
1405
1406 /*
1407 * If the door was locked, we need to insert a door lock request
1408 * onto the head of the SCSI request queue for the device. There
1409 * is no point trying to lock the door of an off-line device.
1410 */
1411 shost_for_each_device(sdev, shost) {
1412 if (scsi_device_online(sdev) && sdev->locked)
1413 scsi_eh_lock_door(sdev);
1414 }
1415
1416 /*
1417 * next free up anything directly waiting upon the host. this
1418 * will be requests for character device operations, and also for
1419 * ioctls to queued block devices.
1420 */
1421 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1422 __FUNCTION__));
1423
1424 spin_lock_irqsave(shost->host_lock, flags);
1425 if (scsi_host_set_state(shost, SHOST_RUNNING))
1426 if (scsi_host_set_state(shost, SHOST_CANCEL))
1427 BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
1428 spin_unlock_irqrestore(shost->host_lock, flags);
1429
1430 wake_up(&shost->host_wait);
1431
1432 /*
1433 * finally we need to re-initiate requests that may be pending. we will
1434 * have had everything blocked while error handling is taking place, and
1435 * now that error recovery is done, we will need to ensure that these
1436 * requests are started.
1437 */
1438 scsi_run_host_queues(shost);
1439 }
1440
1441 /**
1442 * scsi_eh_ready_devs - check device ready state and recover if not.
1443 * @shost: host to be recovered.
1444  * @done_q: list_head for processed commands.
1445 *
1446 **/
1447 void scsi_eh_ready_devs(struct Scsi_Host *shost,
1448 struct list_head *work_q,
1449 struct list_head *done_q)
1450 {
1451 if (!scsi_eh_stu(shost, work_q, done_q))
1452 if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
1453 if (!scsi_eh_bus_reset(shost, work_q, done_q))
1454 if (!scsi_eh_host_reset(work_q, done_q))
1455 scsi_eh_offline_sdevs(work_q, done_q);
1456 }
1457 EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
1458
1459 /**
1460 * scsi_eh_flush_done_q - finish processed commands or retry them.
1461 * @done_q: list_head of processed commands.
1462 *
1463 **/
1464 void scsi_eh_flush_done_q(struct list_head *done_q)
1465 {
1466 struct scsi_cmnd *scmd, *next;
1467
1468 list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
1469 list_del_init(&scmd->eh_entry);
1470 if (scsi_device_online(scmd->device) &&
1471 !blk_noretry_request(scmd->request) &&
1472 (++scmd->retries <= scmd->allowed)) {
1473 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
1474 " retry cmd: %p\n",
1475 current->comm,
1476 scmd));
1477 scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
1478 } else {
1479 /*
1480  			 * If we just got sense for the device (called
1481 * scsi_eh_get_sense), scmd->result is already
1482 * set, do not set DRIVER_TIMEOUT.
1483 */
1484 if (!scmd->result)
1485 scmd->result |= (DRIVER_TIMEOUT << 24);
1486 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
1487 " cmd: %p\n",
1488 current->comm, scmd));
1489 scsi_finish_command(scmd);
1490 }
1491 }
1492 }
1493 EXPORT_SYMBOL(scsi_eh_flush_done_q);
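/*
 * The exported helpers above (scsi_eh_get_sense, scsi_eh_ready_devs,
 * scsi_eh_flush_done_q, ...) are the building blocks from which a
 * transport's eh_strategy_handler can be assembled; scsi_unjam_host()
 * below is the default composition of the same steps.
 */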
1494
1495 /**
1496 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
1497 * @shost: Host to unjam.
1498 *
1499 * Notes:
1500 * When we come in here, we *know* that all commands on the bus have
1501 * either completed, failed or timed out. we also know that no further
1502 * commands are being sent to the host, so things are relatively quiet
1503 * and we have freedom to fiddle with things as we wish.
1504 *
1505 * This is only the *default* implementation. it is possible for
1506 * individual drivers to supply their own version of this function, and
1507 * if the maintainer wishes to do this, it is strongly suggested that
1508 * this function be taken as a template and modified. this function
1509 * was designed to correctly handle problems for about 95% of the
1510 * different cases out there, and it should always provide at least a
1511 * reasonable amount of error recovery.
1512 *
1513 * Any command marked 'failed' or 'timeout' must eventually have
1514  *	scsi_finish_command() called for it.  we do all of the retry stuff
1515 * here, so when we restart the host after we return it should have an
1516 * empty queue.
1517 **/
1518 static void scsi_unjam_host(struct Scsi_Host *shost)
1519 {
1520 unsigned long flags;
1521 LIST_HEAD(eh_work_q);
1522 LIST_HEAD(eh_done_q);
1523
1524 spin_lock_irqsave(shost->host_lock, flags);
1525 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1526 spin_unlock_irqrestore(shost->host_lock, flags);
1527
1528 SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
1529
1530 if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
1531 if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
1532 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
1533
1534 scsi_eh_flush_done_q(&eh_done_q);
1535 }
1536
1537 /**
1538 * scsi_error_handler - SCSI error handler thread
1539 * @data: Host for which we are running.
1540 *
1541 * Notes:
1542 * This is the main error handling loop. This is run as a kernel thread
1543 * for every SCSI host and handles all error handling activity.
1544 **/
1545 int scsi_error_handler(void *data)
1546 {
1547 struct Scsi_Host *shost = data;
1548
1549 /*
1550 * We use TASK_INTERRUPTIBLE so that the thread is not
1551 * counted against the load average as a running process.
1552 * We never actually get interrupted because kthread_run
1553  	 * disables signal delivery for the created thread.
1554 */
1555 set_current_state(TASK_INTERRUPTIBLE);
1556 while (!kthread_should_stop()) {
1557 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
1558 shost->host_failed != shost->host_busy) {
1559 SCSI_LOG_ERROR_RECOVERY(1,
1560 printk("Error handler scsi_eh_%d sleeping\n",
1561 shost->host_no));
1562 schedule();
1563 set_current_state(TASK_INTERRUPTIBLE);
1564 continue;
1565 }
1566
1567 __set_current_state(TASK_RUNNING);
1568 SCSI_LOG_ERROR_RECOVERY(1,
1569 printk("Error handler scsi_eh_%d waking up\n",
1570 shost->host_no));
1571
1572 /*
1573 * We have a host that is failing for some reason. Figure out
1574 * what we need to do to get it up and online again (if we can).
1575 * If we fail, we end up taking the thing offline.
1576 */
1577 if (shost->transportt->eh_strategy_handler)
1578 shost->transportt->eh_strategy_handler(shost);
1579 else
1580 scsi_unjam_host(shost);
1581
1582 /*
1583 * Note - if the above fails completely, the action is to take
1584 * individual devices offline and flush the queue of any
1585 * outstanding requests that may have been pending. When we
1586 * restart, we restart any I/O to any other devices on the bus
1587 * which are still online.
1588 */
1589 scsi_restart_operations(shost);
1590 set_current_state(TASK_INTERRUPTIBLE);
1591 }
1592 __set_current_state(TASK_RUNNING);
1593
1594 SCSI_LOG_ERROR_RECOVERY(1,
1595 printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
1596 shost->ehandler = NULL;
1597 return 0;
1598 }
1599
1600 /*
1601 * Function: scsi_report_bus_reset()
1602 *
1603 * Purpose: Utility function used by low-level drivers to report that
1604 * they have observed a bus reset on the bus being handled.
1605 *
1606 * Arguments: shost - Host in question
1607 * channel - channel on which reset was observed.
1608 *
1609 * Returns: Nothing
1610 *
1611 * Lock status: Host lock must be held.
1612 *
1613 * Notes: This only needs to be called if the reset is one which
1614 * originates from an unknown location. Resets originated
1615 * by the mid-level itself don't need to call this, but there
1616 * should be no harm.
1617 *
1618 * The main purpose of this is to make sure that a CHECK_CONDITION
1619 * is properly treated.
1620 */
1621 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
1622 {
1623 struct scsi_device *sdev;
1624
1625 __shost_for_each_device(sdev, shost) {
1626 if (channel == sdev_channel(sdev)) {
1627 sdev->was_reset = 1;
1628 sdev->expecting_cc_ua = 1;
1629 }
1630 }
1631 }
1632 EXPORT_SYMBOL(scsi_report_bus_reset);
1633
1634 /*
1635 * Function: scsi_report_device_reset()
1636 *
1637 * Purpose: Utility function used by low-level drivers to report that
1638 * they have observed a device reset on the device being handled.
1639 *
1640 * Arguments: shost - Host in question
1641 * channel - channel on which reset was observed
1642 * target - target on which reset was observed
1643 *
1644 * Returns: Nothing
1645 *
1646 * Lock status: Host lock must be held
1647 *
1648 * Notes: This only needs to be called if the reset is one which
1649 * originates from an unknown location. Resets originated
1650 * by the mid-level itself don't need to call this, but there
1651 * should be no harm.
1652 *
1653 * The main purpose of this is to make sure that a CHECK_CONDITION
1654 * is properly treated.
1655 */
1656 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
1657 {
1658 struct scsi_device *sdev;
1659
1660 __shost_for_each_device(sdev, shost) {
1661 if (channel == sdev_channel(sdev) &&
1662 target == sdev_id(sdev)) {
1663 sdev->was_reset = 1;
1664 sdev->expecting_cc_ua = 1;
1665 }
1666 }
1667 }
1668 EXPORT_SYMBOL(scsi_report_device_reset);
1669
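/*
 * Placeholder completion callback for the command that scsi_reset_provider()
 * builds below; the reset is driven synchronously through the eh_*_handler
 * entry points, so nothing needs to happen here.
 */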
1670 static void
1671 scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
1672 {
1673 }
1674
1675 /*
1676 * Function: scsi_reset_provider
1677 *
1678 * Purpose: Send requested reset to a bus or device at any phase.
1679 *
1680 * Arguments: device - device to send reset to
1681 * flag - reset type (see scsi.h)
1682 *
1683 * Returns: SUCCESS/FAILURE.
1684 *
1685 * Notes: This is used by the SCSI Generic driver to provide
1686 * Bus/Device reset capability.
1687 */
1688 int
1689 scsi_reset_provider(struct scsi_device *dev, int flag)
1690 {
1691 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
1692 struct Scsi_Host *shost = dev->host;
1693 struct request req;
1694 unsigned long flags;
1695 int rtn;
1696
1697 scmd->request = &req;
1698 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1699
1700 memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
1701
1702 scmd->scsi_done = scsi_reset_provider_done_command;
1703 scmd->request_buffer = NULL;
1704 scmd->request_bufflen = 0;
1705
1706 scmd->cmd_len = 0;
1707
1708 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1709
1710 init_timer(&scmd->eh_timeout);
1711
1712 spin_lock_irqsave(shost->host_lock, flags);
1713 shost->tmf_in_progress = 1;
1714 spin_unlock_irqrestore(shost->host_lock, flags);
1715
1716 switch (flag) {
1717 case SCSI_TRY_RESET_DEVICE:
1718 rtn = scsi_try_bus_device_reset(scmd);
1719 if (rtn == SUCCESS)
1720 break;
1721 /* FALLTHROUGH */
1722 case SCSI_TRY_RESET_BUS:
1723 rtn = scsi_try_bus_reset(scmd);
1724 if (rtn == SUCCESS)
1725 break;
1726 /* FALLTHROUGH */
1727 case SCSI_TRY_RESET_HOST:
1728 rtn = scsi_try_host_reset(scmd);
1729 break;
1730 default:
1731 rtn = FAILED;
1732 }
1733
1734 spin_lock_irqsave(shost->host_lock, flags);
1735 shost->tmf_in_progress = 0;
1736 spin_unlock_irqrestore(shost->host_lock, flags);
1737
1738 /*
1739 * be sure to wake up anyone who was sleeping or had their queue
1740 * suspended while we performed the TMF.
1741 */
1742 SCSI_LOG_ERROR_RECOVERY(3,
1743 printk("%s: waking up host to restart after TMF\n",
1744 __FUNCTION__));
1745
1746 wake_up(&shost->host_wait);
1747
1748 scsi_run_host_queues(shost);
1749
1750 scsi_next_command(scmd);
1751 return rtn;
1752 }
1753 EXPORT_SYMBOL(scsi_reset_provider);
1754
1755 /**
1756 * scsi_normalize_sense - normalize main elements from either fixed or
1757 * descriptor sense data format into a common format.
1758 *
1759 * @sense_buffer: byte array containing sense data returned by device
1760 * @sb_len: number of valid bytes in sense_buffer
1761 * @sshdr: pointer to instance of structure that common
1762 * elements are written to.
1763 *
1764 * Notes:
1765 * The "main elements" from sense data are: response_code, sense_key,
1766 * asc, ascq and additional_length (only for descriptor format).
1767 *
1768 * Typically this function can be called after a device has
1769 * responded to a SCSI command with the CHECK_CONDITION status.
1770 *
1771 * Return value:
1772 * 1 if valid sense data information found, else 0;
1773 **/
1774 int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1775 struct scsi_sense_hdr *sshdr)
1776 {
1777 if (!sense_buffer || !sb_len)
1778 return 0;
1779
1780 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
1781
1782 sshdr->response_code = (sense_buffer[0] & 0x7f);
1783
1784 if (!scsi_sense_valid(sshdr))
1785 return 0;
1786
1787 if (sshdr->response_code >= 0x72) {
1788 /*
1789 * descriptor format
1790 */
1791 if (sb_len > 1)
1792 sshdr->sense_key = (sense_buffer[1] & 0xf);
1793 if (sb_len > 2)
1794 sshdr->asc = sense_buffer[2];
1795 if (sb_len > 3)
1796 sshdr->ascq = sense_buffer[3];
1797 if (sb_len > 7)
1798 sshdr->additional_length = sense_buffer[7];
1799 } else {
1800 /*
1801 * fixed format
1802 */
1803 if (sb_len > 2)
1804 sshdr->sense_key = (sense_buffer[2] & 0xf);
1805 if (sb_len > 7) {
1806 sb_len = (sb_len < (sense_buffer[7] + 8)) ?
1807 sb_len : (sense_buffer[7] + 8);
1808 if (sb_len > 12)
1809 sshdr->asc = sense_buffer[12];
1810 if (sb_len > 13)
1811 sshdr->ascq = sense_buffer[13];
1812 }
1813 }
1814
1815 return 1;
1816 }
1817 EXPORT_SYMBOL(scsi_normalize_sense);
1818
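/*
 * scsi_command_normalize_sense - convenience wrapper that runs
 * scsi_normalize_sense() on @cmd->sense_buffer.
 */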
1819 int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
1820 struct scsi_sense_hdr *sshdr)
1821 {
1822 return scsi_normalize_sense(cmd->sense_buffer,
1823 sizeof(cmd->sense_buffer), sshdr);
1824 }
1825 EXPORT_SYMBOL(scsi_command_normalize_sense);
1826
1827 /**
1828 * scsi_sense_desc_find - search for a given descriptor type in
1829 * descriptor sense data format.
1830 *
1831 * @sense_buffer: byte array of descriptor format sense data
1832 * @sb_len: number of valid bytes in sense_buffer
1833 * @desc_type: value of descriptor type to find
1834 * (e.g. 0 -> information)
1835 *
1836 * Notes:
1837 * only valid when sense data is in descriptor format
1838 *
1839 * Return value:
1840 * pointer to start of (first) descriptor if found else NULL
1841 **/
1842 const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
1843 int desc_type)
1844 {
1845 int add_sen_len, add_len, desc_len, k;
1846 const u8 * descp;
1847
1848 if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
1849 return NULL;
1850 if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
1851 return NULL;
1852 add_sen_len = (add_sen_len < (sb_len - 8)) ?
1853 add_sen_len : (sb_len - 8);
1854 descp = &sense_buffer[8];
1855 for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
1856 descp += desc_len;
1857 add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
1858 desc_len = add_len + 2;
1859 if (descp[0] == desc_type)
1860 return descp;
1861 if (add_len < 0) // short descriptor ??
1862 break;
1863 }
1864 return NULL;
1865 }
1866 EXPORT_SYMBOL(scsi_sense_desc_find);
1867
1868 /**
1869 * scsi_get_sense_info_fld - attempts to get information field from
1870 * sense data (either fixed or descriptor format)
1871 *
1872 * @sense_buffer: byte array of sense data
1873 * @sb_len: number of valid bytes in sense_buffer
1874  * @info_out: pointer to a 64-bit integer where the 8 or 4 byte information
1875 * field will be placed if found.
1876 *
1877 * Return value:
1878 * 1 if information field found, 0 if not found.
1879 **/
1880 int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
1881 u64 * info_out)
1882 {
1883 int j;
1884 const u8 * ucp;
1885 u64 ull;
1886
1887 if (sb_len < 7)
1888 return 0;
1889 switch (sense_buffer[0] & 0x7f) {
1890 case 0x70:
1891 case 0x71:
1892 if (sense_buffer[0] & 0x80) {
1893 *info_out = (sense_buffer[3] << 24) +
1894 (sense_buffer[4] << 16) +
1895 (sense_buffer[5] << 8) + sense_buffer[6];
1896 return 1;
1897 } else
1898 return 0;
1899 case 0x72:
1900 case 0x73:
1901 ucp = scsi_sense_desc_find(sense_buffer, sb_len,
1902 0 /* info desc */);
1903 if (ucp && (0xa == ucp[1])) {
1904 ull = 0;
1905 for (j = 0; j < 8; ++j) {
1906 if (j > 0)
1907 ull <<= 8;
1908 ull |= ucp[4 + j];
1909 }
1910 *info_out = ull;
1911 return 1;
1912 } else
1913 return 0;
1914 default:
1915 return 0;
1916 }
1917 }
1918 EXPORT_SYMBOL(scsi_get_sense_info_fld);