[SCSI] hide EH backup data outside the scsi_cmnd
drivers/scsi/scsi.c
/*
 *  scsi.c  Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *          Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding requests, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

static void scsi_done(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)

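/*
 * For example: READ(10) has opcode 0x28, so ((0x28 >> 5) & 7) == 1, which
 * falls in groups 0-5 and resolves via COMMAND_SIZE() to a 10-byte CDB.
 * A vendor-specific opcode such as 0xC1 lands in group 6, so CDB_SIZE()
 * falls back to whatever the submitter stored in cmd->cmd_len.
 */
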
/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"Unknown          ",
	"Unknown          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);

struct scsi_host_cmd_pool {
	kmem_cache_t	*slab;
	unsigned int	users;
	char		*name;
	unsigned int	slab_flags;
	gfp_t		gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.name		= "scsi_cmd_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.name		= "scsi_cmd_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

static DEFINE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
					    gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			       gfp_mask | shost->cmd_pool->gfp_mask);

	if (unlikely(!cmd)) {
		unsigned long flags;

		spin_lock_irqsave(&shost->free_list_lock, flags);
		if (likely(!list_empty(&shost->free_list))) {
			cmd = list_entry(shost->free_list.next,
					 struct scsi_cmnd, list);
			list_del_init(&cmd->list);
		}
		spin_unlock_irqrestore(&shost->free_list_lock, flags);
	}

	return cmd;
}

/*
 * Function:	scsi_get_command()
 *
 * Purpose:	Allocate and setup a scsi command block
 *
 * Arguments:	dev	- parent scsi device
 *		gfp_mask- allocator flags
 *
 * Returns:	The allocated scsi command structure, or NULL on failure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
	struct scsi_cmnd *cmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&dev->sdev_gendev))
		return NULL;

	cmd = __scsi_get_command(dev->host, gfp_mask);

	if (likely(cmd != NULL)) {
		unsigned long flags;

		memset(cmd, 0, sizeof(*cmd));
		cmd->device = dev;
		init_timer(&cmd->eh_timeout);
		INIT_LIST_HEAD(&cmd->list);
		spin_lock_irqsave(&dev->list_lock, flags);
		list_add_tail(&cmd->list, &dev->cmd_list);
		spin_unlock_irqrestore(&dev->list_lock, flags);
		cmd->jiffies_at_alloc = jiffies;
	} else
		put_device(&dev->sdev_gendev);

	return cmd;
}
EXPORT_SYMBOL(scsi_get_command);

/*
 * Function:	scsi_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	/* serious error if the command hasn't come from a device list */
	spin_lock_irqsave(&cmd->device->list_lock, flags);
	BUG_ON(list_empty(&cmd->list));
	list_del_init(&cmd->list);
	spin_unlock(&cmd->device->list_lock);
	/* changing locks here, don't need to restore the irq state */
	spin_lock(&shost->free_list_lock);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		kmem_cache_free(shost->cmd_pool->slab, cmd);

	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);

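/*
 * Illustrative sketch (not part of this file): the typical allocate/free
 * pairing as a caller would use it.  The error handling shown is an
 * assumption for illustration only.
 *
 *	struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... fill in cmd->cmnd, cmd->cmd_len, data buffers ...
 *	scsi_put_command(cmd);	// drops the list entry, the memory and
 *				// the device reference taken at allocation
 */
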
/*
 * Function:	scsi_setup_command_freelist()
 *
 * Purpose:	Setup the command freelist for a scsi host.
 *
 * Arguments:	shost	- host to allocate the freelist for.
 *
 * Returns:	0 on success, -ENOMEM on failure.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
	struct scsi_host_cmd_pool *pool;
	struct scsi_cmnd *cmd;

	spin_lock_init(&shost->free_list_lock);
	INIT_LIST_HEAD(&shost->free_list);

	/*
	 * Select a command slab for this host and create it if not
	 * yet existent.
	 */
	mutex_lock(&host_cmd_pool_mutex);
	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
	if (!pool->users) {
		pool->slab = kmem_cache_create(pool->name,
				sizeof(struct scsi_cmnd), 0,
				pool->slab_flags, NULL, NULL);
		if (!pool->slab)
			goto fail;
	}

	pool->users++;
	shost->cmd_pool = pool;
	mutex_unlock(&host_cmd_pool_mutex);

	/*
	 * Get one backup command for this host.
	 */
	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
			GFP_KERNEL | shost->cmd_pool->gfp_mask);
	if (!cmd)
		goto fail2;
	list_add(&cmd->list, &shost->free_list);
	return 0;

 fail2:
	if (!--pool->users)
		kmem_cache_destroy(pool->slab);
	return -ENOMEM;
 fail:
	mutex_unlock(&host_cmd_pool_mutex);
	return -ENOMEM;
}

/*
 * Function:	scsi_destroy_command_freelist()
 *
 * Purpose:	Release the command freelist for a scsi host.
 *
 * Arguments:	shost	- host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
	while (!list_empty(&shost->free_list)) {
		struct scsi_cmnd *cmd;

		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
		list_del_init(&cmd->list);
		kmem_cache_free(shost->cmd_pool->slab, cmd);
	}

	mutex_lock(&host_cmd_pool_mutex);
	if (!--shost->cmd_pool->users)
		kmem_cache_destroy(shost->cmd_pool->slab);
	mutex_unlock(&host_cmd_pool_mutex);
}

#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			sdev = cmd->device;
			sdev_printk(KERN_INFO, sdev, "send ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * spaces to match disposition and cmd->result
			 * output in scsi_log_completion.
			 */
			printk(" ");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " done = 0x%p, queuecommand 0x%p\n",
				       cmd->request_buffer, cmd->request_bufflen,
				       cmd->done,
				       sdev->host->hostt->queuecommand);
			}
		}
	}
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;
	struct scsi_device *sdev;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			sdev = cmd->device;
			sdev_printk(KERN_INFO, sdev, "done ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS");
				break;
			case NEEDS_RETRY:
				printk("RETRY  ");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE");
				break;
			case FAILED:
				printk("FAILED ");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT");
				break;
			default:
				printk("UNKNOWN");
			}
			printk(" %8x ", cmd->result);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION) {
				/*
				 * XXX The scsi_print_sense formatting/prefix
				 * doesn't match this function.
				 */
				scsi_print_sense("", cmd);
			}
			if (level > 3) {
				printk(KERN_INFO "scsi host busy %d failed %d\n",
				       sdev->host->host_busy,
				       sdev->host->host_failed);
			}
		}
	}
}
#endif

/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	cmd->serial_number = host->cmd_serial_number++;
	if (cmd->serial_number == 0)
		cmd->serial_number = host->cmd_serial_number++;

	cmd->pid = host->cmd_pid++;
	if (cmd->pid == 0)
		cmd->pid = host->cmd_pid++;
}

/*
 * Function:	scsi_dispatch_cmd
 *
 * Purpose:	Dispatch a command to the low-level driver.
 *
 * Arguments:	cmd - command block we are dispatching.
 *
 * Notes:
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	unsigned long flags = 0;
	unsigned long timeout;
	int rtn = 0;

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		atomic_inc(&cmd->device->iorequest_cnt);
		__scsi_done(cmd);
		/* return 0 (because the command has been processed) */
		goto out;
	}

	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
		/*
		 * in SDEV_BLOCK, the command is just put back on the device
		 * queue.  The suspend state has already blocked the queue so
		 * future requests should not occur until the device
		 * transitions out of the suspend state.
		 */
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));

		/*
		 * NOTE: rtn is still zero here because we don't need the
		 * queue to be plugged on return (it's already stopped)
		 */
		goto out;
	}

	/*
	 * If SCSI-2 or lower, store the LUN value in cmnd.
	 */
	if (cmd->device->scsi_level <= SCSI_2 &&
	    cmd->device->scsi_level != SCSI_UNKNOWN) {
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);
	}

	/*
	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
	 * we can avoid the drive not being ready.
	 */
	timeout = host->last_reset + MIN_RESET_DELAY;

	if (host->resetting && time_before(jiffies, timeout)) {
		int ticks_remaining = timeout - jiffies;
		/*
		 * NOTE: This may be executed from within an interrupt
		 * handler!  This is bad, but for now, it'll do.  The irq
		 * level of the interrupt handler has been masked out by the
		 * platform dependent interrupt handling code already, so the
		 * sti() here will not cause another call to the SCSI host's
		 * interrupt handler (assuming there is one irq-level per
		 * host).
		 */
		while (--ticks_remaining >= 0)
			mdelay(1 + 999 / HZ);
		host->resetting = 0;
	}

	/*
	 * AK: unlikely race here: for some reason the timer could
	 * expire before the serial number is set up below.
	 */
	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

	scsi_log_send(cmd);

	/*
	 * We will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : command too long.\n"));
		cmd->result = (DID_ABORT << 16);

		scsi_done(cmd);
		goto out;
	}

	spin_lock_irqsave(host->host_lock, flags);
	scsi_cmd_get_serial(host, cmd);

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
	} else {
		rtn = host->hostt->queuecommand(cmd, scsi_done);
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	if (rtn) {
		if (scsi_delete_timer(cmd)) {
			atomic_inc(&cmd->device->iodone_cnt);
			scsi_queue_insert(cmd,
					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
					  rtn : SCSI_MLQUEUE_HOST_BUSY);
		}
		SCSI_LOG_MLQUEUE(3,
				 printk("queuecommand : request rejected\n"));
	}

 out:
	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
	return rtn;
}

/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_req_abort_cmd -- Request command recovery for the specified command
 * @cmd: pointer to the SCSI command of interest
 *
 * This function requests that SCSI Core start recovery for the
 * command by deleting the timer and adding the command to the eh
 * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
 * implement their own error recovery MAY ignore the timeout event if
 * they generated scsi_req_abort_cmd.
 */
void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
{
	if (!scsi_delete_timer(cmd))
		return;
	scsi_times_out(cmd);
}
EXPORT_SYMBOL(scsi_req_abort_cmd);

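/*
 * Illustrative sketch (an assumption, not taken from this file): an LLDD
 * that detects a wedged command in its own interrupt handler could hand it
 * to the error handler early instead of waiting for the timer to fire:
 *
 *	if (my_hw_says_command_is_stuck(cmd))	// hypothetical LLDD check
 *		scsi_req_abort_cmd(cmd);	// delete timer, enter EH
 */
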
1da177e4
LT
599/**
600 * scsi_done - Enqueue the finished SCSI command into the done queue.
601 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
602 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
603 *
604 * This function is the mid-level's (SCSI Core) interrupt routine, which
605 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
606 * the command to the done queue for further processing.
607 *
608 * This is the producer of the done queue who enqueues at the tail.
609 *
610 * This function is interrupt context safe.
611 */
52c1da39 612static void scsi_done(struct scsi_cmnd *cmd)
1da177e4
LT
613{
614 /*
615 * We don't have to worry about this one timing out any more.
616 * If we are unable to remove the timer, then the command
617 * has already timed out. In which case, we have no choice but to
618 * let the timeout function run, as we have no idea where in fact
619 * that function could really be. It might be on another processor,
620 * etc, etc.
621 */
622 if (!scsi_delete_timer(cmd))
623 return;
624 __scsi_done(cmd);
625}
626
627/* Private entry to scsi_done() to complete a command when the timer
628 * isn't running --- used by scsi_times_out */
629void __scsi_done(struct scsi_cmnd *cmd)
630{
1aea6434 631 struct request *rq = cmd->request;
1da177e4
LT
632
633 /*
634 * Set the serial numbers back to zero
635 */
636 cmd->serial_number = 0;
1da177e4
LT
637
638 atomic_inc(&cmd->device->iodone_cnt);
639 if (cmd->result)
640 atomic_inc(&cmd->device->ioerr_cnt);
641
1aea6434
JA
642 BUG_ON(!rq);
643
1da177e4 644 /*
1aea6434
JA
645 * The uptodate/nbytes values don't matter, as we allow partial
646 * completes and thus will check this in the softirq callback
1da177e4 647 */
1aea6434
JA
648 rq->completion_data = cmd;
649 blk_complete_request(rq);
1da177e4
LT
650}
651
/*
 * Function:	scsi_retry_command
 *
 * Purpose:	Send a command back to the low level to be retried.
 *
 * Notes:	This command is always executed in the context of the
 *		bottom half handler, or the error handler thread. Low
 *		level drivers should not become re-entrant as a result of
 *		this.
 */
int scsi_retry_command(struct scsi_cmnd *cmd)
{
	/*
	 * Zero the sense information from the last time we tried
	 * this command.
	 */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}

/*
 * Function:	scsi_finish_command
 *
 * Purpose:	Pass command off to upper layer for finishing of I/O
 *		request, waking processes that are waiting on results,
 *		etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;

	scsi_device_unbusy(sdev);

	/*
	 * Clear the flags which say that the device/host is no longer
	 * capable of accepting new commands.  These are set in scsi_queue.c
	 * for both the queue full condition on a device, and for a
	 * host full condition on the host.
	 *
	 * XXX(hch): What about locking?
	 */
	shost->host_blocked = 0;
	sdev->device_blocked = 0;

	/*
	 * If we have valid sense information, then some kind of recovery
	 * must have taken place.  Make a note of this.
	 */
	if (SCSI_SENSE_VALID(cmd))
		cmd->result |= (DRIVER_SENSE << 24);

	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
				"Notifying upper driver of completion "
				"(result %x)\n", cmd->result));

	cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);

/*
 * Function:	scsi_adjust_queue_depth()
 *
 * Purpose:	Allow low level drivers to tell us to change the queue depth
 *		on a specific SCSI device
 *
 * Arguments:	sdev	- SCSI Device in question
 *		tagged	- Do we use tagged queueing (non-0) or do we treat
 *			  this device as an untagged device (0)
 *		tags	- Number of tags allowed if tagged queueing enabled,
 *			  or number of commands the low level driver can
 *			  queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:	Nothing
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		the right thing depending on whether or not the device is
 *		currently active and whether or not it even has the
 *		command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
	unsigned long flags;

	/*
	 * refuse to set tagged depth to an unworkable size
	 */
	if (tags <= 0)
		return;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

	/* Check to see if the queue is managed by the block layer
	 * if it is, and we fail to adjust the depth, exit */
	if (blk_queue_tagged(sdev->request_queue) &&
	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
		goto out;

	sdev->queue_depth = tags;
	switch (tagged) {
	case MSG_ORDERED_TAG:
		sdev->ordered_tags = 1;
		sdev->simple_tags = 1;
		break;
	case MSG_SIMPLE_TAG:
		sdev->ordered_tags = 0;
		sdev->simple_tags = 1;
		break;
	default:
		sdev_printk(KERN_WARNING, sdev,
			    "scsi_adjust_queue_depth, bad queue type, "
			    "disabled\n");
	case 0:
		sdev->ordered_tags = sdev->simple_tags = 0;
		sdev->queue_depth = tags;
		break;
	}
 out:
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);

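/*
 * Illustrative sketch (an assumption, not taken from this file): an LLDD
 * would typically call this from its slave_configure() hook; the function
 * name and the depth of 64 below are hypothetical.
 *
 *	static int my_slave_configure(struct scsi_device *sdev)
 *	{
 *		if (sdev->tagged_supported)
 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
 *		else
 *			scsi_adjust_queue_depth(sdev, 0,
 *						sdev->host->cmd_per_lun);
 *		return 0;
 *	}
 */
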
/*
 * Function:	scsi_track_queue_full()
 *
 * Purpose:	This function will track successive QUEUE_FULL events on a
 *		specific SCSI device to determine if and when there is a
 *		need to adjust the queue depth on the device.
 *
 * Arguments:	sdev	- SCSI Device in question
 *		depth	- Current number of outstanding SCSI commands on
 *			  this device, not counting the one returned as
 *			  QUEUE_FULL.
 *
 * Returns:	0 - No change needed
 *		>0 - Adjust queue depth to this new depth
 *		-1 - Drop back to untagged operation using host->cmd_per_lun
 *		     as the untagged command depth
 *
 * Lock Status:	None held on entry
 *
 * Notes:	Low level drivers may call this at any time and we will do
 *		"The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
	if ((jiffies >> 4) == sdev->last_queue_full_time)
		return 0;

	sdev->last_queue_full_time = (jiffies >> 4);
	if (sdev->last_queue_full_depth != depth) {
		sdev->last_queue_full_count = 1;
		sdev->last_queue_full_depth = depth;
	} else {
		sdev->last_queue_full_count++;
	}

	if (sdev->last_queue_full_count <= 10)
		return 0;
	if (sdev->last_queue_full_depth < 8) {
		/* Drop back to untagged */
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
		return -1;
	}

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);

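/*
 * Illustrative sketch (an assumption, not taken from this file): an LLDD
 * that sees a QUEUE FULL status in its completion handler just reports the
 * current outstanding-command count; any depth change is applied here.
 * my_outstanding_cmds() is a hypothetical per-driver accounting helper.
 *
 *	if (status_byte(cmd->result) == QUEUE_FULL)
 *		scsi_track_queue_full(cmd->device,
 *				      my_outstanding_cmds(cmd->device));
 */
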
/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
		return -ENXIO;
	if (!get_device(&sdev->sdev_gendev))
		return -ENXIO;
	if (!try_module_get(sdev->host->hostt->module)) {
		put_device(&sdev->sdev_gendev);
		return -ENXIO;
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Releases a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
	module_put(sdev->host->hostt->module);
	put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);

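/*
 * Illustrative sketch (an assumption, not taken from this file): a caller
 * holding the host_lock can take a reference, drop the lock, and then use
 * the device safely until it calls scsi_device_put():
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	ret = scsi_device_get(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	if (!ret) {
 *		... use sdev ...
 *		scsi_device_put(sdev);
 *	}
 */
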
/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	opaque cookie passed through to @fn for each device.
 * @fn:		callback invoked for each matching device.
 *
 * This traverses over each device of @starget.  The devices have
 * a reference that must be released by scsi_device_put when breaking
 * out of the loop.
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
			     void (*fn)(struct scsi_device *, void *))
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if ((sdev->channel == starget->channel) &&
		    (sdev->id == starget->id))
			fn(sdev, data);
	}
}
EXPORT_SYMBOL(starget_for_each_device);

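/*
 * Illustrative sketch (an assumption, not taken from this file): a callback
 * suitable for starget_for_each_device(); the name my_quiesce_one() is
 * hypothetical.
 *
 *	static void my_quiesce_one(struct scsi_device *sdev, void *data)
 *	{
 *		scsi_device_quiesce(sdev);
 *	}
 *
 *	starget_for_each_device(starget, NULL, my_quiesce_one);
 */
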
/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
						   uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
		if (sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
						 uint lun)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup_by_target(starget, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);

/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any
 * access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;

	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->channel == channel && sdev->id == id &&
		    sdev->lun == lun)
			return sdev;
	}

	return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
		uint channel, uint id, uint lun)
{
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	sdev = __scsi_device_lookup(shost, channel, id, lun);
	if (sdev && scsi_device_get(sdev))
		sdev = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);

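/*
 * Illustrative sketch (not part of this file): looking up LUN 0 of target 1
 * on channel 0, then dropping the reference when done.
 *
 *	struct scsi_device *sdev = scsi_device_lookup(shost, 0, 1, 0);
 *	if (sdev) {
 *		... use sdev ...
 *		scsi_device_put(sdev);
 *	}
 */
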
/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:	Pointer to struct scsi_device
 * @recovery:	Boolean instructing function to recover device or not.
 *
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
	struct scsi_cmnd *scmd;
	LIST_HEAD(active_list);
	struct list_head *lh, *lh_sf;
	unsigned long flags;

	scsi_device_set_state(sdev, SDEV_CANCEL);

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(scmd, &sdev->cmd_list, list) {
		if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
			/*
			 * If we are unable to remove the timer, it means
			 * that the command has already timed out or
			 * finished.
			 */
			if (!scsi_delete_timer(scmd))
				continue;
			list_add_tail(&scmd->eh_entry, &active_list);
		}
	}
	spin_unlock_irqrestore(&sdev->list_lock, flags);

	if (!list_empty(&active_list)) {
		list_for_each_safe(lh, lh_sf, &active_list) {
			scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
			list_del_init(lh);
			if (recovery &&
			    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
				scmd->result = (DID_ABORT << 16);
				scsi_finish_command(scmd);
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);

MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");

static int __init init_scsi(void)
{
	int error, i;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}

static void __exit exit_scsi(void)
{
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);