/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *      Based upon conversations with large numbers
 *      of people at Linux Expo.
 */
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};
#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP
struct kmem_cache *scsi_sdb_cache;
#include <acpi/acpi_bus.h>

int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
{
	bus->bus = &scsi_bus_type;
	return register_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);

void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
{
	unregister_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3
/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	blk_unprep_request(req);
	req->special = NULL;

	scsi_put_command(cmd);
}
/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(q, &device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
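/*
 * Illustrative sketch (added; not part of the original scsi_lib.c): a
 * low-level driver's queuecommand handler typically triggers the requeue
 * path above by returning one of the SCSI_MLQUEUE_* codes, which the
 * midlayer then feeds back through scsi_queue_insert().  A minimal shape,
 * under those assumptions:
 */
static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/* adapter temporarily full: ask the midlayer to requeue and stall */
	if (shost->can_queue > 0 && shost->host_busy >= shost->can_queue)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* ... hand the command to the hardware here ... */
	return 0;
}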
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags;
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (!req)
		return ret;

	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
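/*
 * Illustrative sketch (added; not part of the original file): a caller
 * issuing a 6-byte INQUIRY through scsi_execute().  Error handling is
 * reduced to a minimum and @len is assumed to fit in one byte.
 */
static int example_inquiry(struct scsi_device *sdev, void *buf, unsigned len)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, len, 0 };

	/* 0 on success; otherwise driver/host/status bytes are packed in */
	return scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, len,
			    NULL, 30 * HZ, 3, 0, NULL);
}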
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
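/*
 * Illustrative sketch (added; not part of the original file): using
 * scsi_execute_req() with a decoded scsi_sense_hdr instead of a raw sense
 * buffer, here for a TEST UNIT READY.
 */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
				  &sshdr, 30 * HZ, 3, NULL);
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		return -EAGAIN;	/* e.g. media change; caller may retry */
	return result;
}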
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	LIST_HEAD(starved_list);
	unsigned long flags;

	shost = sdev->host;
	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);
		spin_lock(sdev->request_queue->queue_lock);
		__blk_run_queue(sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);
		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * We need to hold a reference on the device to avoid the queue being
	 * killed after the unlock and before scsi_run_queue is invoked which
	 * may happen because scsi_unprep_request() puts the command which
	 * releases its reference on the device.
	 */
	get_device(&sdev->sdev_gendev);

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retrys */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
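/*
 * Worked example of the scsi_sgtable_index() mapping (added note, not in
 * the original source): nents = 24 is rounded up to the 32-entry pool,
 * since get_count_order(24) = 5 and 5 - 3 = 2, and index 2 selects
 * "sgpool-32" given the SP(8), SP(16), SP(32), ... table above.
 */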
static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}
static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands Must be complete as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));
	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = __scsi_error_from_host_byte(cmd, result);
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
				   (cmd->cmnd[0] == UNMAP ||
				    cmd->cmnd[0] == WRITE_SAME_16 ||
				    cmd->cmnd[0] == WRITE_SAME)) {
				description = "Discard failure";
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}
	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;

	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		rq->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	cmd->request->special = NULL;
	scsi_put_command(cmd);
	return error;
}
EXPORT_SYMBOL(scsi_init_io);
static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}
int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);
int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);
int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (sdev->device_busy == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);
int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
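/*
 * Illustrative sketch (added; not part of the original file): an upper
 * level driver usually combines the exported helpers above in its own
 * prep_fn, in the same spirit as scsi_prep_fn() but translating
 * REQ_TYPE_FS requests into CDBs of its own.
 */
static int example_uld_prep_fn(struct request_queue *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	int ret;

	ret = scsi_prep_state_check(sdev, rq);
	if (ret == BLKPREP_OK)
		/* the ULD would build the CDB after this succeeds */
		ret = scsi_setup_fs_cmnd(sdev, rq);

	return scsi_prep_return(q, rq, ret);
}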
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}
/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		list_move_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	return 1;
}
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dead(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in the SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}
/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}
static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn)
			goto out_delay;
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
out_delay:
	if (sdev->device_busy == 0)
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->dma_dev;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
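/*
 * Illustrative sketch (added; not part of the original file): a low-level
 * driver pairing scsi_block_requests()/scsi_unblock_requests() around a
 * window in which its hardware cannot accept commands.
 * example_reset_hardware() is a hypothetical driver-specific helper.
 */
static void example_reset_and_resume(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);
	example_reset_hardware(shost);	/* hypothetical helper */
	scsi_unblock_requests(shost);
}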
int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}
void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
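/*
 * Illustrative sketch (added; not part of the original file): reading the
 * caching mode page (0x08) with scsi_mode_sense() and locating the page
 * data behind the returned header and block-descriptor lengths.
 */
static int example_read_cache_page(struct scsi_device *sdev,
				   unsigned char *buffer, int len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buffer, len,
			      30 * HZ, 3, &data, &sshdr);
	if (scsi_status_is_good(res)) {
		/* the mode page starts after header and block descriptors */
		unsigned char *page = buffer + data.header_length +
				      data.block_descriptor_length;
		return page[2] & 0x04;	/* WCE bit of the caching page */
	}
	return -EIO;
}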
/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if unsuccessful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	/*
	 * Nested switches on (state, oldstate) accept or reject each
	 * requested transition; only stray case labels such as
	 * SDEV_CREATED_BLOCK and SDEV_TRANSPORT_OFFLINE remain in this
	 * copy.  Any combination not allowed by that table jumps to the
	 * "illegal" label below.
	 */
	}

	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
/**
 * 	sdev_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 **/
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}
/**
 * 	sdev_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}
/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);
/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);
/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
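/*
 * Illustrative sketch (added; not part of the original file): asserting a
 * media change event from a driver, which reaches userspace as a
 * SDEV_MEDIA_CHANGE=1 uevent through the helpers above.
 */
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}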
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if unsuccessful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	if (sdev->sdev_state != SDEV_QUIESCE ||
	    scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
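/*
 * Illustrative sketch (added; not part of the original file): the usual
 * quiesce/work/resume pattern used by suspend-style callers of the two
 * functions above.  example_do_maintenance() is a hypothetical stand-in
 * for whatever must run while user I/O is held off.
 */
static int example_quiesced_maintenance(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;
	example_do_maintenance(sdev);	/* hypothetical helper */
	scsi_device_resume(sdev);
	return 0;
}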
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 *	Block request made by scsi lld's to temporarily stop all
 *	scsi commands on the specified device.  Called from interrupt
 *	or normal process context.
 *
 *	Returns zero if successful or error if not
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set devices to after unblocking
 *
 *	Called by scsi lld's or the midlayer to restart the device queue
 *	for the previously suspended scsi device.  Called from interrupt or
 *	normal process context.
 *
 *	Returns zero if successful or error if not.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	or to one of the offline states (which must be a legal transition)
 *	allowing the midlayer to goose the queue for this device.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev,
			     enum scsi_device_state new_state)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	if (sdev->sdev_state == SDEV_BLOCK)
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		 sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);
static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);
/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);