drivers/scsi/scsi_lib.c
1 /*
2 * Copyright (C) 1999 Eric Youngdale
3 * Copyright (C) 2014 Christoph Hellwig
4 *
5 * SCSI queueing library.
6 * Initial versions: Eric Youngdale (eric@andante.org).
7 * Based upon conversations with large numbers
8 * of people at Linux Expo.
9 */
10
11 #include <linux/bio.h>
12 #include <linux/bitops.h>
13 #include <linux/blkdev.h>
14 #include <linux/completion.h>
15 #include <linux/kernel.h>
16 #include <linux/export.h>
17 #include <linux/mempool.h>
18 #include <linux/slab.h>
19 #include <linux/init.h>
20 #include <linux/pci.h>
21 #include <linux/delay.h>
22 #include <linux/hardirq.h>
23 #include <linux/scatterlist.h>
24 #include <linux/blk-mq.h>
25 #include <linux/ratelimit.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_dbg.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_driver.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_host.h>
34
35 #include <trace/events/scsi.h>
36
37 #include "scsi_priv.h"
38 #include "scsi_logging.h"
39
40
41 #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
42 #define SG_MEMPOOL_SIZE 2
43
44 struct scsi_host_sg_pool {
45 size_t size;
46 char *name;
47 struct kmem_cache *slab;
48 mempool_t *pool;
49 };
50
51 #define SP(x) { .size = x, "sgpool-" __stringify(x) }
52 #if (SCSI_MAX_SG_SEGMENTS < 32)
53 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
54 #endif
55 static struct scsi_host_sg_pool scsi_sg_pools[] = {
56 SP(8),
57 SP(16),
58 #if (SCSI_MAX_SG_SEGMENTS > 32)
59 SP(32),
60 #if (SCSI_MAX_SG_SEGMENTS > 64)
61 SP(64),
62 #if (SCSI_MAX_SG_SEGMENTS > 128)
63 SP(128),
64 #if (SCSI_MAX_SG_SEGMENTS > 256)
65 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
66 #endif
67 #endif
68 #endif
69 #endif
70 SP(SCSI_MAX_SG_SEGMENTS)
71 };
72 #undef SP
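/*
 * For example, on a host with sg_tablesize = 128 the pools above serve
 * requests by segment count: 1-8 segments come from sgpool-8, 9-16 from
 * sgpool-16, 17-32 from sgpool-32, 33-64 from sgpool-64 and 65-128 from
 * sgpool-128. The mapping is computed by scsi_sgtable_index() below.
 */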
73
74 struct kmem_cache *scsi_sdb_cache;
75
76 /*
77 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
78 * not change behaviour from the previous unplug mechanism; experimentation
79 * may prove this needs changing.
80 */
81 #define SCSI_QUEUE_DELAY 3
82
83 static void
84 scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
85 {
86 struct Scsi_Host *host = cmd->device->host;
87 struct scsi_device *device = cmd->device;
88 struct scsi_target *starget = scsi_target(device);
89
90 /*
91 * Set the appropriate busy bit for the device/host.
92 *
93 * If the host/device isn't busy, assume that something actually
94 * completed, and that we should be able to queue a command now.
95 *
96 * Note that the prior mid-layer assumption that any host could
97 * always queue at least one command is now broken. The mid-layer
98 * will implement a user specifiable stall (see
99 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
100 * if a command is requeued with no other commands outstanding
101 * either for the device or for the host.
102 */
103 switch (reason) {
104 case SCSI_MLQUEUE_HOST_BUSY:
105 atomic_set(&host->host_blocked, host->max_host_blocked);
106 break;
107 case SCSI_MLQUEUE_DEVICE_BUSY:
108 case SCSI_MLQUEUE_EH_RETRY:
109 atomic_set(&device->device_blocked,
110 device->max_device_blocked);
111 break;
112 case SCSI_MLQUEUE_TARGET_BUSY:
113 atomic_set(&starget->target_blocked,
114 starget->max_target_blocked);
115 break;
116 }
117 }
118
119 static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
120 {
121 struct scsi_device *sdev = cmd->device;
122 struct request_queue *q = cmd->request->q;
123
124 blk_mq_requeue_request(cmd->request);
125 blk_mq_kick_requeue_list(q);
126 put_device(&sdev->sdev_gendev);
127 }
128
129 /**
130 * __scsi_queue_insert - private queue insertion
131 * @cmd: The SCSI command being requeued
132 * @reason: The reason for the requeue
133 * @unbusy: Whether the queue should be unbusied
134 *
135 * This is a private queue insertion. The public interface
136 * scsi_queue_insert() always assumes the queue should be unbusied
137 * because it's always called before the completion. This function is
138 * for a requeue after completion, which should only occur in this
139 * file.
140 */
141 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
142 {
143 struct scsi_device *device = cmd->device;
144 struct request_queue *q = device->request_queue;
145 unsigned long flags;
146
147 SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
148 "Inserting command %p into mlqueue\n", cmd));
149
150 scsi_set_blocked(cmd, reason);
151
152 /*
153 * Decrement the counters, since these commands are no longer
154 * active on the host/device.
155 */
156 if (unbusy)
157 scsi_device_unbusy(device);
158
159 /*
160 * Requeue this command. It will go before all other commands
161 * that are already in the queue. Schedule requeue work under
162 * lock such that the kblockd_schedule_work() call happens
163 * before blk_cleanup_queue() finishes.
164 */
165 cmd->result = 0;
166 if (q->mq_ops) {
167 scsi_mq_requeue_cmd(cmd);
168 return;
169 }
170 spin_lock_irqsave(q->queue_lock, flags);
171 blk_requeue_request(q, cmd->request);
172 kblockd_schedule_work(&device->requeue_work);
173 spin_unlock_irqrestore(q->queue_lock, flags);
174 }
175
176 /*
177 * Function: scsi_queue_insert()
178 *
179 * Purpose: Insert a command in the midlevel queue.
180 *
181 * Arguments: cmd - command that we are adding to queue.
182 * reason - why we are inserting command to queue.
183 *
184 * Lock status: Assumed that lock is not held upon entry.
185 *
186 * Returns: Nothing.
187 *
188 * Notes: We do this for one of two cases. Either the host is busy
189 * and it cannot accept any more commands for the time being,
190 * or the device returned QUEUE_FULL and can accept no more
191 * commands.
192 * Notes: This could be called either from an interrupt context or a
193 * normal process context.
194 */
195 void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
196 {
197 __scsi_queue_insert(cmd, reason, 1);
198 }
199 /**
200 * scsi_execute - insert request and wait for the result
201 * @sdev: scsi device
202 * @cmd: scsi command
203 * @data_direction: data direction
204 * @buffer: data buffer
205 * @bufflen: len of buffer
206 * @sense: optional sense buffer
207 * @timeout: request timeout in seconds
208 * @retries: number of times to retry request
209 * @flags: flags to be ORed into the request's cmd_flags
210 * @resid: optional residual length
211 *
212 * returns the req->errors value which is the scsi_cmnd result
213 * field.
214 */
215 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
216 int data_direction, void *buffer, unsigned bufflen,
217 unsigned char *sense, int timeout, int retries, u64 flags,
218 int *resid)
219 {
220 struct request *req;
221 int write = (data_direction == DMA_TO_DEVICE);
222 int ret = DRIVER_ERROR << 24;
223
224 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
225 if (IS_ERR(req))
226 return ret;
227 blk_rq_set_block_pc(req);
228
229 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
230 buffer, bufflen, __GFP_WAIT))
231 goto out;
232
233 req->cmd_len = COMMAND_SIZE(cmd[0]);
234 memcpy(req->cmd, cmd, req->cmd_len);
235 req->sense = sense;
236 req->sense_len = 0;
237 req->retries = retries;
238 req->timeout = timeout;
239 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
240
241 /*
242 * head injection *required* here otherwise quiesce won't work
243 */
244 blk_execute_rq(req->q, NULL, req, 1);
245
246 /*
247 * Some devices (USB mass-storage in particular) may transfer
248 * garbage data together with a residue indicating that the data
249 * is invalid. Prevent the garbage from being misinterpreted
250 * and prevent security leaks by zeroing out the excess data.
251 */
252 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
253 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
254
255 if (resid)
256 *resid = req->resid_len;
257 ret = req->errors;
258 out:
259 blk_put_request(req);
260
261 return ret;
262 }
263 EXPORT_SYMBOL(scsi_execute);
264
265 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
266 int data_direction, void *buffer, unsigned bufflen,
267 struct scsi_sense_hdr *sshdr, int timeout, int retries,
268 int *resid, u64 flags)
269 {
270 char *sense = NULL;
271 int result;
272
273 if (sshdr) {
274 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
275 if (!sense)
276 return DRIVER_ERROR << 24;
277 }
278 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
279 sense, timeout, retries, flags, resid);
280 if (sshdr)
281 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
282
283 kfree(sense);
284 return result;
285 }
286 EXPORT_SYMBOL(scsi_execute_req_flags);
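/*
 * Sketch of a typical caller of the helper above, for illustration only
 * (sdev, timeout and retries are assumed to be supplied by the caller):
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY };
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req_flags(sdev, cdb, DMA_NONE, NULL, 0,
 *					&sshdr, timeout, retries, NULL, 0);
 *	if (result && scsi_sense_valid(&sshdr))
 *		decode sshdr.sense_key / sshdr.asc / sshdr.ascq here
 *
 * The helper allocates a temporary sense buffer, runs the command through
 * scsi_execute() and hands back the normalized sense data in @sshdr.
 */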
287
288 /*
289 * Function: scsi_init_cmd_errh()
290 *
291 * Purpose: Initialize cmd fields related to error handling.
292 *
293 * Arguments: cmd - command that is ready to be queued.
294 *
295 * Notes: This function has the job of initializing a number of
296 * fields related to error handling. Typically this will
297 * be called once for each command, as required.
298 */
299 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
300 {
301 cmd->serial_number = 0;
302 scsi_set_resid(cmd, 0);
303 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
304 if (cmd->cmd_len == 0)
305 cmd->cmd_len = scsi_command_size(cmd->cmnd);
306 }
307
308 void scsi_device_unbusy(struct scsi_device *sdev)
309 {
310 struct Scsi_Host *shost = sdev->host;
311 struct scsi_target *starget = scsi_target(sdev);
312 unsigned long flags;
313
314 atomic_dec(&shost->host_busy);
315 if (starget->can_queue > 0)
316 atomic_dec(&starget->target_busy);
317
318 if (unlikely(scsi_host_in_recovery(shost) &&
319 (shost->host_failed || shost->host_eh_scheduled))) {
320 spin_lock_irqsave(shost->host_lock, flags);
321 scsi_eh_wakeup(shost);
322 spin_unlock_irqrestore(shost->host_lock, flags);
323 }
324
325 atomic_dec(&sdev->device_busy);
326 }
327
328 static void scsi_kick_queue(struct request_queue *q)
329 {
330 if (q->mq_ops)
331 blk_mq_start_hw_queues(q);
332 else
333 blk_run_queue(q);
334 }
335
336 /*
337 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
338 * and call blk_run_queue for all the scsi_devices on the target -
339 * including current_sdev first.
340 *
341 * Called with *no* scsi locks held.
342 */
343 static void scsi_single_lun_run(struct scsi_device *current_sdev)
344 {
345 struct Scsi_Host *shost = current_sdev->host;
346 struct scsi_device *sdev, *tmp;
347 struct scsi_target *starget = scsi_target(current_sdev);
348 unsigned long flags;
349
350 spin_lock_irqsave(shost->host_lock, flags);
351 starget->starget_sdev_user = NULL;
352 spin_unlock_irqrestore(shost->host_lock, flags);
353
354 /*
355 * Call blk_run_queue for all LUNs on the target, starting with
356 * current_sdev. We race with others (to set starget_sdev_user),
357 * but in most cases, we will be first. Ideally, each LU on the
358 * target would get some limited time or requests on the target.
359 */
360 scsi_kick_queue(current_sdev->request_queue);
361
362 spin_lock_irqsave(shost->host_lock, flags);
363 if (starget->starget_sdev_user)
364 goto out;
365 list_for_each_entry_safe(sdev, tmp, &starget->devices,
366 same_target_siblings) {
367 if (sdev == current_sdev)
368 continue;
369 if (scsi_device_get(sdev))
370 continue;
371
372 spin_unlock_irqrestore(shost->host_lock, flags);
373 scsi_kick_queue(sdev->request_queue);
374 spin_lock_irqsave(shost->host_lock, flags);
375
376 scsi_device_put(sdev);
377 }
378 out:
379 spin_unlock_irqrestore(shost->host_lock, flags);
380 }
381
382 static inline bool scsi_device_is_busy(struct scsi_device *sdev)
383 {
384 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
385 return true;
386 if (atomic_read(&sdev->device_blocked) > 0)
387 return true;
388 return false;
389 }
390
391 static inline bool scsi_target_is_busy(struct scsi_target *starget)
392 {
393 if (starget->can_queue > 0) {
394 if (atomic_read(&starget->target_busy) >= starget->can_queue)
395 return true;
396 if (atomic_read(&starget->target_blocked) > 0)
397 return true;
398 }
399 return false;
400 }
401
402 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
403 {
404 if (shost->can_queue > 0 &&
405 atomic_read(&shost->host_busy) >= shost->can_queue)
406 return true;
407 if (atomic_read(&shost->host_blocked) > 0)
408 return true;
409 if (shost->host_self_blocked)
410 return true;
411 return false;
412 }
413
414 static void scsi_starved_list_run(struct Scsi_Host *shost)
415 {
416 LIST_HEAD(starved_list);
417 struct scsi_device *sdev;
418 unsigned long flags;
419
420 spin_lock_irqsave(shost->host_lock, flags);
421 list_splice_init(&shost->starved_list, &starved_list);
422
423 while (!list_empty(&starved_list)) {
424 struct request_queue *slq;
425
426 /*
427 * As long as shost is accepting commands and we have
428 * starved queues, call blk_run_queue. scsi_request_fn
429 * drops the queue_lock and can add us back to the
430 * starved_list.
431 *
432 * host_lock protects the starved_list and starved_entry.
433 * scsi_request_fn must get the host_lock before checking
434 * or modifying starved_list or starved_entry.
435 */
436 if (scsi_host_is_busy(shost))
437 break;
438
439 sdev = list_entry(starved_list.next,
440 struct scsi_device, starved_entry);
441 list_del_init(&sdev->starved_entry);
442 if (scsi_target_is_busy(scsi_target(sdev))) {
443 list_move_tail(&sdev->starved_entry,
444 &shost->starved_list);
445 continue;
446 }
447
448 /*
449 * Once we drop the host lock, a racing scsi_remove_device()
450 * call may remove the sdev from the starved list and destroy
451 * it and the queue. Mitigate by taking a reference to the
452 * queue and never touching the sdev again after we drop the
453 * host lock. Note: if __scsi_remove_device() invokes
454 * blk_cleanup_queue() before the queue is run from this
455 * function then blk_run_queue() will return immediately since
456 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
457 */
458 slq = sdev->request_queue;
459 if (!blk_get_queue(slq))
460 continue;
461 spin_unlock_irqrestore(shost->host_lock, flags);
462
463 scsi_kick_queue(slq);
464 blk_put_queue(slq);
465
466 spin_lock_irqsave(shost->host_lock, flags);
467 }
468 /* put any unprocessed entries back */
469 list_splice(&starved_list, &shost->starved_list);
470 spin_unlock_irqrestore(shost->host_lock, flags);
471 }
472
473 /*
474 * Function: scsi_run_queue()
475 *
476 * Purpose: Select a proper request queue to serve next
477 *
478 * Arguments: q - last request's queue
479 *
480 * Returns: Nothing
481 *
482 * Notes: The previous command was completely finished, start
483 * a new one if possible.
484 */
485 static void scsi_run_queue(struct request_queue *q)
486 {
487 struct scsi_device *sdev = q->queuedata;
488
489 if (scsi_target(sdev)->single_lun)
490 scsi_single_lun_run(sdev);
491 if (!list_empty(&sdev->host->starved_list))
492 scsi_starved_list_run(sdev->host);
493
494 if (q->mq_ops)
495 blk_mq_start_stopped_hw_queues(q, false);
496 else
497 blk_run_queue(q);
498 }
499
500 void scsi_requeue_run_queue(struct work_struct *work)
501 {
502 struct scsi_device *sdev;
503 struct request_queue *q;
504
505 sdev = container_of(work, struct scsi_device, requeue_work);
506 q = sdev->request_queue;
507 scsi_run_queue(q);
508 }
509
510 /*
511 * Function: scsi_requeue_command()
512 *
513 * Purpose: Handle post-processing of completed commands.
514 *
515 * Arguments: q - queue to operate on
516 * cmd - command that may need to be requeued.
517 *
518 * Returns: Nothing
519 *
520 * Notes: After command completion, there may be blocks left
521 * over which weren't finished by the previous command;
522 * this can be for a number of reasons - the main one is
523 * I/O errors in the middle of the request, in which case
524 * we need to request the blocks that come after the bad
525 * sector.
526 * Notes: Upon return, cmd is a stale pointer.
527 */
528 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
529 {
530 struct scsi_device *sdev = cmd->device;
531 struct request *req = cmd->request;
532 unsigned long flags;
533
534 spin_lock_irqsave(q->queue_lock, flags);
535 blk_unprep_request(req);
536 req->special = NULL;
537 scsi_put_command(cmd);
538 blk_requeue_request(q, req);
539 spin_unlock_irqrestore(q->queue_lock, flags);
540
541 scsi_run_queue(q);
542
543 put_device(&sdev->sdev_gendev);
544 }
545
546 void scsi_run_host_queues(struct Scsi_Host *shost)
547 {
548 struct scsi_device *sdev;
549
550 shost_for_each_device(sdev, shost)
551 scsi_run_queue(sdev->request_queue);
552 }
553
554 static inline unsigned int scsi_sgtable_index(unsigned short nents)
555 {
556 unsigned int index;
557
558 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
559
560 if (nents <= 8)
561 index = 0;
562 else
563 index = get_count_order(nents) - 3;
564
565 return index;
566 }
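/*
 * For example, nents = 5 maps to index 0 (sgpool-8), while nents = 40
 * gives get_count_order(40) = 6 (2^6 = 64 >= 40), so index 6 - 3 = 3,
 * which selects sgpool-64 in scsi_sg_pools[] above.
 */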
567
568 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
569 {
570 struct scsi_host_sg_pool *sgp;
571
572 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
573 mempool_free(sgl, sgp->pool);
574 }
575
576 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
577 {
578 struct scsi_host_sg_pool *sgp;
579
580 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
581 return mempool_alloc(sgp->pool, gfp_mask);
582 }
583
584 static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
585 {
586 if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
587 return;
588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
589 }
590
591 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
592 {
593 struct scatterlist *first_chunk = NULL;
594 int ret;
595
596 BUG_ON(!nents);
597
598 if (mq) {
599 if (nents <= SCSI_MAX_SG_SEGMENTS) {
600 sdb->table.nents = nents;
601 sg_init_table(sdb->table.sgl, sdb->table.nents);
602 return 0;
603 }
604 first_chunk = sdb->table.sgl;
605 }
606
607 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
608 first_chunk, GFP_ATOMIC, scsi_sg_alloc);
609 if (unlikely(ret))
610 scsi_free_sgtable(sdb, mq);
611 return ret;
612 }
613
614 static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
615 {
616 if (cmd->request->cmd_type == REQ_TYPE_FS) {
617 struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
618
619 if (drv->uninit_command)
620 drv->uninit_command(cmd);
621 }
622 }
623
624 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
625 {
626 if (cmd->sdb.table.nents)
627 scsi_free_sgtable(&cmd->sdb, true);
628 if (cmd->request->next_rq && cmd->request->next_rq->special)
629 scsi_free_sgtable(cmd->request->next_rq->special, true);
630 if (scsi_prot_sg_count(cmd))
631 scsi_free_sgtable(cmd->prot_sdb, true);
632 }
633
634 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
635 {
636 struct scsi_device *sdev = cmd->device;
637 struct Scsi_Host *shost = sdev->host;
638 unsigned long flags;
639
640 scsi_mq_free_sgtables(cmd);
641 scsi_uninit_cmd(cmd);
642
643 if (shost->use_cmd_list) {
644 BUG_ON(list_empty(&cmd->list));
645 spin_lock_irqsave(&sdev->list_lock, flags);
646 list_del_init(&cmd->list);
647 spin_unlock_irqrestore(&sdev->list_lock, flags);
648 }
649 }
650
651 /*
652 * Function: scsi_release_buffers()
653 *
654 * Purpose: Free resources allocated for a scsi_command.
655 *
656 * Arguments: cmd - command that we are bailing.
657 *
658 * Lock status: Assumed that no lock is held upon entry.
659 *
660 * Returns: Nothing
661 *
662 * Notes: In the event that an upper level driver rejects a
663 * command, we must release resources allocated during
664 * the __init_io() function. Primarily this would involve
665 * the scatter-gather table.
666 */
667 static void scsi_release_buffers(struct scsi_cmnd *cmd)
668 {
669 if (cmd->sdb.table.nents)
670 scsi_free_sgtable(&cmd->sdb, false);
671
672 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
673
674 if (scsi_prot_sg_count(cmd))
675 scsi_free_sgtable(cmd->prot_sdb, false);
676 }
677
678 static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
679 {
680 struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
681
682 scsi_free_sgtable(bidi_sdb, false);
683 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
684 cmd->request->next_rq->special = NULL;
685 }
686
687 static bool scsi_end_request(struct request *req, int error,
688 unsigned int bytes, unsigned int bidi_bytes)
689 {
690 struct scsi_cmnd *cmd = req->special;
691 struct scsi_device *sdev = cmd->device;
692 struct request_queue *q = sdev->request_queue;
693
694 if (blk_update_request(req, error, bytes))
695 return true;
696
697 /* Bidi request must be completed as a whole */
698 if (unlikely(bidi_bytes) &&
699 blk_update_request(req->next_rq, error, bidi_bytes))
700 return true;
701
702 if (blk_queue_add_random(q))
703 add_disk_randomness(req->rq_disk);
704
705 if (req->mq_ctx) {
706 /*
707 * In the MQ case the command gets freed by __blk_mq_end_request,
708 * so we have to do all cleanup that depends on it earlier.
709 *
710 * We also can't kick the queues from irq context, so we
711 * will have to defer it to a workqueue.
712 */
713 scsi_mq_uninit_cmd(cmd);
714
715 __blk_mq_end_request(req, error);
716
717 if (scsi_target(sdev)->single_lun ||
718 !list_empty(&sdev->host->starved_list))
719 kblockd_schedule_work(&sdev->requeue_work);
720 else
721 blk_mq_start_stopped_hw_queues(q, true);
722 } else {
723 unsigned long flags;
724
725 if (bidi_bytes)
726 scsi_release_bidi_buffers(cmd);
727
728 spin_lock_irqsave(q->queue_lock, flags);
729 blk_finish_request(req, error);
730 spin_unlock_irqrestore(q->queue_lock, flags);
731
732 scsi_release_buffers(cmd);
733
734 scsi_put_command(cmd);
735 scsi_run_queue(q);
736 }
737
738 put_device(&sdev->sdev_gendev);
739 return false;
740 }
741
742 /**
743 * __scsi_error_from_host_byte - translate SCSI error code into errno
744 * @cmd: SCSI command (unused)
745 * @result: scsi error code
746 *
747 * Translate SCSI error code into standard UNIX errno.
748 * Return values:
749 * -ENOLINK temporary transport failure
750 * -EREMOTEIO permanent target failure, do not retry
751 * -EBADE permanent nexus failure, retry on other path
752 * -ENOSPC No write space available
753 * -ENODATA Medium error
754 * -EIO unspecified I/O error
755 */
756 static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
757 {
758 int error = 0;
759
760 switch(host_byte(result)) {
761 case DID_TRANSPORT_FAILFAST:
762 error = -ENOLINK;
763 break;
764 case DID_TARGET_FAILURE:
765 set_host_byte(cmd, DID_OK);
766 error = -EREMOTEIO;
767 break;
768 case DID_NEXUS_FAILURE:
769 set_host_byte(cmd, DID_OK);
770 error = -EBADE;
771 break;
772 case DID_ALLOC_FAILURE:
773 set_host_byte(cmd, DID_OK);
774 error = -ENOSPC;
775 break;
776 case DID_MEDIUM_ERROR:
777 set_host_byte(cmd, DID_OK);
778 error = -ENODATA;
779 break;
780 default:
781 error = -EIO;
782 break;
783 }
784
785 return error;
786 }
787
788 /*
789 * Function: scsi_io_completion()
790 *
791 * Purpose: Completion processing for block device I/O requests.
792 *
793 * Arguments: cmd - command that is finished.
794 *
795 * Lock status: Assumed that no lock is held upon entry.
796 *
797 * Returns: Nothing
798 *
799 * Notes: We will finish off the specified number of sectors. If we
800 * are done, the command block will be released and the queue
801 * function will be goosed. If we are not done then we have to
802 * figure out what to do next:
803 *
804 * a) We can call scsi_requeue_command(). The request
805 * will be unprepared and put back on the queue. Then
806 * a new command will be created for it. This should
807 * be used if we made forward progress, or if we want
808 * to switch from READ(10) to READ(6) for example.
809 *
810 * b) We can call __scsi_queue_insert(). The request will
811 * be put back on the queue and retried using the same
812 * command as before, possibly after a delay.
813 *
814 * c) We can call scsi_end_request() with -EIO to fail
815 * the remainder of the request.
816 */
817 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
818 {
819 int result = cmd->result;
820 struct request_queue *q = cmd->device->request_queue;
821 struct request *req = cmd->request;
822 int error = 0;
823 struct scsi_sense_hdr sshdr;
824 bool sense_valid = false;
825 int sense_deferred = 0, level = 0;
826 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
827 ACTION_DELAYED_RETRY} action;
828 unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
829
830 if (result) {
831 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
832 if (sense_valid)
833 sense_deferred = scsi_sense_is_deferred(&sshdr);
834 }
835
836 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
837 if (result) {
838 if (sense_valid && req->sense) {
839 /*
840 * SG_IO wants current and deferred errors
841 */
842 int len = 8 + cmd->sense_buffer[7];
843
844 if (len > SCSI_SENSE_BUFFERSIZE)
845 len = SCSI_SENSE_BUFFERSIZE;
846 memcpy(req->sense, cmd->sense_buffer, len);
847 req->sense_len = len;
848 }
849 if (!sense_deferred)
850 error = __scsi_error_from_host_byte(cmd, result);
851 }
852 /*
853 * __scsi_error_from_host_byte may have reset the host_byte
854 */
855 req->errors = cmd->result;
856
857 req->resid_len = scsi_get_resid(cmd);
858
859 if (scsi_bidi_cmnd(cmd)) {
860 /*
861 * Bidi commands must be completed as a whole,
862 * both sides at once.
863 */
864 req->next_rq->resid_len = scsi_in(cmd)->resid;
865 if (scsi_end_request(req, 0, blk_rq_bytes(req),
866 blk_rq_bytes(req->next_rq)))
867 BUG();
868 return;
869 }
870 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
871 /*
872 * Certain non BLOCK_PC requests are commands that don't
873 * actually transfer anything (FLUSH), so cannot use
874 * good_bytes != blk_rq_bytes(req) as the signal for an error.
875 * This sets the error explicitly for the problem case.
876 */
877 error = __scsi_error_from_host_byte(cmd, result);
878 }
879
880 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
881 BUG_ON(blk_bidi_rq(req));
882
883 /*
884 * Next deal with any sectors which we were able to correctly
885 * handle.
886 */
887 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
888 "%u sectors total, %d bytes done.\n",
889 blk_rq_sectors(req), good_bytes));
890
891 /*
892 * Recovered errors need reporting, but they're always treated
893 * as success, so fiddle the result code here. For BLOCK_PC
894 * we already took a copy of the original into rq->errors, which
895 * is what gets returned to the user.
896 */
897 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
898 /* If ATA PASS-THROUGH INFORMATION AVAILABLE is reported, skip the
899 * print since the caller wants the ATA registers. This only occurs
900 * on SCSI ATA PASS-THROUGH commands when CK_COND=1.
901 */
902 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
903 ;
904 else if (!(req->cmd_flags & REQ_QUIET))
905 scsi_print_sense(cmd);
906 result = 0;
907 /* BLOCK_PC may have set error */
908 error = 0;
909 }
910
911 /*
912 * If we finished all bytes in the request we are done now.
913 */
914 if (!scsi_end_request(req, error, good_bytes, 0))
915 return;
916
917 /*
918 * Kill the remainder if no retries are left.
919 */
920 if (error && scsi_noretry_cmd(cmd)) {
921 if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
922 BUG();
923 return;
924 }
925
926 /*
927 * If there had been no error, but we have leftover bytes in the
928 * request, just queue the command up again.
929 */
930 if (result == 0)
931 goto requeue;
932
933 error = __scsi_error_from_host_byte(cmd, result);
934
935 if (host_byte(result) == DID_RESET) {
936 /* Third party bus reset or reset for error recovery
937 * reasons. Just retry the command and see what
938 * happens.
939 */
940 action = ACTION_RETRY;
941 } else if (sense_valid && !sense_deferred) {
942 switch (sshdr.sense_key) {
943 case UNIT_ATTENTION:
944 if (cmd->device->removable) {
945 /* Detected disc change. Set a bit
946 * and quietly refuse further access.
947 */
948 cmd->device->changed = 1;
949 action = ACTION_FAIL;
950 } else {
951 /* Must have been a power glitch, or a
952 * bus reset. Could not have been a
953 * media change, so we just retry the
954 * command and see what happens.
955 */
956 action = ACTION_RETRY;
957 }
958 break;
959 case ILLEGAL_REQUEST:
960 /* If we had an ILLEGAL REQUEST returned, then
961 * we may have performed an unsupported
962 * command. The only thing this should be
963 * would be a ten byte read where only a six
964 * byte read was supported. Also, on a system
965 * where READ CAPACITY failed, we may have
966 * read past the end of the disk.
967 */
968 if ((cmd->device->use_10_for_rw &&
969 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
970 (cmd->cmnd[0] == READ_10 ||
971 cmd->cmnd[0] == WRITE_10)) {
972 /* This will issue a new 6-byte command. */
973 cmd->device->use_10_for_rw = 0;
974 action = ACTION_REPREP;
975 } else if (sshdr.asc == 0x10) /* DIX */ {
976 action = ACTION_FAIL;
977 error = -EILSEQ;
978 /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
979 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
980 action = ACTION_FAIL;
981 error = -EREMOTEIO;
982 } else
983 action = ACTION_FAIL;
984 break;
985 case ABORTED_COMMAND:
986 action = ACTION_FAIL;
987 if (sshdr.asc == 0x10) /* DIF */
988 error = -EILSEQ;
989 break;
990 case NOT_READY:
991 /* If the device is in the process of becoming
992 * ready, or has a temporary blockage, retry.
993 */
994 if (sshdr.asc == 0x04) {
995 switch (sshdr.ascq) {
996 case 0x01: /* becoming ready */
997 case 0x04: /* format in progress */
998 case 0x05: /* rebuild in progress */
999 case 0x06: /* recalculation in progress */
1000 case 0x07: /* operation in progress */
1001 case 0x08: /* Long write in progress */
1002 case 0x09: /* self test in progress */
1003 case 0x14: /* space allocation in progress */
1004 action = ACTION_DELAYED_RETRY;
1005 break;
1006 default:
1007 action = ACTION_FAIL;
1008 break;
1009 }
1010 } else
1011 action = ACTION_FAIL;
1012 break;
1013 case VOLUME_OVERFLOW:
1014 /* See SSC3rXX or current. */
1015 action = ACTION_FAIL;
1016 break;
1017 default:
1018 action = ACTION_FAIL;
1019 break;
1020 }
1021 } else
1022 action = ACTION_FAIL;
1023
1024 if (action != ACTION_FAIL &&
1025 time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
1026 action = ACTION_FAIL;
1027
1028 switch (action) {
1029 case ACTION_FAIL:
1030 /* Give up and fail the remainder of the request */
1031 if (!(req->cmd_flags & REQ_QUIET)) {
1032 static DEFINE_RATELIMIT_STATE(_rs,
1033 DEFAULT_RATELIMIT_INTERVAL,
1034 DEFAULT_RATELIMIT_BURST);
1035
1036 if (unlikely(scsi_logging_level))
1037 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
1038 SCSI_LOG_MLCOMPLETE_BITS);
1039
1040 /*
1041 * if logging is enabled the failure will be printed
1042 * in scsi_log_completion(), so avoid duplicate messages
1043 */
1044 if (!level && __ratelimit(&_rs)) {
1045 scsi_print_result(cmd, NULL, FAILED);
1046 if (driver_byte(result) & DRIVER_SENSE)
1047 scsi_print_sense(cmd);
1048 scsi_print_command(cmd);
1049 }
1050 }
1051 if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
1052 return;
1053 /*FALLTHRU*/
1054 case ACTION_REPREP:
1055 requeue:
1056 /* Unprep the request and put it back at the head of the queue.
1057 * A new command will be prepared and issued.
1058 */
1059 if (q->mq_ops) {
1060 cmd->request->cmd_flags &= ~REQ_DONTPREP;
1061 scsi_mq_uninit_cmd(cmd);
1062 scsi_mq_requeue_cmd(cmd);
1063 } else {
1064 scsi_release_buffers(cmd);
1065 scsi_requeue_command(q, cmd);
1066 }
1067 break;
1068 case ACTION_RETRY:
1069 /* Retry the same command immediately */
1070 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
1071 break;
1072 case ACTION_DELAYED_RETRY:
1073 /* Retry the same command after a delay */
1074 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
1075 break;
1076 }
1077 }
1078
1079 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
1080 {
1081 int count;
1082
1083 /*
1084 * If sg table allocation fails, requeue request later.
1085 */
1086 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
1087 req->mq_ctx != NULL)))
1088 return BLKPREP_DEFER;
1089
1090 /*
1091 * Next, walk the list, and fill in the addresses and sizes of
1092 * each segment.
1093 */
1094 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1095 BUG_ON(count > sdb->table.nents);
1096 sdb->table.nents = count;
1097 sdb->length = blk_rq_bytes(req);
1098 return BLKPREP_OK;
1099 }
1100
1101 /*
1102 * Function: scsi_init_io()
1103 *
1104 * Purpose: SCSI I/O initialize function.
1105 *
1106 * Arguments: cmd - Command descriptor we wish to initialize
1107 *
1108 * Returns: 0 on success
1109 * BLKPREP_DEFER if the failure is retryable
1110 * BLKPREP_KILL if the failure is fatal
1111 */
1112 int scsi_init_io(struct scsi_cmnd *cmd)
1113 {
1114 struct scsi_device *sdev = cmd->device;
1115 struct request *rq = cmd->request;
1116 bool is_mq = (rq->mq_ctx != NULL);
1117 int error;
1118
1119 BUG_ON(!rq->nr_phys_segments);
1120
1121 error = scsi_init_sgtable(rq, &cmd->sdb);
1122 if (error)
1123 goto err_exit;
1124
1125 if (blk_bidi_rq(rq)) {
1126 if (!rq->q->mq_ops) {
1127 struct scsi_data_buffer *bidi_sdb =
1128 kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
1129 if (!bidi_sdb) {
1130 error = BLKPREP_DEFER;
1131 goto err_exit;
1132 }
1133
1134 rq->next_rq->special = bidi_sdb;
1135 }
1136
1137 error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
1138 if (error)
1139 goto err_exit;
1140 }
1141
1142 if (blk_integrity_rq(rq)) {
1143 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1144 int ivecs, count;
1145
1146 if (prot_sdb == NULL) {
1147 /*
1148 * This can happen if someone (e.g. multipath)
1149 * queues a command to a device on an adapter
1150 * that does not support DIX.
1151 */
1152 WARN_ON_ONCE(1);
1153 error = BLKPREP_KILL;
1154 goto err_exit;
1155 }
1156
1157 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1158
1159 if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
1160 error = BLKPREP_DEFER;
1161 goto err_exit;
1162 }
1163
1164 count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1165 prot_sdb->table.sgl);
1166 BUG_ON(unlikely(count > ivecs));
1167 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1168
1169 cmd->prot_sdb = prot_sdb;
1170 cmd->prot_sdb->table.nents = count;
1171 }
1172
1173 return BLKPREP_OK;
1174 err_exit:
1175 if (is_mq) {
1176 scsi_mq_free_sgtables(cmd);
1177 } else {
1178 scsi_release_buffers(cmd);
1179 cmd->request->special = NULL;
1180 scsi_put_command(cmd);
1181 put_device(&sdev->sdev_gendev);
1182 }
1183 return error;
1184 }
1185 EXPORT_SYMBOL(scsi_init_io);
1186
1187 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1188 struct request *req)
1189 {
1190 struct scsi_cmnd *cmd;
1191
1192 if (!req->special) {
1193 /* Bail if we can't get a reference to the device */
1194 if (!get_device(&sdev->sdev_gendev))
1195 return NULL;
1196
1197 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1198 if (unlikely(!cmd)) {
1199 put_device(&sdev->sdev_gendev);
1200 return NULL;
1201 }
1202 req->special = cmd;
1203 } else {
1204 cmd = req->special;
1205 }
1206
1207 /* pull a tag out of the request if we have one */
1208 cmd->tag = req->tag;
1209 cmd->request = req;
1210
1211 cmd->cmnd = req->cmd;
1212 cmd->prot_op = SCSI_PROT_NORMAL;
1213
1214 return cmd;
1215 }
1216
1217 static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1218 {
1219 struct scsi_cmnd *cmd = req->special;
1220
1221 /*
1222 * BLOCK_PC requests may transfer data, in which case they must
1223 * have a bio attached to them. Or they might contain a SCSI command
1224 * that does not transfer data, in which case they may optionally
1225 * submit a request without an attached bio.
1226 */
1227 if (req->bio) {
1228 int ret = scsi_init_io(cmd);
1229 if (unlikely(ret))
1230 return ret;
1231 } else {
1232 BUG_ON(blk_rq_bytes(req));
1233
1234 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1235 }
1236
1237 cmd->cmd_len = req->cmd_len;
1238 cmd->transfersize = blk_rq_bytes(req);
1239 cmd->allowed = req->retries;
1240 return BLKPREP_OK;
1241 }
1242
1243 /*
1244 * Set up a REQ_TYPE_FS command. These are simple requests from filesystems
1245 * that still need to be translated to SCSI CDBs by the ULD.
1246 */
1247 static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1248 {
1249 struct scsi_cmnd *cmd = req->special;
1250
1251 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1252 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1253 int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1254 if (ret != BLKPREP_OK)
1255 return ret;
1256 }
1257
1258 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1259 return scsi_cmd_to_driver(cmd)->init_command(cmd);
1260 }
1261
1262 static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
1263 {
1264 struct scsi_cmnd *cmd = req->special;
1265
1266 if (!blk_rq_bytes(req))
1267 cmd->sc_data_direction = DMA_NONE;
1268 else if (rq_data_dir(req) == WRITE)
1269 cmd->sc_data_direction = DMA_TO_DEVICE;
1270 else
1271 cmd->sc_data_direction = DMA_FROM_DEVICE;
1272
1273 switch (req->cmd_type) {
1274 case REQ_TYPE_FS:
1275 return scsi_setup_fs_cmnd(sdev, req);
1276 case REQ_TYPE_BLOCK_PC:
1277 return scsi_setup_blk_pc_cmnd(sdev, req);
1278 default:
1279 return BLKPREP_KILL;
1280 }
1281 }
1282
1283 static int
1284 scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1285 {
1286 int ret = BLKPREP_OK;
1287
1288 /*
1289 * If the device is not in running state we will reject some
1290 * or all commands.
1291 */
1292 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1293 switch (sdev->sdev_state) {
1294 case SDEV_OFFLINE:
1295 case SDEV_TRANSPORT_OFFLINE:
1296 /*
1297 * If the device is offline we refuse to process any
1298 * commands. The device must be brought online
1299 * before trying any recovery commands.
1300 */
1301 sdev_printk(KERN_ERR, sdev,
1302 "rejecting I/O to offline device\n");
1303 ret = BLKPREP_KILL;
1304 break;
1305 case SDEV_DEL:
1306 /*
1307 * If the device is fully deleted, we refuse to
1308 * process any commands as well.
1309 */
1310 sdev_printk(KERN_ERR, sdev,
1311 "rejecting I/O to dead device\n");
1312 ret = BLKPREP_KILL;
1313 break;
1314 case SDEV_QUIESCE:
1315 case SDEV_BLOCK:
1316 case SDEV_CREATED_BLOCK:
1317 /*
1318 * If the device is blocked we defer normal commands.
1319 */
1320 if (!(req->cmd_flags & REQ_PREEMPT))
1321 ret = BLKPREP_DEFER;
1322 break;
1323 default:
1324 /*
1325 * For any other not fully online state we only allow
1326 * special commands. In particular any user initiated
1327 * command is not allowed.
1328 */
1329 if (!(req->cmd_flags & REQ_PREEMPT))
1330 ret = BLKPREP_KILL;
1331 break;
1332 }
1333 }
1334 return ret;
1335 }
1336
1337 static int
1338 scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1339 {
1340 struct scsi_device *sdev = q->queuedata;
1341
1342 switch (ret) {
1343 case BLKPREP_KILL:
1344 req->errors = DID_NO_CONNECT << 16;
1345 /* release the command and kill it */
1346 if (req->special) {
1347 struct scsi_cmnd *cmd = req->special;
1348 scsi_release_buffers(cmd);
1349 scsi_put_command(cmd);
1350 put_device(&sdev->sdev_gendev);
1351 req->special = NULL;
1352 }
1353 break;
1354 case BLKPREP_DEFER:
1355 /*
1356 * If we defer, blk_peek_request() returns NULL, but the
1357 * queue must be restarted, so we schedule a callback to happen
1358 * shortly.
1359 */
1360 if (atomic_read(&sdev->device_busy) == 0)
1361 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1362 break;
1363 default:
1364 req->cmd_flags |= REQ_DONTPREP;
1365 }
1366
1367 return ret;
1368 }
1369
1370 static int scsi_prep_fn(struct request_queue *q, struct request *req)
1371 {
1372 struct scsi_device *sdev = q->queuedata;
1373 struct scsi_cmnd *cmd;
1374 int ret;
1375
1376 ret = scsi_prep_state_check(sdev, req);
1377 if (ret != BLKPREP_OK)
1378 goto out;
1379
1380 cmd = scsi_get_cmd_from_req(sdev, req);
1381 if (unlikely(!cmd)) {
1382 ret = BLKPREP_DEFER;
1383 goto out;
1384 }
1385
1386 ret = scsi_setup_cmnd(sdev, req);
1387 out:
1388 return scsi_prep_return(q, req, ret);
1389 }
1390
1391 static void scsi_unprep_fn(struct request_queue *q, struct request *req)
1392 {
1393 scsi_uninit_cmd(req->special);
1394 }
1395
1396 /*
1397 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1398 * return 0.
1399 *
1400 * Called with the queue_lock held.
1401 */
1402 static inline int scsi_dev_queue_ready(struct request_queue *q,
1403 struct scsi_device *sdev)
1404 {
1405 unsigned int busy;
1406
1407 busy = atomic_inc_return(&sdev->device_busy) - 1;
1408 if (atomic_read(&sdev->device_blocked)) {
1409 if (busy)
1410 goto out_dec;
1411
1412 /*
1413 * unblock after device_blocked iterates to zero
1414 */
1415 if (atomic_dec_return(&sdev->device_blocked) > 0) {
1416 /*
1417 * For the MQ case we take care of this in the caller.
1418 */
1419 if (!q->mq_ops)
1420 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1421 goto out_dec;
1422 }
1423 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1424 "unblocking device at zero depth\n"));
1425 }
1426
1427 if (busy >= sdev->queue_depth)
1428 goto out_dec;
1429
1430 return 1;
1431 out_dec:
1432 atomic_dec(&sdev->device_busy);
1433 return 0;
1434 }
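/*
 * Example of the lock-free accounting above, assuming queue_depth = 2 and
 * device_blocked = 0: two concurrent callers observe busy values 0 and 1
 * after the atomic_inc_return() and both proceed; a third observes
 * busy = 2, which is >= queue_depth, so it undoes its increment at
 * out_dec and reports the device as not ready.
 */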
1435
1436 /*
1437 * scsi_target_queue_ready: checks whether we can send commands to the target
1438 * @sdev: scsi device on starget to check.
1439 */
1440 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1441 struct scsi_device *sdev)
1442 {
1443 struct scsi_target *starget = scsi_target(sdev);
1444 unsigned int busy;
1445
1446 if (starget->single_lun) {
1447 spin_lock_irq(shost->host_lock);
1448 if (starget->starget_sdev_user &&
1449 starget->starget_sdev_user != sdev) {
1450 spin_unlock_irq(shost->host_lock);
1451 return 0;
1452 }
1453 starget->starget_sdev_user = sdev;
1454 spin_unlock_irq(shost->host_lock);
1455 }
1456
1457 if (starget->can_queue <= 0)
1458 return 1;
1459
1460 busy = atomic_inc_return(&starget->target_busy) - 1;
1461 if (atomic_read(&starget->target_blocked) > 0) {
1462 if (busy)
1463 goto starved;
1464
1465 /*
1466 * unblock after target_blocked iterates to zero
1467 */
1468 if (atomic_dec_return(&starget->target_blocked) > 0)
1469 goto out_dec;
1470
1471 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1472 "unblocking target at zero depth\n"));
1473 }
1474
1475 if (busy >= starget->can_queue)
1476 goto starved;
1477
1478 return 1;
1479
1480 starved:
1481 spin_lock_irq(shost->host_lock);
1482 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1483 spin_unlock_irq(shost->host_lock);
1484 out_dec:
1485 if (starget->can_queue > 0)
1486 atomic_dec(&starget->target_busy);
1487 return 0;
1488 }
1489
1490 /*
1491 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1492 * return 0. We must end up running the queue again whenever 0 is
1493 * returned, else IO can hang.
1494 */
1495 static inline int scsi_host_queue_ready(struct request_queue *q,
1496 struct Scsi_Host *shost,
1497 struct scsi_device *sdev)
1498 {
1499 unsigned int busy;
1500
1501 if (scsi_host_in_recovery(shost))
1502 return 0;
1503
1504 busy = atomic_inc_return(&shost->host_busy) - 1;
1505 if (atomic_read(&shost->host_blocked) > 0) {
1506 if (busy)
1507 goto starved;
1508
1509 /*
1510 * unblock after host_blocked iterates to zero
1511 */
1512 if (atomic_dec_return(&shost->host_blocked) > 0)
1513 goto out_dec;
1514
1515 SCSI_LOG_MLQUEUE(3,
1516 shost_printk(KERN_INFO, shost,
1517 "unblocking host at zero depth\n"));
1518 }
1519
1520 if (shost->can_queue > 0 && busy >= shost->can_queue)
1521 goto starved;
1522 if (shost->host_self_blocked)
1523 goto starved;
1524
1525 /* We're OK to process the command, so we can't be starved */
1526 if (!list_empty(&sdev->starved_entry)) {
1527 spin_lock_irq(shost->host_lock);
1528 if (!list_empty(&sdev->starved_entry))
1529 list_del_init(&sdev->starved_entry);
1530 spin_unlock_irq(shost->host_lock);
1531 }
1532
1533 return 1;
1534
1535 starved:
1536 spin_lock_irq(shost->host_lock);
1537 if (list_empty(&sdev->starved_entry))
1538 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1539 spin_unlock_irq(shost->host_lock);
1540 out_dec:
1541 atomic_dec(&shost->host_busy);
1542 return 0;
1543 }
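/*
 * Note on the host_blocked handling above: after a driver returns
 * SCSI_MLQUEUE_HOST_BUSY, scsi_set_blocked() sets host_blocked to
 * max_host_blocked. Each subsequent ready check on an otherwise idle
 * host decrements it, and commands are dispatched again only once the
 * counter reaches zero (the "unblocking host at zero depth" message).
 */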
1544
1545 /*
1546 * Busy state exporting function for request stacking drivers.
1547 *
1548 * For efficiency, no lock is taken to check the busy state of
1549 * shost/starget/sdev, since the returned value is not guaranteed and
1550 * may be changed after request stacking drivers call the function,
1551 * regardless of taking lock or not.
1552 *
1553 * When scsi can't dispatch I/Os anymore and needs to kill I/Os, scsi
1554 * needs to return 'not busy'. Otherwise, request stacking drivers
1555 * may hold requests forever.
1556 */
1557 static int scsi_lld_busy(struct request_queue *q)
1558 {
1559 struct scsi_device *sdev = q->queuedata;
1560 struct Scsi_Host *shost;
1561
1562 if (blk_queue_dying(q))
1563 return 0;
1564
1565 shost = sdev->host;
1566
1567 /*
1568 * Ignore host/starget busy state.
1569 * Since block layer does not have a concept of fairness across
1570 * multiple queues, congestion of host/starget needs to be handled
1571 * in SCSI layer.
1572 */
1573 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1574 return 1;
1575
1576 return 0;
1577 }
1578
1579 /*
1580 * Kill a request for a dead device
1581 */
1582 static void scsi_kill_request(struct request *req, struct request_queue *q)
1583 {
1584 struct scsi_cmnd *cmd = req->special;
1585 struct scsi_device *sdev;
1586 struct scsi_target *starget;
1587 struct Scsi_Host *shost;
1588
1589 blk_start_request(req);
1590
1591 scmd_printk(KERN_INFO, cmd, "killing request\n");
1592
1593 sdev = cmd->device;
1594 starget = scsi_target(sdev);
1595 shost = sdev->host;
1596 scsi_init_cmd_errh(cmd);
1597 cmd->result = DID_NO_CONNECT << 16;
1598 atomic_inc(&cmd->device->iorequest_cnt);
1599
1600 /*
1601 * The SCSI request completion path will do scsi_device_unbusy(), so
1602 * bump the busy counts here first. To bump the counters, we need to
1603 * dance with the locks as the normal issue path does.
1604 */
1605 atomic_inc(&sdev->device_busy);
1606 atomic_inc(&shost->host_busy);
1607 if (starget->can_queue > 0)
1608 atomic_inc(&starget->target_busy);
1609
1610 blk_complete_request(req);
1611 }
1612
1613 static void scsi_softirq_done(struct request *rq)
1614 {
1615 struct scsi_cmnd *cmd = rq->special;
1616 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1617 int disposition;
1618
1619 INIT_LIST_HEAD(&cmd->eh_entry);
1620
1621 atomic_inc(&cmd->device->iodone_cnt);
1622 if (cmd->result)
1623 atomic_inc(&cmd->device->ioerr_cnt);
1624
1625 disposition = scsi_decide_disposition(cmd);
1626 if (disposition != SUCCESS &&
1627 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1628 sdev_printk(KERN_ERR, cmd->device,
1629 "timing out command, waited %lus\n",
1630 wait_for/HZ);
1631 disposition = SUCCESS;
1632 }
1633
1634 scsi_log_completion(cmd, disposition);
1635
1636 switch (disposition) {
1637 case SUCCESS:
1638 scsi_finish_command(cmd);
1639 break;
1640 case NEEDS_RETRY:
1641 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1642 break;
1643 case ADD_TO_MLQUEUE:
1644 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1645 break;
1646 default:
1647 if (!scsi_eh_scmd_add(cmd, 0))
1648 scsi_finish_command(cmd);
1649 }
1650 }
1651
1652 /**
1653 * scsi_dispatch_command - Dispatch a command to the low-level driver.
1654 * @cmd: command block we are dispatching.
1655 *
1656 * Return: nonzero if the request was rejected and the device's queue needs
1657 * to be plugged.
1658 */
1659 static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
1660 {
1661 struct Scsi_Host *host = cmd->device->host;
1662 int rtn = 0;
1663
1664 atomic_inc(&cmd->device->iorequest_cnt);
1665
1666 /* check if the device is still usable */
1667 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
1668 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
1669 * returns an immediate error upwards, and signals
1670 * that the device is no longer present */
1671 cmd->result = DID_NO_CONNECT << 16;
1672 goto done;
1673 }
1674
1675 /* Check to see if the scsi lld made this device blocked. */
1676 if (unlikely(scsi_device_blocked(cmd->device))) {
1677 /*
1678 * in blocked state, the command is just put back on
1679 * the device queue. The suspend state has already
1680 * blocked the queue so future requests should not
1681 * occur until the device transitions out of the
1682 * suspend state.
1683 */
1684 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1685 "queuecommand : device blocked\n"));
1686 return SCSI_MLQUEUE_DEVICE_BUSY;
1687 }
1688
1689 /* Store the LUN value in cmnd, if needed. */
1690 if (cmd->device->lun_in_cdb)
1691 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
1692 (cmd->device->lun << 5 & 0xe0);
1693
1694 scsi_log_send(cmd);
1695
1696 /*
1697 * Before we queue this command, check if the command
1698 * length exceeds what the host adapter can handle.
1699 */
1700 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
1701 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1702 "queuecommand : command too long. "
1703 "cdb_size=%d host->max_cmd_len=%d\n",
1704 cmd->cmd_len, cmd->device->host->max_cmd_len));
1705 cmd->result = (DID_ABORT << 16);
1706 goto done;
1707 }
1708
1709 if (unlikely(host->shost_state == SHOST_DEL)) {
1710 cmd->result = (DID_NO_CONNECT << 16);
1711 goto done;
1712
1713 }
1714
1715 trace_scsi_dispatch_cmd_start(cmd);
1716 rtn = host->hostt->queuecommand(host, cmd);
1717 if (rtn) {
1718 trace_scsi_dispatch_cmd_error(cmd, rtn);
1719 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
1720 rtn != SCSI_MLQUEUE_TARGET_BUSY)
1721 rtn = SCSI_MLQUEUE_HOST_BUSY;
1722
1723 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
1724 "queuecommand : request rejected\n"));
1725 }
1726
1727 return rtn;
1728 done:
1729 cmd->scsi_done(cmd);
1730 return 0;
1731 }
1732
1733 /**
1734 * scsi_done - Invoke completion on finished SCSI command.
1735 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
1736 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
1737 *
1738 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
1739 * which regains ownership of the SCSI command (de facto) from a LLDD, and
1740 * calls blk_complete_request() for further processing.
1741 *
1742 * This function is interrupt context safe.
1743 */
1744 static void scsi_done(struct scsi_cmnd *cmd)
1745 {
1746 trace_scsi_dispatch_cmd_done(cmd);
1747 blk_complete_request(cmd->request);
1748 }
1749
1750 /*
1751 * Function: scsi_request_fn()
1752 *
1753 * Purpose: Main strategy routine for SCSI.
1754 *
1755 * Arguments: q - Pointer to actual queue.
1756 *
1757 * Returns: Nothing
1758 *
1759 * Lock status: IO request lock assumed to be held when called.
1760 */
1761 static void scsi_request_fn(struct request_queue *q)
1762 __releases(q->queue_lock)
1763 __acquires(q->queue_lock)
1764 {
1765 struct scsi_device *sdev = q->queuedata;
1766 struct Scsi_Host *shost;
1767 struct scsi_cmnd *cmd;
1768 struct request *req;
1769
1770 /*
1771 * To start with, we keep looping until the queue is empty, or until
1772 * the host is no longer able to accept any more requests.
1773 */
1774 shost = sdev->host;
1775 for (;;) {
1776 int rtn;
1777 /*
1778 * get next queueable request. We do this early to make sure
1779 * that the request is fully prepared even if we cannot
1780 * accept it.
1781 */
1782 req = blk_peek_request(q);
1783 if (!req)
1784 break;
1785
1786 if (unlikely(!scsi_device_online(sdev))) {
1787 sdev_printk(KERN_ERR, sdev,
1788 "rejecting I/O to offline device\n");
1789 scsi_kill_request(req, q);
1790 continue;
1791 }
1792
1793 if (!scsi_dev_queue_ready(q, sdev))
1794 break;
1795
1796 /*
1797 * Remove the request from the request list.
1798 */
1799 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1800 blk_start_request(req);
1801
1802 spin_unlock_irq(q->queue_lock);
1803 cmd = req->special;
1804 if (unlikely(cmd == NULL)) {
1805 printk(KERN_CRIT "impossible request in %s.\n"
1806 "please mail a stack trace to "
1807 "linux-scsi@vger.kernel.org\n",
1808 __func__);
1809 blk_dump_rq_flags(req, "foo");
1810 BUG();
1811 }
1812
1813 /*
1814 * We hit this when the driver is using a host wide
1815 * tag map. For device level tag maps the queue_depth check
1816 * in the device ready fn would prevent us from trying
1817 * to allocate a tag. Since the map is a shared host resource
1818 * we add the dev to the starved list so it eventually gets
1819 * a run when a tag is freed.
1820 */
1821 if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
1822 spin_lock_irq(shost->host_lock);
1823 if (list_empty(&sdev->starved_entry))
1824 list_add_tail(&sdev->starved_entry,
1825 &shost->starved_list);
1826 spin_unlock_irq(shost->host_lock);
1827 goto not_ready;
1828 }
1829
1830 if (!scsi_target_queue_ready(shost, sdev))
1831 goto not_ready;
1832
1833 if (!scsi_host_queue_ready(q, shost, sdev))
1834 goto host_not_ready;
1835
1836 if (sdev->simple_tags)
1837 cmd->flags |= SCMD_TAGGED;
1838 else
1839 cmd->flags &= ~SCMD_TAGGED;
1840
1841 /*
1842 * Finally, initialize any error handling parameters, and set up
1843 * the timers for timeouts.
1844 */
1845 scsi_init_cmd_errh(cmd);
1846
1847 /*
1848 * Dispatch the command to the low-level driver.
1849 */
1850 cmd->scsi_done = scsi_done;
1851 rtn = scsi_dispatch_cmd(cmd);
1852 if (rtn) {
1853 scsi_queue_insert(cmd, rtn);
1854 spin_lock_irq(q->queue_lock);
1855 goto out_delay;
1856 }
1857 spin_lock_irq(q->queue_lock);
1858 }
1859
1860 return;
1861
1862 host_not_ready:
1863 if (scsi_target(sdev)->can_queue > 0)
1864 atomic_dec(&scsi_target(sdev)->target_busy);
1865 not_ready:
1866 /*
1867 * lock q, handle tag, requeue req, and decrement device_busy. We
1868 * must return with queue_lock held.
1869 *
1870 * Decrementing device_busy without checking it is OK, as all such
1871 * cases (host limits or settings) should run the queue at some
1872 * later time.
1873 */
1874 spin_lock_irq(q->queue_lock);
1875 blk_requeue_request(q, req);
1876 atomic_dec(&sdev->device_busy);
1877 out_delay:
1878 if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
1879 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1880 }
1881
1882 static inline int prep_to_mq(int ret)
1883 {
1884 switch (ret) {
1885 case BLKPREP_OK:
1886 return 0;
1887 case BLKPREP_DEFER:
1888 return BLK_MQ_RQ_QUEUE_BUSY;
1889 default:
1890 return BLK_MQ_RQ_QUEUE_ERROR;
1891 }
1892 }
1893
1894 static int scsi_mq_prep_fn(struct request *req)
1895 {
1896 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1897 struct scsi_device *sdev = req->q->queuedata;
1898 struct Scsi_Host *shost = sdev->host;
1899 unsigned char *sense_buf = cmd->sense_buffer;
1900 struct scatterlist *sg;
1901
1902 memset(cmd, 0, sizeof(struct scsi_cmnd));
1903
1904 req->special = cmd;
1905
1906 cmd->request = req;
1907 cmd->device = sdev;
1908 cmd->sense_buffer = sense_buf;
1909
1910 cmd->tag = req->tag;
1911
1912 cmd->cmnd = req->cmd;
1913 cmd->prot_op = SCSI_PROT_NORMAL;
1914
1915 INIT_LIST_HEAD(&cmd->list);
1916 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1917 cmd->jiffies_at_alloc = jiffies;
1918
1919 if (shost->use_cmd_list) {
1920 spin_lock_irq(&sdev->list_lock);
1921 list_add_tail(&cmd->list, &sdev->cmd_list);
1922 spin_unlock_irq(&sdev->list_lock);
1923 }
1924
1925 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1926 cmd->sdb.table.sgl = sg;
1927
1928 if (scsi_host_get_prot(shost)) {
1929 cmd->prot_sdb = (void *)sg +
1930 min_t(unsigned int,
1931 shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
1932 sizeof(struct scatterlist);
1933 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1934
1935 cmd->prot_sdb->table.sgl =
1936 (struct scatterlist *)(cmd->prot_sdb + 1);
1937 }
1938
1939 if (blk_bidi_rq(req)) {
1940 struct request *next_rq = req->next_rq;
1941 struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
1942
1943 memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
1944 bidi_sdb->table.sgl =
1945 (struct scatterlist *)(bidi_sdb + 1);
1946
1947 next_rq->special = bidi_sdb;
1948 }
1949
1950 blk_mq_start_request(req);
1951
1952 return scsi_setup_cmnd(sdev, req);
1953 }
1954
1955 static void scsi_mq_done(struct scsi_cmnd *cmd)
1956 {
1957 trace_scsi_dispatch_cmd_done(cmd);
1958 blk_mq_complete_request(cmd->request);
1959 }
1960
1961 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1962 const struct blk_mq_queue_data *bd)
1963 {
1964 struct request *req = bd->rq;
1965 struct request_queue *q = req->q;
1966 struct scsi_device *sdev = q->queuedata;
1967 struct Scsi_Host *shost = sdev->host;
1968 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1969 int ret;
1970 int reason;
1971
1972 ret = prep_to_mq(scsi_prep_state_check(sdev, req));
1973 if (ret)
1974 goto out;
1975
1976 ret = BLK_MQ_RQ_QUEUE_BUSY;
1977 if (!get_device(&sdev->sdev_gendev))
1978 goto out;
1979
1980 if (!scsi_dev_queue_ready(q, sdev))
1981 goto out_put_device;
1982 if (!scsi_target_queue_ready(shost, sdev))
1983 goto out_dec_device_busy;
1984 if (!scsi_host_queue_ready(q, shost, sdev))
1985 goto out_dec_target_busy;
1986
1987
1988 if (!(req->cmd_flags & REQ_DONTPREP)) {
1989 ret = prep_to_mq(scsi_mq_prep_fn(req));
1990 if (ret)
1991 goto out_dec_host_busy;
1992 req->cmd_flags |= REQ_DONTPREP;
1993 } else {
1994 blk_mq_start_request(req);
1995 }
1996
1997 if (sdev->simple_tags)
1998 cmd->flags |= SCMD_TAGGED;
1999 else
2000 cmd->flags &= ~SCMD_TAGGED;
2001
2002 scsi_init_cmd_errh(cmd);
2003 cmd->scsi_done = scsi_mq_done;
2004
2005 reason = scsi_dispatch_cmd(cmd);
2006 if (reason) {
2007 scsi_set_blocked(cmd, reason);
2008 ret = BLK_MQ_RQ_QUEUE_BUSY;
2009 goto out_dec_host_busy;
2010 }
2011
2012 return BLK_MQ_RQ_QUEUE_OK;
2013
2014 out_dec_host_busy:
2015 atomic_dec(&shost->host_busy);
2016 out_dec_target_busy:
2017 if (scsi_target(sdev)->can_queue > 0)
2018 atomic_dec(&scsi_target(sdev)->target_busy);
2019 out_dec_device_busy:
2020 atomic_dec(&sdev->device_busy);
2021 out_put_device:
2022 put_device(&sdev->sdev_gendev);
2023 out:
2024 switch (ret) {
2025 case BLK_MQ_RQ_QUEUE_BUSY:
2026 blk_mq_stop_hw_queue(hctx);
2027 if (atomic_read(&sdev->device_busy) == 0 &&
2028 !scsi_device_blocked(sdev))
2029 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
2030 break;
2031 case BLK_MQ_RQ_QUEUE_ERROR:
2032 /*
2033 			 * Make sure to release all allocated resources when
2034 * we hit an error, as we will never see this command
2035 * again.
2036 */
2037 if (req->cmd_flags & REQ_DONTPREP)
2038 scsi_mq_uninit_cmd(cmd);
2039 break;
2040 default:
2041 break;
2042 }
2043 return ret;
2044 }
2045
2046 static enum blk_eh_timer_return scsi_timeout(struct request *req,
2047 bool reserved)
2048 {
2049 if (reserved)
2050 return BLK_EH_RESET_TIMER;
2051 return scsi_times_out(req);
2052 }
2053
2054 static int scsi_init_request(void *data, struct request *rq,
2055 unsigned int hctx_idx, unsigned int request_idx,
2056 unsigned int numa_node)
2057 {
2058 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2059
2060 cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
2061 numa_node);
2062 if (!cmd->sense_buffer)
2063 return -ENOMEM;
2064 return 0;
2065 }
2066
2067 static void scsi_exit_request(void *data, struct request *rq,
2068 unsigned int hctx_idx, unsigned int request_idx)
2069 {
2070 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2071
2072 kfree(cmd->sense_buffer);
2073 }
2074
2075 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
2076 {
2077 struct device *host_dev;
2078 u64 bounce_limit = 0xffffffff;
2079
2080 if (shost->unchecked_isa_dma)
2081 return BLK_BOUNCE_ISA;
2082 /*
2083 * Platforms with virtual-DMA translation
2084 * hardware have no practical limit.
2085 */
2086 if (!PCI_DMA_BUS_IS_PHYS)
2087 return BLK_BOUNCE_ANY;
2088
2089 host_dev = scsi_get_device(shost);
2090 if (host_dev && host_dev->dma_mask)
2091 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
2092
2093 return bounce_limit;
2094 }
2095
2096 static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2097 {
2098 struct device *dev = shost->dma_dev;
2099
2100 /*
2101 * this limit is imposed by hardware restrictions
2102 */
2103 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
2104 SCSI_MAX_SG_CHAIN_SEGMENTS));
2105
2106 if (scsi_host_prot_dma(shost)) {
2107 shost->sg_prot_tablesize =
2108 min_not_zero(shost->sg_prot_tablesize,
2109 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
2110 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
2111 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
2112 }
2113
2114 blk_queue_max_hw_sectors(q, shost->max_sectors);
2115 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
2116 blk_queue_segment_boundary(q, shost->dma_boundary);
2117 dma_set_seg_boundary(dev, shost->dma_boundary);
2118
2119 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
2120
2121 if (!shost->use_clustering)
2122 q->limits.cluster = 0;
2123
2124 /*
2125 * set a reasonable default alignment on word boundaries: the
2126 * host and device may alter it using
2127 * blk_queue_update_dma_alignment() later.
2128 */
2129 blk_queue_dma_alignment(q, 0x03);
2130 }
2131
2132 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
2133 request_fn_proc *request_fn)
2134 {
2135 struct request_queue *q;
2136
2137 q = blk_init_queue(request_fn, NULL);
2138 if (!q)
2139 return NULL;
2140 __scsi_init_queue(shost, q);
2141 return q;
2142 }
2143 EXPORT_SYMBOL(__scsi_alloc_queue);
2144
2145 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
2146 {
2147 struct request_queue *q;
2148
2149 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
2150 if (!q)
2151 return NULL;
2152
2153 blk_queue_prep_rq(q, scsi_prep_fn);
2154 blk_queue_unprep_rq(q, scsi_unprep_fn);
2155 blk_queue_softirq_done(q, scsi_softirq_done);
2156 blk_queue_rq_timed_out(q, scsi_times_out);
2157 blk_queue_lld_busy(q, scsi_lld_busy);
2158 return q;
2159 }
2160
2161 static struct blk_mq_ops scsi_mq_ops = {
2162 .map_queue = blk_mq_map_queue,
2163 .queue_rq = scsi_queue_rq,
2164 .complete = scsi_softirq_done,
2165 .timeout = scsi_timeout,
2166 .init_request = scsi_init_request,
2167 .exit_request = scsi_exit_request,
2168 };
2169
2170 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
2171 {
2172 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
2173 if (IS_ERR(sdev->request_queue))
2174 return NULL;
2175
2176 sdev->request_queue->queuedata = sdev;
2177 __scsi_init_queue(sdev->host, sdev->request_queue);
2178 return sdev->request_queue;
2179 }
2180
2181 int scsi_mq_setup_tags(struct Scsi_Host *shost)
2182 {
2183 unsigned int cmd_size, sgl_size, tbl_size;
2184
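	/*
	 * Size the per-request PDU so that scsi_mq_prep_fn() can place the
	 * scatterlist (and, when protection is enabled, the integrity
	 * scsi_data_buffer plus its scatterlist) behind the scsi_cmnd and
	 * the LLD's per-command data.
	 */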
2185 tbl_size = shost->sg_tablesize;
2186 if (tbl_size > SCSI_MAX_SG_SEGMENTS)
2187 tbl_size = SCSI_MAX_SG_SEGMENTS;
2188 sgl_size = tbl_size * sizeof(struct scatterlist);
2189 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
2190 if (scsi_host_get_prot(shost))
2191 cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
2192
2193 memset(&shost->tag_set, 0, sizeof(shost->tag_set));
2194 shost->tag_set.ops = &scsi_mq_ops;
2195 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
2196 shost->tag_set.queue_depth = shost->can_queue;
2197 shost->tag_set.cmd_size = cmd_size;
2198 shost->tag_set.numa_node = NUMA_NO_NODE;
2199 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
2200 shost->tag_set.flags |=
2201 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
2202 shost->tag_set.driver_data = shost;
2203
2204 return blk_mq_alloc_tag_set(&shost->tag_set);
2205 }
2206
2207 void scsi_mq_destroy_tags(struct Scsi_Host *shost)
2208 {
2209 blk_mq_free_tag_set(&shost->tag_set);
2210 }
2211
2212 /*
2213 * Function: scsi_block_requests()
2214 *
2215 * Purpose: Utility function used by low-level drivers to prevent further
2216 * commands from being queued to the device.
2217 *
2218 * Arguments: shost - Host in question
2219 *
2220 * Returns: Nothing
2221 *
2222 * Lock status: No locks are assumed held.
2223 *
2224 * Notes: There is no timer nor any other means by which the requests
2225 * get unblocked other than the low-level driver calling
2226 * scsi_unblock_requests().
2227 */
2228 void scsi_block_requests(struct Scsi_Host *shost)
2229 {
2230 shost->host_self_blocked = 1;
2231 }
2232 EXPORT_SYMBOL(scsi_block_requests);
2233
2234 /*
2235 * Function: scsi_unblock_requests()
2236 *
2237 * Purpose: Utility function used by low-level drivers to allow further
2238  *		commands to be queued to the device.
2239 *
2240 * Arguments: shost - Host in question
2241 *
2242 * Returns: Nothing
2243 *
2244 * Lock status: No locks are assumed held.
2245 *
2246 * Notes: There is no timer nor any other means by which the requests
2247 * get unblocked other than the low-level driver calling
2248 * scsi_unblock_requests().
2249 *
2250 * This is done as an API function so that changes to the
2251 * internals of the scsi mid-layer won't require wholesale
2252 * changes to drivers that use this feature.
2253 */
2254 void scsi_unblock_requests(struct Scsi_Host *shost)
2255 {
2256 shost->host_self_blocked = 0;
2257 scsi_run_host_queues(shost);
2258 }
2259 EXPORT_SYMBOL(scsi_unblock_requests);
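
/*
 * Example (illustrative sketch only, not taken from an in-tree driver):
 * a low-level driver typically brackets an operation that must not race
 * with new commands, such as a controller reset, with this pair of
 * calls; scsi_unblock_requests() then reruns the host queues.
 * my_hw_reset() is a hypothetical driver helper:
 *
 *	scsi_block_requests(shost);
 *	my_hw_reset(shost);
 *	scsi_unblock_requests(shost);
 */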
2260
2261 int __init scsi_init_queue(void)
2262 {
2263 int i;
2264
2265 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
2266 sizeof(struct scsi_data_buffer),
2267 0, 0, NULL);
2268 if (!scsi_sdb_cache) {
2269 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
2270 return -ENOMEM;
2271 }
2272
2273 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2274 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2275 int size = sgp->size * sizeof(struct scatterlist);
2276
2277 sgp->slab = kmem_cache_create(sgp->name, size, 0,
2278 SLAB_HWCACHE_ALIGN, NULL);
2279 if (!sgp->slab) {
2280 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
2281 sgp->name);
2282 goto cleanup_sdb;
2283 }
2284
2285 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
2286 sgp->slab);
2287 if (!sgp->pool) {
2288 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
2289 sgp->name);
2290 goto cleanup_sdb;
2291 }
2292 }
2293
2294 return 0;
2295
2296 cleanup_sdb:
2297 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2298 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2299 if (sgp->pool)
2300 mempool_destroy(sgp->pool);
2301 if (sgp->slab)
2302 kmem_cache_destroy(sgp->slab);
2303 }
2304 kmem_cache_destroy(scsi_sdb_cache);
2305
2306 return -ENOMEM;
2307 }
2308
2309 void scsi_exit_queue(void)
2310 {
2311 int i;
2312
2313 kmem_cache_destroy(scsi_sdb_cache);
2314
2315 for (i = 0; i < SG_MEMPOOL_NR; i++) {
2316 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
2317 mempool_destroy(sgp->pool);
2318 kmem_cache_destroy(sgp->slab);
2319 }
2320 }
2321
2322 /**
2323 * scsi_mode_select - issue a mode select
2324 * @sdev: SCSI device to be queried
2325 * @pf: Page format bit (1 == standard, 0 == vendor specific)
2326 * @sp: Save page bit (0 == don't save, 1 == save)
2327 * @modepage: mode page being requested
2328 * @buffer: request buffer (may not be smaller than eight bytes)
2329 * @len: length of request buffer.
2330 * @timeout: command timeout
2331 * @retries: number of retries before failing
2332 * @data: returns a structure abstracting the mode header data
2333 * @sshdr: place to put sense data (or NULL if no sense to be collected).
2334 * must be SCSI_SENSE_BUFFERSIZE big.
2335 *
2336 * Returns zero if successful; negative error number or scsi
2337 * status on error
2338 *
2339 */
2340 int
2341 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
2342 unsigned char *buffer, int len, int timeout, int retries,
2343 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2344 {
2345 unsigned char cmd[10];
2346 unsigned char *real_buffer;
2347 int ret;
2348
2349 memset(cmd, 0, sizeof(cmd));
2350 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
2351
2352 if (sdev->use_10_for_ms) {
2353 if (len > 65535)
2354 return -EINVAL;
2355 real_buffer = kmalloc(8 + len, GFP_KERNEL);
2356 if (!real_buffer)
2357 return -ENOMEM;
2358 memcpy(real_buffer + 8, buffer, len);
2359 len += 8;
2360 real_buffer[0] = 0;
2361 real_buffer[1] = 0;
2362 real_buffer[2] = data->medium_type;
2363 real_buffer[3] = data->device_specific;
2364 real_buffer[4] = data->longlba ? 0x01 : 0;
2365 real_buffer[5] = 0;
2366 real_buffer[6] = data->block_descriptor_length >> 8;
2367 real_buffer[7] = data->block_descriptor_length;
2368
2369 cmd[0] = MODE_SELECT_10;
2370 cmd[7] = len >> 8;
2371 cmd[8] = len;
2372 } else {
2373 if (len > 255 || data->block_descriptor_length > 255 ||
2374 data->longlba)
2375 return -EINVAL;
2376
2377 real_buffer = kmalloc(4 + len, GFP_KERNEL);
2378 if (!real_buffer)
2379 return -ENOMEM;
2380 memcpy(real_buffer + 4, buffer, len);
2381 len += 4;
2382 real_buffer[0] = 0;
2383 real_buffer[1] = data->medium_type;
2384 real_buffer[2] = data->device_specific;
2385 real_buffer[3] = data->block_descriptor_length;
2386
2387
2388 cmd[0] = MODE_SELECT;
2389 cmd[4] = len;
2390 }
2391
2392 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
2393 sshdr, timeout, retries, NULL);
2394 kfree(real_buffer);
2395 return ret;
2396 }
2397 EXPORT_SYMBOL_GPL(scsi_mode_select);
2398
2399 /**
2400 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
2401 * @sdev: SCSI device to be queried
2402 * @dbd: set if mode sense will allow block descriptors to be returned
2403 * @modepage: mode page being requested
2404 * @buffer: request buffer (may not be smaller than eight bytes)
2405 * @len: length of request buffer.
2406 * @timeout: command timeout
2407 * @retries: number of retries before failing
2408 * @data: returns a structure abstracting the mode header data
2409 * @sshdr: place to put sense data (or NULL if no sense to be collected).
2410 * must be SCSI_SENSE_BUFFERSIZE big.
2411 *
2412  *	Returns zero if successful, or a non-zero SCSI result code on
2413  *	failure; on success @data->header_length holds the header size
2414  *	(4 or 8 bytes, for a six- or ten-byte command respectively).
2415 */
2416 int
2417 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2418 unsigned char *buffer, int len, int timeout, int retries,
2419 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2420 {
2421 unsigned char cmd[12];
2422 int use_10_for_ms;
2423 int header_length;
2424 int result;
2425 struct scsi_sense_hdr my_sshdr;
2426
2427 memset(data, 0, sizeof(*data));
2428 memset(&cmd[0], 0, 12);
2429 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
2430 cmd[2] = modepage;
2431
2432 /* caller might not be interested in sense, but we need it */
2433 if (!sshdr)
2434 sshdr = &my_sshdr;
2435
2436 retry:
2437 use_10_for_ms = sdev->use_10_for_ms;
2438
2439 if (use_10_for_ms) {
2440 if (len < 8)
2441 len = 8;
2442
2443 cmd[0] = MODE_SENSE_10;
2444 cmd[8] = len;
2445 header_length = 8;
2446 } else {
2447 if (len < 4)
2448 len = 4;
2449
2450 cmd[0] = MODE_SENSE;
2451 cmd[4] = len;
2452 header_length = 4;
2453 }
2454
2455 memset(buffer, 0, len);
2456
2457 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2458 sshdr, timeout, retries, NULL);
2459
2460 /* This code looks awful: what it's doing is making sure an
2461 * ILLEGAL REQUEST sense return identifies the actual command
2462 * byte as the problem. MODE_SENSE commands can return
2463 * ILLEGAL REQUEST if the code page isn't supported */
2464
2465 if (use_10_for_ms && !scsi_status_is_good(result) &&
2466 (driver_byte(result) & DRIVER_SENSE)) {
2467 if (scsi_sense_valid(sshdr)) {
2468 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2469 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2470 /*
2471 * Invalid command operation code
2472 */
2473 sdev->use_10_for_ms = 0;
2474 goto retry;
2475 }
2476 }
2477 }
2478
2479 	if (scsi_status_is_good(result)) {
2480 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2481 (modepage == 6 || modepage == 8))) {
2482 /* Initio breakage? */
2483 header_length = 0;
2484 data->length = 13;
2485 data->medium_type = 0;
2486 data->device_specific = 0;
2487 data->longlba = 0;
2488 data->block_descriptor_length = 0;
2489 		} else if (use_10_for_ms) {
2490 data->length = buffer[0]*256 + buffer[1] + 2;
2491 data->medium_type = buffer[2];
2492 data->device_specific = buffer[3];
2493 data->longlba = buffer[4] & 0x01;
2494 data->block_descriptor_length = buffer[6]*256
2495 + buffer[7];
2496 } else {
2497 data->length = buffer[0] + 1;
2498 data->medium_type = buffer[1];
2499 data->device_specific = buffer[2];
2500 data->block_descriptor_length = buffer[3];
2501 }
2502 data->header_length = header_length;
2503 }
2504
2505 return result;
2506 }
2507 EXPORT_SYMBOL(scsi_mode_sense);
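
/*
 * Example (illustrative sketch only): the usual pattern is a MODE SENSE,
 * an in-place edit of the returned page, then a MODE SELECT of just the
 * page data, much as sd.c does for the caching page.  The page code
 * (0x08), the WCE bit, and the timeout/retry values are assumptions:
 *
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char buf[64], *page;
 *	int len;
 *
 *	if (scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
 *			    30 * HZ, 3, &data, &sshdr))
 *		return;
 *	page = buf + data.header_length + data.block_descriptor_length;
 *	page[2] |= 0x04;	(sets WCE in the caching mode page)
 *	len = data.length - data.header_length -
 *	      data.block_descriptor_length;
 *	scsi_mode_select(sdev, 1, 0, 0x08, page, len,
 *			 30 * HZ, 3, &data, &sshdr);
 */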
2508
2509 /**
2510 * scsi_test_unit_ready - test if unit is ready
2511 * @sdev: scsi device to change the state of.
2512 * @timeout: command timeout
2513 * @retries: number of retries before failing
2514 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
2515 * returning sense. Make sure that this is cleared before passing
2516 * in.
2517 *
2518  *	Returns zero if successful, or an error if the TUR failed.  For
2519  *	removable media, UNIT_ATTENTION sets the ->changed flag.
2520 **/
2521 int
2522 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2523 struct scsi_sense_hdr *sshdr_external)
2524 {
2525 char cmd[] = {
2526 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2527 };
2528 struct scsi_sense_hdr *sshdr;
2529 int result;
2530
2531 if (!sshdr_external)
2532 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2533 else
2534 sshdr = sshdr_external;
2535
2536 /* try to eat the UNIT_ATTENTION if there are enough retries */
2537 do {
2538 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2539 timeout, retries, NULL);
2540 if (sdev->removable && scsi_sense_valid(sshdr) &&
2541 sshdr->sense_key == UNIT_ATTENTION)
2542 sdev->changed = 1;
2543 } while (scsi_sense_valid(sshdr) &&
2544 sshdr->sense_key == UNIT_ATTENTION && --retries);
2545
2546 if (!sshdr_external)
2547 kfree(sshdr);
2548 return result;
2549 }
2550 EXPORT_SYMBOL(scsi_test_unit_ready);
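
/*
 * Example (illustrative sketch only; the timeout and retry counts are
 * assumptions): check whether the unit is ready, remembering that for
 * removable media a UNIT ATTENTION will have set sdev->changed:
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0 &&
 *	    !sdev->changed)
 *		... the unit is ready and the medium has not changed ...
 */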
2551
2552 /**
2553 * scsi_device_set_state - Take the given device through the device state model.
2554 * @sdev: scsi device to change the state of.
2555 * @state: state to change to.
2556 *
2557  *	Returns zero if successful, or an error if the requested
2558  *	transition is illegal.
2559 */
2560 int
2561 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2562 {
2563 enum scsi_device_state oldstate = sdev->sdev_state;
2564
2565 if (state == oldstate)
2566 return 0;
2567
2568 switch (state) {
2569 case SDEV_CREATED:
2570 switch (oldstate) {
2571 case SDEV_CREATED_BLOCK:
2572 break;
2573 default:
2574 goto illegal;
2575 }
2576 break;
2577
2578 case SDEV_RUNNING:
2579 switch (oldstate) {
2580 case SDEV_CREATED:
2581 case SDEV_OFFLINE:
2582 case SDEV_TRANSPORT_OFFLINE:
2583 case SDEV_QUIESCE:
2584 case SDEV_BLOCK:
2585 break;
2586 default:
2587 goto illegal;
2588 }
2589 break;
2590
2591 case SDEV_QUIESCE:
2592 switch (oldstate) {
2593 case SDEV_RUNNING:
2594 case SDEV_OFFLINE:
2595 case SDEV_TRANSPORT_OFFLINE:
2596 break;
2597 default:
2598 goto illegal;
2599 }
2600 break;
2601
2602 case SDEV_OFFLINE:
2603 case SDEV_TRANSPORT_OFFLINE:
2604 switch (oldstate) {
2605 case SDEV_CREATED:
2606 case SDEV_RUNNING:
2607 case SDEV_QUIESCE:
2608 case SDEV_BLOCK:
2609 break;
2610 default:
2611 goto illegal;
2612 }
2613 break;
2614
2615 case SDEV_BLOCK:
2616 switch (oldstate) {
2617 case SDEV_RUNNING:
2618 case SDEV_CREATED_BLOCK:
2619 break;
2620 default:
2621 goto illegal;
2622 }
2623 break;
2624
2625 case SDEV_CREATED_BLOCK:
2626 switch (oldstate) {
2627 case SDEV_CREATED:
2628 break;
2629 default:
2630 goto illegal;
2631 }
2632 break;
2633
2634 case SDEV_CANCEL:
2635 switch (oldstate) {
2636 case SDEV_CREATED:
2637 case SDEV_RUNNING:
2638 case SDEV_QUIESCE:
2639 case SDEV_OFFLINE:
2640 case SDEV_TRANSPORT_OFFLINE:
2641 case SDEV_BLOCK:
2642 break;
2643 default:
2644 goto illegal;
2645 }
2646 break;
2647
2648 case SDEV_DEL:
2649 switch (oldstate) {
2650 case SDEV_CREATED:
2651 case SDEV_RUNNING:
2652 case SDEV_OFFLINE:
2653 case SDEV_TRANSPORT_OFFLINE:
2654 case SDEV_CANCEL:
2655 case SDEV_CREATED_BLOCK:
2656 break;
2657 default:
2658 goto illegal;
2659 }
2660 break;
2661
2662 }
2663 sdev->sdev_state = state;
2664 return 0;
2665
2666 illegal:
2667 SCSI_LOG_ERROR_RECOVERY(1,
2668 sdev_printk(KERN_ERR, sdev,
2669 "Illegal state transition %s->%s",
2670 scsi_device_state_name(oldstate),
2671 scsi_device_state_name(state))
2672 );
2673 return -EINVAL;
2674 }
2675 EXPORT_SYMBOL(scsi_device_set_state);
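
/*
 * Example (illustrative sketch only): take a running device offline and
 * report a refused transition:
 *
 *	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
 *		sdev_printk(KERN_ERR, sdev,
 *			    "cannot transition to SDEV_OFFLINE\n");
 */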
2676
2677 /**
2678  * 	scsi_evt_emit - emit a single SCSI device uevent
2679 * @sdev: associated SCSI device
2680 * @evt: event to emit
2681 *
2682 * Send a single uevent (scsi_event) to the associated scsi_device.
2683 */
2684 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2685 {
2686 int idx = 0;
2687 char *envp[3];
2688
2689 switch (evt->evt_type) {
2690 case SDEV_EVT_MEDIA_CHANGE:
2691 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2692 break;
2693 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2694 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2695 break;
2696 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2697 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2698 break;
2699 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2700 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2701 break;
2702 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2703 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2704 break;
2705 case SDEV_EVT_LUN_CHANGE_REPORTED:
2706 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2707 break;
2708 default:
2709 /* do nothing */
2710 break;
2711 }
2712
2713 envp[idx++] = NULL;
2714
2715 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2716 }
2717
2718 /**
2719  * 	scsi_evt_thread - send a uevent for each scsi event
2720 * @work: work struct for scsi_device
2721 *
2722 * Dispatch queued events to their associated scsi_device kobjects
2723 * as uevents.
2724 */
2725 void scsi_evt_thread(struct work_struct *work)
2726 {
2727 struct scsi_device *sdev;
2728 enum scsi_device_event evt_type;
2729 LIST_HEAD(event_list);
2730
2731 sdev = container_of(work, struct scsi_device, event_work);
2732
2733 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2734 if (test_and_clear_bit(evt_type, sdev->pending_events))
2735 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2736
2737 while (1) {
2738 struct scsi_event *evt;
2739 struct list_head *this, *tmp;
2740 unsigned long flags;
2741
2742 spin_lock_irqsave(&sdev->list_lock, flags);
2743 list_splice_init(&sdev->event_list, &event_list);
2744 spin_unlock_irqrestore(&sdev->list_lock, flags);
2745
2746 if (list_empty(&event_list))
2747 break;
2748
2749 list_for_each_safe(this, tmp, &event_list) {
2750 evt = list_entry(this, struct scsi_event, node);
2751 list_del(&evt->node);
2752 scsi_evt_emit(sdev, evt);
2753 kfree(evt);
2754 }
2755 }
2756 }
2757
2758 /**
2759 * sdev_evt_send - send asserted event to uevent thread
2760 * @sdev: scsi_device event occurred on
2761 * @evt: event to send
2762 *
2763 * Assert scsi device event asynchronously.
2764 */
2765 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2766 {
2767 unsigned long flags;
2768
2769 #if 0
2770 /* FIXME: currently this check eliminates all media change events
2771 * for polled devices. Need to update to discriminate between AN
2772 * and polled events */
2773 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2774 kfree(evt);
2775 return;
2776 }
2777 #endif
2778
2779 spin_lock_irqsave(&sdev->list_lock, flags);
2780 list_add_tail(&evt->node, &sdev->event_list);
2781 schedule_work(&sdev->event_work);
2782 spin_unlock_irqrestore(&sdev->list_lock, flags);
2783 }
2784 EXPORT_SYMBOL_GPL(sdev_evt_send);
2785
2786 /**
2787 * sdev_evt_alloc - allocate a new scsi event
2788 * @evt_type: type of event to allocate
2789 * @gfpflags: GFP flags for allocation
2790 *
2791 * Allocates and returns a new scsi_event.
2792 */
2793 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2794 gfp_t gfpflags)
2795 {
2796 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2797 if (!evt)
2798 return NULL;
2799
2800 evt->evt_type = evt_type;
2801 INIT_LIST_HEAD(&evt->node);
2802
2803 /* evt_type-specific initialization, if any */
2804 switch (evt_type) {
2805 case SDEV_EVT_MEDIA_CHANGE:
2806 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2807 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2808 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2809 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2810 case SDEV_EVT_LUN_CHANGE_REPORTED:
2811 default:
2812 /* do nothing */
2813 break;
2814 }
2815
2816 return evt;
2817 }
2818 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2819
2820 /**
2821 * sdev_evt_send_simple - send asserted event to uevent thread
2822 * @sdev: scsi_device event occurred on
2823 * @evt_type: type of event to send
2824 * @gfpflags: GFP flags for allocation
2825 *
2826 * Assert scsi device event asynchronously, given an event type.
2827 */
2828 void sdev_evt_send_simple(struct scsi_device *sdev,
2829 enum scsi_device_event evt_type, gfp_t gfpflags)
2830 {
2831 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2832 if (!evt) {
2833 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2834 evt_type);
2835 return;
2836 }
2837
2838 sdev_evt_send(sdev, evt);
2839 }
2840 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
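
/*
 * Example (illustrative sketch only): a driver that has just detected a
 * medium change can assert the corresponding event, which
 * scsi_evt_thread() later emits as a SDEV_MEDIA_CHANGE=1 uevent:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 */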
2841
2842 /**
2843 * scsi_device_quiesce - Block user issued commands.
2844 * @sdev: scsi device to quiesce.
2845 *
2846 * This works by trying to transition to the SDEV_QUIESCE state
2847 * (which must be a legal transition). When the device is in this
2848  *	state, only special requests will be accepted; all others will
2849 * be deferred. Since special requests may also be requeued requests,
2850 * a successful return doesn't guarantee the device will be
2851 * totally quiescent.
2852 *
2853 * Must be called with user context, may sleep.
2854 *
2855  *	Returns zero if successful, or an error if not.
2856 */
2857 int
2858 scsi_device_quiesce(struct scsi_device *sdev)
2859 {
2860 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2861 if (err)
2862 return err;
2863
2864 scsi_run_queue(sdev->request_queue);
2865 while (atomic_read(&sdev->device_busy)) {
2866 msleep_interruptible(200);
2867 scsi_run_queue(sdev->request_queue);
2868 }
2869 return 0;
2870 }
2871 EXPORT_SYMBOL(scsi_device_quiesce);
2872
2873 /**
2874 * scsi_device_resume - Restart user issued commands to a quiesced device.
2875 * @sdev: scsi device to resume.
2876 *
2877 * Moves the device from quiesced back to running and restarts the
2878 * queues.
2879 *
2880 * Must be called with user context, may sleep.
2881 */
2882 void scsi_device_resume(struct scsi_device *sdev)
2883 {
2884 /* check if the device state was mutated prior to resume, and if
2885 * so assume the state is being managed elsewhere (for example
2886 * device deleted during suspend)
2887 */
2888 if (sdev->sdev_state != SDEV_QUIESCE ||
2889 scsi_device_set_state(sdev, SDEV_RUNNING))
2890 return;
2891 scsi_run_queue(sdev->request_queue);
2892 }
2893 EXPORT_SYMBOL(scsi_device_resume);
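
/*
 * Example (illustrative sketch only): power-management style usage,
 * stopping user I/O for the duration of a suspend-like operation:
 *
 *	int err = scsi_device_quiesce(sdev);
 *	if (err)
 *		return err;
 *	... issue only internal (special) requests here ...
 *	scsi_device_resume(sdev);
 */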
2894
2895 static void
2896 device_quiesce_fn(struct scsi_device *sdev, void *data)
2897 {
2898 scsi_device_quiesce(sdev);
2899 }
2900
2901 void
2902 scsi_target_quiesce(struct scsi_target *starget)
2903 {
2904 starget_for_each_device(starget, NULL, device_quiesce_fn);
2905 }
2906 EXPORT_SYMBOL(scsi_target_quiesce);
2907
2908 static void
2909 device_resume_fn(struct scsi_device *sdev, void *data)
2910 {
2911 scsi_device_resume(sdev);
2912 }
2913
2914 void
2915 scsi_target_resume(struct scsi_target *starget)
2916 {
2917 starget_for_each_device(starget, NULL, device_resume_fn);
2918 }
2919 EXPORT_SYMBOL(scsi_target_resume);
2920
2921 /**
2922 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2923 * @sdev: device to block
2924 *
2925  * Block requests made by SCSI LLDs to temporarily stop all
2926 * scsi commands on the specified device. Called from interrupt
2927 * or normal process context.
2928 *
2929 * Returns zero if successful or error if not
2930 *
2931 * Notes:
2932 * This routine transitions the device to the SDEV_BLOCK state
2933 * (which must be a legal transition). When the device is in this
2934  *	state, all commands are deferred until the SCSI LLD reenables
2935  *	the device with scsi_internal_device_unblock() or device_block_tmo fires.
2936 */
2937 int
2938 scsi_internal_device_block(struct scsi_device *sdev)
2939 {
2940 struct request_queue *q = sdev->request_queue;
2941 unsigned long flags;
2942 int err = 0;
2943
2944 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2945 if (err) {
2946 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2947
2948 if (err)
2949 return err;
2950 }
2951
2952 /*
2953 * The device has transitioned to SDEV_BLOCK. Stop the
2954 * block layer from calling the midlayer with this device's
2955 * request queue.
2956 */
2957 if (q->mq_ops) {
2958 blk_mq_stop_hw_queues(q);
2959 } else {
2960 spin_lock_irqsave(q->queue_lock, flags);
2961 blk_stop_queue(q);
2962 spin_unlock_irqrestore(q->queue_lock, flags);
2963 }
2964
2965 return 0;
2966 }
2967 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2968
2969 /**
2970 * scsi_internal_device_unblock - resume a device after a block request
2971 * @sdev: device to resume
2972 * @new_state: state to set devices to after unblocking
2973 *
2974  * Called by SCSI LLDs or the midlayer to restart the device queue
2975 * for the previously suspended scsi device. Called from interrupt or
2976 * normal process context.
2977 *
2978 * Returns zero if successful or error if not.
2979 *
2980 * Notes:
2981 * This routine transitions the device to the SDEV_RUNNING state
2982 * or to one of the offline states (which must be a legal transition)
2983 * allowing the midlayer to goose the queue for this device.
2984 */
2985 int
2986 scsi_internal_device_unblock(struct scsi_device *sdev,
2987 enum scsi_device_state new_state)
2988 {
2989 struct request_queue *q = sdev->request_queue;
2990 unsigned long flags;
2991
2992 /*
2993 * Try to transition the scsi device to SDEV_RUNNING or one of the
2994 * offlined states and goose the device queue if successful.
2995 */
2996 if ((sdev->sdev_state == SDEV_BLOCK) ||
2997 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
2998 sdev->sdev_state = new_state;
2999 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
3000 if (new_state == SDEV_TRANSPORT_OFFLINE ||
3001 new_state == SDEV_OFFLINE)
3002 sdev->sdev_state = new_state;
3003 else
3004 sdev->sdev_state = SDEV_CREATED;
3005 } else if (sdev->sdev_state != SDEV_CANCEL &&
3006 sdev->sdev_state != SDEV_OFFLINE)
3007 return -EINVAL;
3008
3009 if (q->mq_ops) {
3010 blk_mq_start_stopped_hw_queues(q, false);
3011 } else {
3012 spin_lock_irqsave(q->queue_lock, flags);
3013 blk_start_queue(q);
3014 spin_unlock_irqrestore(q->queue_lock, flags);
3015 }
3016
3017 return 0;
3018 }
3019 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
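
/*
 * Example (illustrative sketch only): an LLD that loses contact with a
 * device can park its queue and release it once recovery succeeds:
 *
 *	scsi_internal_device_block(sdev);
 *	... run transport-level recovery ...
 *	scsi_internal_device_unblock(sdev, SDEV_RUNNING);
 */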
3020
3021 static void
3022 device_block(struct scsi_device *sdev, void *data)
3023 {
3024 scsi_internal_device_block(sdev);
3025 }
3026
3027 static int
3028 target_block(struct device *dev, void *data)
3029 {
3030 if (scsi_is_target_device(dev))
3031 starget_for_each_device(to_scsi_target(dev), NULL,
3032 device_block);
3033 return 0;
3034 }
3035
3036 void
3037 scsi_target_block(struct device *dev)
3038 {
3039 if (scsi_is_target_device(dev))
3040 starget_for_each_device(to_scsi_target(dev), NULL,
3041 device_block);
3042 else
3043 device_for_each_child(dev, NULL, target_block);
3044 }
3045 EXPORT_SYMBOL_GPL(scsi_target_block);
3046
3047 static void
3048 device_unblock(struct scsi_device *sdev, void *data)
3049 {
3050 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
3051 }
3052
3053 static int
3054 target_unblock(struct device *dev, void *data)
3055 {
3056 if (scsi_is_target_device(dev))
3057 starget_for_each_device(to_scsi_target(dev), data,
3058 device_unblock);
3059 return 0;
3060 }
3061
3062 void
3063 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
3064 {
3065 if (scsi_is_target_device(dev))
3066 starget_for_each_device(to_scsi_target(dev), &new_state,
3067 device_unblock);
3068 else
3069 device_for_each_child(dev, &new_state, target_unblock);
3070 }
3071 EXPORT_SYMBOL_GPL(scsi_target_unblock);
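
/*
 * Example (illustrative sketch only): transport classes such as FC block
 * a whole target around link loss and unblock it on recovery:
 *
 *	scsi_target_block(&starget->dev);
 *	... wait for the link to come back or a timeout to fire ...
 *	scsi_target_unblock(&starget->dev, SDEV_RUNNING);
 */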
3072
3073 /**
3074  * scsi_kmap_atomic_sg - find and atomically map an sg element
3075 * @sgl: scatter-gather list
3076 * @sg_count: number of segments in sg
3077 * @offset: offset in bytes into sg, on return offset into the mapped area
3078 * @len: bytes to map, on return number of bytes mapped
3079 *
3080 * Returns virtual address of the start of the mapped page
3081 */
3082 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
3083 size_t *offset, size_t *len)
3084 {
3085 int i;
3086 size_t sg_len = 0, len_complete = 0;
3087 struct scatterlist *sg;
3088 struct page *page;
3089
3090 WARN_ON(!irqs_disabled());
3091
3092 for_each_sg(sgl, sg, sg_count, i) {
3093 len_complete = sg_len; /* Complete sg-entries */
3094 sg_len += sg->length;
3095 if (sg_len > *offset)
3096 break;
3097 }
3098
3099 if (unlikely(i == sg_count)) {
3100 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
3101 "elements %d\n",
3102 __func__, sg_len, *offset, sg_count);
3103 WARN_ON(1);
3104 return NULL;
3105 }
3106
3107 /* Offset starting from the beginning of first page in this sg-entry */
3108 *offset = *offset - len_complete + sg->offset;
3109
3110 /* Assumption: contiguous pages can be accessed as "page + i" */
3111 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
3112 *offset &= ~PAGE_MASK;
3113
3114 /* Bytes in this sg-entry from *offset to the end of the page */
3115 sg_len = PAGE_SIZE - *offset;
3116 if (*len > sg_len)
3117 *len = sg_len;
3118
3119 return kmap_atomic(page);
3120 }
3121 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
3122
3123 /**
3124 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
3125 * @virt: virtual address to be unmapped
3126 */
3127 void scsi_kunmap_atomic_sg(void *virt)
3128 {
3129 kunmap_atomic(virt);
3130 }
3131 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
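
/*
 * Example (illustrative sketch only; the offset is an assumption): read
 * one byte out of a command's scatterlist with interrupts disabled.
 * @offset and @len are updated in place by scsi_kmap_atomic_sg():
 *
 *	size_t offset = 512, len = 1;
 *	unsigned char *p, byte;
 *
 *	p = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				&offset, &len);
 *	if (p) {
 *		byte = p[offset];
 *		scsi_kunmap_atomic_sg(p);
 *	}
 */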
3132
3133 void sdev_disable_disk_events(struct scsi_device *sdev)
3134 {
3135 atomic_inc(&sdev->disk_events_disable_depth);
3136 }
3137 EXPORT_SYMBOL(sdev_disable_disk_events);
3138
3139 void sdev_enable_disk_events(struct scsi_device *sdev)
3140 {
3141 if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
3142 return;
3143 atomic_dec(&sdev->disk_events_disable_depth);
3144 }
3145 EXPORT_SYMBOL(sdev_enable_disk_events);
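
/*
 * Example (illustrative sketch only): suppress disk-event polling while
 * reconfiguring a device, then re-enable it:
 *
 *	sdev_disable_disk_events(sdev);
 *	... reconfigure the device ...
 *	sdev_enable_disk_events(sdev);
 */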