1 #ifndef _LINUX_BLKDEV_H
2 #define _LINUX_BLKDEV_H
3
4 #include <linux/sched.h>
5
6 #ifdef CONFIG_BLOCK
7
8 #include <linux/major.h>
9 #include <linux/genhd.h>
10 #include <linux/list.h>
11 #include <linux/llist.h>
12 #include <linux/timer.h>
13 #include <linux/workqueue.h>
14 #include <linux/pagemap.h>
15 #include <linux/backing-dev.h>
16 #include <linux/wait.h>
17 #include <linux/mempool.h>
18 #include <linux/bio.h>
19 #include <linux/stringify.h>
20 #include <linux/gfp.h>
21 #include <linux/bsg.h>
22 #include <linux/smp.h>
23 #include <linux/rcupdate.h>
24 #include <linux/percpu-refcount.h>
25
26 #include <asm/scatterlist.h>
27
28 struct module;
29 struct scsi_ioctl_command;
30
31 struct request_queue;
32 struct elevator_queue;
33 struct request_pm_state;
34 struct blk_trace;
35 struct request;
36 struct sg_io_hdr;
37 struct bsg_job;
38 struct blkcg_gq;
39 struct blk_flush_queue;
40
41 #define BLKDEV_MIN_RQ 4
42 #define BLKDEV_MAX_RQ 128 /* Default maximum */
43
44 /*
45 * Maximum number of blkcg policies allowed to be registered concurrently.
46 * Defined here to simplify include dependency.
47 */
48 #define BLKCG_MAX_POLS 2
49
50 struct request;
51 typedef void (rq_end_io_fn)(struct request *, int);
52
53 #define BLK_RL_SYNCFULL (1U << 0)
54 #define BLK_RL_ASYNCFULL (1U << 1)
55
56 struct request_list {
57 struct request_queue *q; /* the queue this rl belongs to */
58 #ifdef CONFIG_BLK_CGROUP
59 struct blkcg_gq *blkg; /* blkg this request pool belongs to */
60 #endif
61 /*
62 * count[], starved[], and wait[] are indexed by
63 * BLK_RW_SYNC/BLK_RW_ASYNC
64 */
65 int count[2];
66 int starved[2];
67 mempool_t *rq_pool;
68 wait_queue_head_t wait[2];
69 unsigned int flags;
70 };
71
72 /*
73 * request command types
74 */
75 enum rq_cmd_type_bits {
76 REQ_TYPE_FS = 1, /* fs request */
77 REQ_TYPE_BLOCK_PC, /* scsi command */
78 REQ_TYPE_SENSE, /* sense request */
79 REQ_TYPE_PM_SUSPEND, /* suspend request */
80 REQ_TYPE_PM_RESUME, /* resume request */
81 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
82 REQ_TYPE_DRV_PRIV, /* driver defined types from here */
83 };
84
85 #define BLK_MAX_CDB 16
86
87 /*
88 * Try to put the fields that are referenced together in the same cacheline.
89 *
90 * If you modify this structure, make sure to update blk_rq_init() and
91 * especially blk_mq_rq_ctx_init() to take care of the added fields.
92 */
93 struct request {
94 struct list_head queuelist;
95 union {
96 struct call_single_data csd;
97 unsigned long fifo_time;
98 };
99
100 struct request_queue *q;
101 struct blk_mq_ctx *mq_ctx;
102
103 u64 cmd_flags;
104 unsigned cmd_type;
105 unsigned long atomic_flags;
106
107 int cpu;
108
109 /* the following two fields are internal, NEVER access directly */
110 unsigned int __data_len; /* total data len */
111 sector_t __sector; /* sector cursor */
112
113 struct bio *bio;
114 struct bio *biotail;
115
116 /*
117 * The hash is used inside the scheduler, and killed once the
118 * request reaches the dispatch list. The ipi_list is only used
119 * to queue the request for softirq completion, which is long
120 * after the request has been unhashed (and even removed from
121 * the dispatch list).
122 */
123 union {
124 struct hlist_node hash; /* merge hash */
125 struct list_head ipi_list;
126 };
127
128 /*
129 * The rb_node is only used inside the io scheduler, requests
130 * are pruned when moved to the dispatch queue. So let the
131 * completion_data share space with the rb_node.
132 */
133 union {
134 struct rb_node rb_node; /* sort/lookup */
135 void *completion_data;
136 };
137
138 /*
139 * Three pointers are available for the IO schedulers, if they need
140 * more they have to dynamically allocate it. Flush requests are
141 * never put on the IO scheduler. So let the flush fields share
142 * space with the elevator data.
143 */
144 union {
145 struct {
146 struct io_cq *icq;
147 void *priv[2];
148 } elv;
149
150 struct {
151 unsigned int seq;
152 struct list_head list;
153 rq_end_io_fn *saved_end_io;
154 } flush;
155 };
156
157 struct gendisk *rq_disk;
158 struct hd_struct *part;
159 unsigned long start_time;
160 #ifdef CONFIG_BLK_CGROUP
161 struct request_list *rl; /* rl this rq is alloced from */
162 unsigned long long start_time_ns;
163 unsigned long long io_start_time_ns; /* when passed to hardware */
164 #endif
165 /* Number of scatter-gather DMA addr+len pairs after
166 * physical address coalescing is performed.
167 */
168 unsigned short nr_phys_segments;
169 #if defined(CONFIG_BLK_DEV_INTEGRITY)
170 unsigned short nr_integrity_segments;
171 #endif
172
173 unsigned short ioprio;
174
175 void *special; /* opaque pointer available for LLD use */
176
177 int tag;
178 int errors;
179
180 /*
181 * when request is used as a packet command carrier
182 */
183 unsigned char __cmd[BLK_MAX_CDB];
184 unsigned char *cmd;
185 unsigned short cmd_len;
186
187 unsigned int extra_len; /* length of alignment and padding */
188 unsigned int sense_len;
189 unsigned int resid_len; /* residual count */
190 void *sense;
191
192 unsigned long deadline;
193 struct list_head timeout_list;
194 unsigned int timeout;
195 int retries;
196
197 /*
198 * completion callback.
199 */
200 rq_end_io_fn *end_io;
201 void *end_io_data;
202
203 /* for bidi */
204 struct request *next_rq;
205 };
206
207 static inline unsigned short req_get_ioprio(struct request *req)
208 {
209 return req->ioprio;
210 }
211
212 /*
213 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
214 * requests. Some step values could eventually be made generic.
215 */
216 struct request_pm_state
217 {
218 /* PM state machine step value, currently driver specific */
219 int pm_step;
220 /* requested PM state value (S1, S2, S3, S4, ...) */
221 u32 pm_state;
222 	void *data;	/* for driver use */
223 };
224
225 #include <linux/elevator.h>
226
227 struct blk_queue_ctx;
228
229 typedef void (request_fn_proc) (struct request_queue *q);
230 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
231 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
232 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
233
234 struct bio_vec;
235 struct bvec_merge_data {
236 struct block_device *bi_bdev;
237 sector_t bi_sector;
238 unsigned bi_size;
239 unsigned long bi_rw;
240 };
241 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
242 struct bio_vec *);
243 typedef void (softirq_done_fn)(struct request *);
244 typedef int (dma_drain_needed_fn)(struct request *);
245 typedef int (lld_busy_fn) (struct request_queue *q);
246 typedef int (bsg_job_fn) (struct bsg_job *);
247
248 enum blk_eh_timer_return {
249 BLK_EH_NOT_HANDLED,
250 BLK_EH_HANDLED,
251 BLK_EH_RESET_TIMER,
252 };
253
254 typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
255
256 enum blk_queue_state {
257 Queue_down,
258 Queue_up,
259 };
260
261 struct blk_queue_tag {
262 struct request **tag_index; /* map of busy tags */
263 unsigned long *tag_map; /* bit map of free/busy tags */
264 int busy; /* current depth */
265 int max_depth; /* what we will send to device */
266 int real_max_depth; /* what the array can hold */
267 atomic_t refcnt; /* map can be shared */
268 int alloc_policy; /* tag allocation policy */
269 int next_tag; /* next tag */
270 };
271 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
272 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
273
274 #define BLK_SCSI_MAX_CMDS (256)
275 #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
276
277 struct queue_limits {
278 unsigned long bounce_pfn;
279 unsigned long seg_boundary_mask;
280
281 unsigned int max_hw_sectors;
282 unsigned int chunk_sectors;
283 unsigned int max_sectors;
284 unsigned int max_segment_size;
285 unsigned int physical_block_size;
286 unsigned int alignment_offset;
287 unsigned int io_min;
288 unsigned int io_opt;
289 unsigned int max_discard_sectors;
290 unsigned int max_write_same_sectors;
291 unsigned int discard_granularity;
292 unsigned int discard_alignment;
293
294 unsigned short logical_block_size;
295 unsigned short max_segments;
296 unsigned short max_integrity_segments;
297
298 unsigned char misaligned;
299 unsigned char discard_misaligned;
300 unsigned char cluster;
301 unsigned char discard_zeroes_data;
302 unsigned char raid_partial_stripes_expensive;
303 };
304
305 struct request_queue {
306 /*
307 * Together with queue_head for cacheline sharing
308 */
309 struct list_head queue_head;
310 struct request *last_merge;
311 struct elevator_queue *elevator;
312 int nr_rqs[2]; /* # allocated [a]sync rqs */
313 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
314
315 /*
316 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
317 * is used, root blkg allocates from @q->root_rl and all other
318 * blkgs from their own blkg->rl. Which one to use should be
319 * determined using bio_request_list().
320 */
321 struct request_list root_rl;
322
323 request_fn_proc *request_fn;
324 make_request_fn *make_request_fn;
325 prep_rq_fn *prep_rq_fn;
326 unprep_rq_fn *unprep_rq_fn;
327 merge_bvec_fn *merge_bvec_fn;
328 softirq_done_fn *softirq_done_fn;
329 rq_timed_out_fn *rq_timed_out_fn;
330 dma_drain_needed_fn *dma_drain_needed;
331 lld_busy_fn *lld_busy_fn;
332
333 struct blk_mq_ops *mq_ops;
334
335 unsigned int *mq_map;
336
337 /* sw queues */
338 struct blk_mq_ctx __percpu *queue_ctx;
339 unsigned int nr_queues;
340
341 /* hw dispatch queues */
342 struct blk_mq_hw_ctx **queue_hw_ctx;
343 unsigned int nr_hw_queues;
344
345 /*
346 * Dispatch queue sorting
347 */
348 sector_t end_sector;
349 struct request *boundary_rq;
350
351 /*
352 * Delayed queue handling
353 */
354 struct delayed_work delay_work;
355
356 struct backing_dev_info backing_dev_info;
357
358 /*
359 * The queue owner gets to use this for whatever they like.
360 * ll_rw_blk doesn't touch it.
361 */
362 void *queuedata;
363
364 /*
365 * various queue flags, see QUEUE_* below
366 */
367 unsigned long queue_flags;
368
369 /*
370 * ida allocated id for this queue. Used to index queues from
371 * ioctx.
372 */
373 int id;
374
375 /*
376 * queue needs bounce pages for pages above this limit
377 */
378 gfp_t bounce_gfp;
379
380 /*
381 * protects queue structures from reentrancy. ->__queue_lock should
382 	 * _never_ be used directly; it is queue private. Always use
383 * ->queue_lock.
384 */
385 spinlock_t __queue_lock;
386 spinlock_t *queue_lock;
387
388 /*
389 * queue kobject
390 */
391 struct kobject kobj;
392
393 /*
394 * mq queue kobject
395 */
396 struct kobject mq_kobj;
397
398 #ifdef CONFIG_PM
399 struct device *dev;
400 int rpm_status;
401 unsigned int nr_pending;
402 #endif
403
404 /*
405 * queue settings
406 */
407 unsigned long nr_requests; /* Max # of requests */
408 unsigned int nr_congestion_on;
409 unsigned int nr_congestion_off;
410 unsigned int nr_batching;
411
412 unsigned int dma_drain_size;
413 void *dma_drain_buffer;
414 unsigned int dma_pad_mask;
415 unsigned int dma_alignment;
416
417 struct blk_queue_tag *queue_tags;
418 struct list_head tag_busy_list;
419
420 unsigned int nr_sorted;
421 unsigned int in_flight[2];
422 /*
423 * Number of active block driver functions for which blk_drain_queue()
424 * must wait. Must be incremented around functions that unlock the
425 * queue_lock internally, e.g. scsi_request_fn().
426 */
427 unsigned int request_fn_active;
428
429 unsigned int rq_timeout;
430 struct timer_list timeout;
431 struct list_head timeout_list;
432
433 struct list_head icq_list;
434 #ifdef CONFIG_BLK_CGROUP
435 DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
436 struct blkcg_gq *root_blkg;
437 struct list_head blkg_list;
438 #endif
439
440 struct queue_limits limits;
441
442 /*
443 * sg stuff
444 */
445 unsigned int sg_timeout;
446 unsigned int sg_reserved_size;
447 int node;
448 #ifdef CONFIG_BLK_DEV_IO_TRACE
449 struct blk_trace *blk_trace;
450 #endif
451 /*
452 * for flush operations
453 */
454 unsigned int flush_flags;
455 unsigned int flush_not_queueable:1;
456 struct blk_flush_queue *fq;
457
458 struct list_head requeue_list;
459 spinlock_t requeue_lock;
460 struct work_struct requeue_work;
461
462 struct mutex sysfs_lock;
463
464 int bypass_depth;
465 int mq_freeze_depth;
466
467 #if defined(CONFIG_BLK_DEV_BSG)
468 bsg_job_fn *bsg_job_fn;
469 int bsg_job_size;
470 struct bsg_class_device bsg_dev;
471 #endif
472
473 #ifdef CONFIG_BLK_DEV_THROTTLING
474 /* Throttle data */
475 struct throtl_data *td;
476 #endif
477 struct rcu_head rcu_head;
478 wait_queue_head_t mq_freeze_wq;
479 struct percpu_ref mq_usage_counter;
480 struct list_head all_q_node;
481
482 struct blk_mq_tag_set *tag_set;
483 struct list_head tag_set_list;
484 };
485
486 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
487 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
488 #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
489 #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
490 #define QUEUE_FLAG_DYING 5 /* queue being torn down */
491 #define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
492 #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
493 #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
494 #define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
495 #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
496 #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
497 #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
498 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
499 #define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
500 #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
501 #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
502 #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
503 #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
504 #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
505 #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
506 #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
507 #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
508 #define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
509
510 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
511 (1 << QUEUE_FLAG_STACKABLE) | \
512 (1 << QUEUE_FLAG_SAME_COMP) | \
513 (1 << QUEUE_FLAG_ADD_RANDOM))
514
515 #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
516 (1 << QUEUE_FLAG_STACKABLE) | \
517 (1 << QUEUE_FLAG_SAME_COMP))
518
519 static inline void queue_lockdep_assert_held(struct request_queue *q)
520 {
521 if (q->queue_lock)
522 lockdep_assert_held(q->queue_lock);
523 }
524
525 static inline void queue_flag_set_unlocked(unsigned int flag,
526 struct request_queue *q)
527 {
528 __set_bit(flag, &q->queue_flags);
529 }
530
531 static inline int queue_flag_test_and_clear(unsigned int flag,
532 struct request_queue *q)
533 {
534 queue_lockdep_assert_held(q);
535
536 if (test_bit(flag, &q->queue_flags)) {
537 __clear_bit(flag, &q->queue_flags);
538 return 1;
539 }
540
541 return 0;
542 }
543
544 static inline int queue_flag_test_and_set(unsigned int flag,
545 struct request_queue *q)
546 {
547 queue_lockdep_assert_held(q);
548
549 if (!test_bit(flag, &q->queue_flags)) {
550 __set_bit(flag, &q->queue_flags);
551 return 0;
552 }
553
554 return 1;
555 }
556
557 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
558 {
559 queue_lockdep_assert_held(q);
560 __set_bit(flag, &q->queue_flags);
561 }
562
563 static inline void queue_flag_clear_unlocked(unsigned int flag,
564 struct request_queue *q)
565 {
566 __clear_bit(flag, &q->queue_flags);
567 }
568
569 static inline int queue_in_flight(struct request_queue *q)
570 {
571 return q->in_flight[0] + q->in_flight[1];
572 }
573
574 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
575 {
576 queue_lockdep_assert_held(q);
577 __clear_bit(flag, &q->queue_flags);
578 }
579
580 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
581 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
582 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
583 #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
584 #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
585 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
586 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
587 #define blk_queue_noxmerges(q) \
588 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
589 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
590 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
591 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
592 #define blk_queue_stackable(q) \
593 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
594 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
595 #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
596 test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
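/*
 * Illustrative sketch (not from this header): a caller might use the test
 * macros above to gate optional features before issuing requests, e.g.:
 *
 *	if (blk_queue_discard(q))
 *		blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 0);
 */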
597
598 #define blk_noretry_request(rq) \
599 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
600 REQ_FAILFAST_DRIVER))
601
602 #define blk_account_rq(rq) \
603 (((rq)->cmd_flags & REQ_STARTED) && \
604 ((rq)->cmd_type == REQ_TYPE_FS))
605
606 #define blk_pm_request(rq) \
607 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
608 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
609
610 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
611 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
612 /* rq->queuelist of dequeued request must be list_empty() */
613 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
614
615 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
616
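/* data direction: bit 0 of cmd_flags (REQ_WRITE); 0 is READ, 1 is WRITE */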
617 #define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
618
619 /*
620 * Driver can handle struct request, if it either has an old style
621 * request_fn defined, or is blk-mq based.
622 */
623 static inline bool queue_is_rq_based(struct request_queue *q)
624 {
625 return q->request_fn || q->mq_ops;
626 }
627
628 static inline unsigned int blk_queue_cluster(struct request_queue *q)
629 {
630 return q->limits.cluster;
631 }
632
633 /*
634  * We regard a request as sync if it is either a read or a sync write
635 */
636 static inline bool rw_is_sync(unsigned int rw_flags)
637 {
638 return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
639 }
640
641 static inline bool rq_is_sync(struct request *rq)
642 {
643 return rw_is_sync(rq->cmd_flags);
644 }
645
646 static inline bool blk_rl_full(struct request_list *rl, bool sync)
647 {
648 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
649
650 return rl->flags & flag;
651 }
652
653 static inline void blk_set_rl_full(struct request_list *rl, bool sync)
654 {
655 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
656
657 rl->flags |= flag;
658 }
659
660 static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
661 {
662 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
663
664 rl->flags &= ~flag;
665 }
666
667 static inline bool rq_mergeable(struct request *rq)
668 {
669 if (rq->cmd_type != REQ_TYPE_FS)
670 return false;
671
672 if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
673 return false;
674
675 return true;
676 }
677
678 static inline bool blk_check_merge_flags(unsigned int flags1,
679 unsigned int flags2)
680 {
681 if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
682 return false;
683
684 if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
685 return false;
686
687 if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
688 return false;
689
690 return true;
691 }
692
693 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
694 {
695 if (bio_data(a) == bio_data(b))
696 return true;
697
698 return false;
699 }
700
701 /*
702 * q->prep_rq_fn return values
703 */
704 #define BLKPREP_OK 0 /* serve it */
705 #define BLKPREP_KILL 1 /* fatal error, kill */
706 #define BLKPREP_DEFER 2 /* leave on queue */
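/*
 * Illustrative sketch (hypothetical driver code, not part of this API):
 * a prep_rq_fn that defers requests while a driver-private resource is
 * busy, and serves them once it becomes available:
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_resource_ready(q->queuedata))
 *			return BLKPREP_DEFER;
 *		return BLKPREP_OK;
 *	}
 */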
707
708 extern unsigned long blk_max_low_pfn, blk_max_pfn;
709
710 /*
711 * standard bounce addresses:
712 *
713 * BLK_BOUNCE_HIGH : bounce all highmem pages
714 * BLK_BOUNCE_ANY : don't bounce anything
715 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
716 */
717
718 #if BITS_PER_LONG == 32
719 #define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
720 #else
721 #define BLK_BOUNCE_HIGH -1ULL
722 #endif
723 #define BLK_BOUNCE_ANY (-1ULL)
724 #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
725
726 /*
727 * default timeout for SG_IO if none specified
728 */
729 #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
730 #define BLK_MIN_SG_TIMEOUT (7 * HZ)
731
732 #ifdef CONFIG_BOUNCE
733 extern int init_emergency_isa_pool(void);
734 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
735 #else
736 static inline int init_emergency_isa_pool(void)
737 {
738 return 0;
739 }
740 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
741 {
742 }
743 #endif /* CONFIG_BOUNCE */
744
745 struct rq_map_data {
746 struct page **pages;
747 int page_order;
748 int nr_entries;
749 unsigned long offset;
750 int null_mapped;
751 int from_user;
752 };
753
754 struct req_iterator {
755 struct bvec_iter iter;
756 struct bio *bio;
757 };
758
759 /* This should not be used directly - use rq_for_each_segment */
760 #define for_each_bio(_bio) \
761 for (; _bio; _bio = _bio->bi_next)
762 #define __rq_for_each_bio(_bio, rq) \
763 if ((rq->bio)) \
764 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
765
766 #define rq_for_each_segment(bvl, _rq, _iter) \
767 __rq_for_each_bio(_iter.bio, _rq) \
768 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
769
770 #define rq_iter_last(bvec, _iter) \
771 (_iter.bio->bi_next == NULL && \
772 bio_iter_last(bvec, _iter.iter))
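/*
 * Illustrative sketch: copying out the data segments of a request, e.g.
 * in a memory-backed driver (rq, buf and do_copy are hypothetical and
 * assumed in scope):
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		do_copy(buf, page_address(bvec.bv_page) + bvec.bv_offset,
 *			bvec.bv_len);
 */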
773
774 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
775 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
776 #endif
777 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
778 extern void rq_flush_dcache_pages(struct request *rq);
779 #else
780 static inline void rq_flush_dcache_pages(struct request *rq)
781 {
782 }
783 #endif
784
785 extern int blk_register_queue(struct gendisk *disk);
786 extern void blk_unregister_queue(struct gendisk *disk);
787 extern void generic_make_request(struct bio *bio);
788 extern void blk_rq_init(struct request_queue *q, struct request *rq);
789 extern void blk_put_request(struct request *);
790 extern void __blk_put_request(struct request_queue *, struct request *);
791 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
792 extern struct request *blk_make_request(struct request_queue *, struct bio *,
793 gfp_t);
794 extern void blk_rq_set_block_pc(struct request *);
795 extern void blk_requeue_request(struct request_queue *, struct request *);
796 extern void blk_add_request_payload(struct request *rq, struct page *page,
797 unsigned int len);
798 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
799 extern int blk_lld_busy(struct request_queue *q);
800 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
801 struct bio_set *bs, gfp_t gfp_mask,
802 int (*bio_ctr)(struct bio *, struct bio *, void *),
803 void *data);
804 extern void blk_rq_unprep_clone(struct request *rq);
805 extern int blk_insert_cloned_request(struct request_queue *q,
806 struct request *rq);
807 extern void blk_delay_queue(struct request_queue *, unsigned long);
808 extern void blk_recount_segments(struct request_queue *, struct bio *);
809 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
810 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
811 unsigned int, void __user *);
812 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
813 unsigned int, void __user *);
814 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
815 struct scsi_ioctl_command __user *);
816
817 extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
818
819 /*
820  * A queue has just exited congestion.  Note this in the global counter of
821 * congested queues, and wake up anyone who was waiting for requests to be
822 * put back.
823 */
824 static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
825 {
826 clear_bdi_congested(&q->backing_dev_info, sync);
827 }
828
829 /*
830 * A queue has just entered congestion. Flag that in the queue's VM-visible
831  * state flags and increment the global counter of congested queues.
832 */
833 static inline void blk_set_queue_congested(struct request_queue *q, int sync)
834 {
835 set_bdi_congested(&q->backing_dev_info, sync);
836 }
837
838 extern void blk_start_queue(struct request_queue *q);
839 extern void blk_stop_queue(struct request_queue *q);
840 extern void blk_sync_queue(struct request_queue *q);
841 extern void __blk_stop_queue(struct request_queue *q);
842 extern void __blk_run_queue(struct request_queue *q);
843 extern void blk_run_queue(struct request_queue *);
844 extern void blk_run_queue_async(struct request_queue *q);
845 extern int blk_rq_map_user(struct request_queue *, struct request *,
846 struct rq_map_data *, void __user *, unsigned long,
847 gfp_t);
848 extern int blk_rq_unmap_user(struct bio *);
849 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
850 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
851 struct rq_map_data *, const struct iov_iter *,
852 gfp_t);
853 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
854 struct request *, int);
855 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
856 struct request *, int, rq_end_io_fn *);
857
858 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
859 {
860 return bdev->bd_disk->queue; /* this is never NULL */
861 }
862
863 /*
864 * blk_rq_pos() : the current sector
865 * blk_rq_bytes() : bytes left in the entire request
866 * blk_rq_cur_bytes() : bytes left in the current segment
867 * blk_rq_err_bytes() : bytes left till the next error boundary
868 * blk_rq_sectors() : sectors left in the entire request
869 * blk_rq_cur_sectors() : sectors left in the current segment
870 */
871 static inline sector_t blk_rq_pos(const struct request *rq)
872 {
873 return rq->__sector;
874 }
875
876 static inline unsigned int blk_rq_bytes(const struct request *rq)
877 {
878 return rq->__data_len;
879 }
880
881 static inline int blk_rq_cur_bytes(const struct request *rq)
882 {
883 return rq->bio ? bio_cur_bytes(rq->bio) : 0;
884 }
885
886 extern unsigned int blk_rq_err_bytes(const struct request *rq);
887
888 static inline unsigned int blk_rq_sectors(const struct request *rq)
889 {
890 return blk_rq_bytes(rq) >> 9;
891 }
892
893 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
894 {
895 return blk_rq_cur_bytes(rq) >> 9;
896 }
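/*
 * Worked example: for a 4 KiB request starting at sector 8, blk_rq_pos()
 * returns 8, blk_rq_bytes() returns 4096 and blk_rq_sectors() returns
 * 4096 >> 9 == 8.
 */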
897
898 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
899 unsigned int cmd_flags)
900 {
901 if (unlikely(cmd_flags & REQ_DISCARD))
902 return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
903
904 if (unlikely(cmd_flags & REQ_WRITE_SAME))
905 return q->limits.max_write_same_sectors;
906
907 return q->limits.max_sectors;
908 }
909
910 /*
911 * Return maximum size of a request at given offset. Only valid for
912 * file system requests.
913 */
914 static inline unsigned int blk_max_size_offset(struct request_queue *q,
915 sector_t offset)
916 {
917 if (!q->limits.chunk_sectors)
918 return q->limits.max_sectors;
919
920 return q->limits.chunk_sectors -
921 (offset & (q->limits.chunk_sectors - 1));
922 }
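/*
 * Worked example: with chunk_sectors == 128 and offset == 130, the result
 * is 128 - (130 & 127) == 126 sectors, so a request starting there cannot
 * cross the next 128-sector chunk boundary.
 */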
923
924 static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
925 {
926 struct request_queue *q = rq->q;
927
928 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
929 return q->limits.max_hw_sectors;
930
931 if (!q->limits.chunk_sectors)
932 return blk_queue_get_max_sectors(q, rq->cmd_flags);
933
934 return min(blk_max_size_offset(q, blk_rq_pos(rq)),
935 blk_queue_get_max_sectors(q, rq->cmd_flags));
936 }
937
938 static inline unsigned int blk_rq_count_bios(struct request *rq)
939 {
940 unsigned int nr_bios = 0;
941 struct bio *bio;
942
943 __rq_for_each_bio(bio, rq)
944 nr_bios++;
945
946 return nr_bios;
947 }
948
949 /*
950 * Request issue related functions.
951 */
952 extern struct request *blk_peek_request(struct request_queue *q);
953 extern void blk_start_request(struct request *rq);
954 extern struct request *blk_fetch_request(struct request_queue *q);
955
956 /*
957 * Request completion related functions.
958 *
959 * blk_update_request() completes given number of bytes and updates
960 * the request without completing it.
961 *
962 * blk_end_request() and friends. __blk_end_request() must be called
963 * with the request queue spinlock acquired.
964 *
965 * Several drivers define their own end_request and call
966 * blk_end_request() for parts of the original function.
967 * This prevents code duplication in drivers.
968 */
969 extern bool blk_update_request(struct request *rq, int error,
970 unsigned int nr_bytes);
971 extern void blk_finish_request(struct request *rq, int error);
972 extern bool blk_end_request(struct request *rq, int error,
973 unsigned int nr_bytes);
974 extern void blk_end_request_all(struct request *rq, int error);
975 extern bool blk_end_request_cur(struct request *rq, int error);
976 extern bool blk_end_request_err(struct request *rq, int error);
977 extern bool __blk_end_request(struct request *rq, int error,
978 unsigned int nr_bytes);
979 extern void __blk_end_request_all(struct request *rq, int error);
980 extern bool __blk_end_request_cur(struct request *rq, int error);
981 extern bool __blk_end_request_err(struct request *rq, int error);
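/*
 * Illustrative sketch (hypothetical single-queue driver): fetch requests
 * and complete them in one go.  do_transfer() stands in for the driver's
 * data transfer; queue_lock is already held in request_fn context, so the
 * __ variants are used:
 *
 *	struct request *rq;
 *
 *	while ((rq = blk_fetch_request(q)) != NULL) {
 *		int error = do_transfer(rq);
 *		__blk_end_request_all(rq, error);
 *	}
 */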
982
983 extern void blk_complete_request(struct request *);
984 extern void __blk_complete_request(struct request *);
985 extern void blk_abort_request(struct request *);
986 extern void blk_unprep_request(struct request *);
987
988 /*
989 * Access functions for manipulating queue properties
990 */
991 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
992 spinlock_t *lock, int node_id);
993 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
994 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
995 request_fn_proc *, spinlock_t *);
996 extern void blk_cleanup_queue(struct request_queue *);
997 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
998 extern void blk_queue_bounce_limit(struct request_queue *, u64);
999 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
1000 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1001 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1002 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1003 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1004 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1005 unsigned int max_discard_sectors);
1006 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1007 unsigned int max_write_same_sectors);
1008 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
1009 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1010 extern void blk_queue_alignment_offset(struct request_queue *q,
1011 unsigned int alignment);
1012 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1013 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1014 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1015 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1016 extern void blk_set_default_limits(struct queue_limits *lim);
1017 extern void blk_set_stacking_limits(struct queue_limits *lim);
1018 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1019 sector_t offset);
1020 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1021 sector_t offset);
1022 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1023 sector_t offset);
1024 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1025 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
1026 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1027 extern int blk_queue_dma_drain(struct request_queue *q,
1028 dma_drain_needed_fn *dma_drain_needed,
1029 void *buf, unsigned int size);
1030 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1031 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1032 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
1033 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1034 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
1035 extern void blk_queue_dma_alignment(struct request_queue *, int);
1036 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1037 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1038 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1039 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1040 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
1041 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1042 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
1043
1044 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1045 extern void blk_dump_rq_flags(struct request *, char *);
1046 extern long nr_blockdev_pages(void);
1047
1048 bool __must_check blk_get_queue(struct request_queue *);
1049 struct request_queue *blk_alloc_queue(gfp_t);
1050 struct request_queue *blk_alloc_queue_node(gfp_t, int);
1051 extern void blk_put_queue(struct request_queue *);
1052
1053 /*
1054 * block layer runtime pm functions
1055 */
1056 #ifdef CONFIG_PM
1057 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1058 extern int blk_pre_runtime_suspend(struct request_queue *q);
1059 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1060 extern void blk_pre_runtime_resume(struct request_queue *q);
1061 extern void blk_post_runtime_resume(struct request_queue *q, int err);
1062 #else
1063 static inline void blk_pm_runtime_init(struct request_queue *q,
1064 struct device *dev) {}
1065 static inline int blk_pre_runtime_suspend(struct request_queue *q)
1066 {
1067 return -ENOSYS;
1068 }
1069 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1070 static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1071 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1072 #endif
1073
1074 /*
1075 * blk_plug permits building a queue of related requests by holding the I/O
1076 * fragments for a short period. This allows merging of sequential requests
1077  * into a single larger request. As the requests are moved from a per-task
1078  * list to the device's request_queue in a batch, scalability improves
1079  * because contention on the request_queue lock is reduced.
1080  *
1081  * It is ok not to disable preemption when adding the request to the plug list
1082  * or when attempting a merge, because blk_schedule_flush_plug() will only
1083  * flush the plug list when the task sleeps by itself. For details, please see
1084  * schedule() where blk_schedule_flush_plug() is called.
1085 */
1086 struct blk_plug {
1087 struct list_head list; /* requests */
1088 struct list_head mq_list; /* blk-mq requests */
1089 struct list_head cb_list; /* md requires an unplug callback */
1090 };
1091 #define BLK_MAX_REQUEST_COUNT 16
1092
1093 struct blk_plug_cb;
1094 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1095 struct blk_plug_cb {
1096 struct list_head list;
1097 blk_plug_cb_fn callback;
1098 void *data;
1099 };
1100 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1101 void *data, int size);
1102 extern void blk_start_plug(struct blk_plug *);
1103 extern void blk_finish_plug(struct blk_plug *);
1104 extern void blk_flush_plug_list(struct blk_plug *, bool);
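/*
 * Illustrative sketch: batching several bios under one plug so they can be
 * merged before reaching the device (bio setup elided; bios and nr_bios
 * are hypothetical):
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(rw, bios[i]);
 *	blk_finish_plug(&plug);
 */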
1105
1106 static inline void blk_flush_plug(struct task_struct *tsk)
1107 {
1108 struct blk_plug *plug = tsk->plug;
1109
1110 if (plug)
1111 blk_flush_plug_list(plug, false);
1112 }
1113
1114 static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1115 {
1116 struct blk_plug *plug = tsk->plug;
1117
1118 if (plug)
1119 blk_flush_plug_list(plug, true);
1120 }
1121
1122 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1123 {
1124 struct blk_plug *plug = tsk->plug;
1125
1126 return plug &&
1127 (!list_empty(&plug->list) ||
1128 !list_empty(&plug->mq_list) ||
1129 !list_empty(&plug->cb_list));
1130 }
1131
1132 /*
1133 * tag stuff
1134 */
1135 extern int blk_queue_start_tag(struct request_queue *, struct request *);
1136 extern struct request *blk_queue_find_tag(struct request_queue *, int);
1137 extern void blk_queue_end_tag(struct request_queue *, struct request *);
1138 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1139 extern void blk_queue_free_tags(struct request_queue *);
1140 extern int blk_queue_resize_tags(struct request_queue *, int);
1141 extern void blk_queue_invalidate_tags(struct request_queue *);
1142 extern struct blk_queue_tag *blk_init_tags(int, int);
1143 extern void blk_free_tags(struct blk_queue_tag *);
1144
1145 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1146 int tag)
1147 {
1148 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1149 return NULL;
1150 return bqt->tag_index[tag];
1151 }
1152
1153 #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
1154
1155 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
1156 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1157 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1158 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1159 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1160 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1161 sector_t nr_sects, gfp_t gfp_mask, bool discard);
1162 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1163 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1164 {
1165 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1166 nr_blocks << (sb->s_blocksize_bits - 9),
1167 gfp_mask, flags);
1168 }
1169 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1170 sector_t nr_blocks, gfp_t gfp_mask)
1171 {
1172 return blkdev_issue_zeroout(sb->s_bdev,
1173 block << (sb->s_blocksize_bits - 9),
1174 nr_blocks << (sb->s_blocksize_bits - 9),
1175 gfp_mask, true);
1176 }
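/*
 * Worked example: with a 4 KiB filesystem block size (s_blocksize_bits ==
 * 12), block N maps to sector N << 3 above, since 12 - 9 == 3.
 */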
1177
1178 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
1179
1180 enum blk_default_limits {
1181 BLK_MAX_SEGMENTS = 128,
1182 BLK_SAFE_MAX_SECTORS = 255,
1183 BLK_MAX_SEGMENT_SIZE = 65536,
1184 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1185 };
1186
1187 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1188
1189 static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1190 {
1191 return q->limits.bounce_pfn;
1192 }
1193
1194 static inline unsigned long queue_segment_boundary(struct request_queue *q)
1195 {
1196 return q->limits.seg_boundary_mask;
1197 }
1198
1199 static inline unsigned int queue_max_sectors(struct request_queue *q)
1200 {
1201 return q->limits.max_sectors;
1202 }
1203
1204 static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1205 {
1206 return q->limits.max_hw_sectors;
1207 }
1208
1209 static inline unsigned short queue_max_segments(struct request_queue *q)
1210 {
1211 return q->limits.max_segments;
1212 }
1213
1214 static inline unsigned int queue_max_segment_size(struct request_queue *q)
1215 {
1216 return q->limits.max_segment_size;
1217 }
1218
1219 static inline unsigned short queue_logical_block_size(struct request_queue *q)
1220 {
1221 int retval = 512;
1222
1223 if (q && q->limits.logical_block_size)
1224 retval = q->limits.logical_block_size;
1225
1226 return retval;
1227 }
1228
1229 static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
1230 {
1231 return queue_logical_block_size(bdev_get_queue(bdev));
1232 }
1233
1234 static inline unsigned int queue_physical_block_size(struct request_queue *q)
1235 {
1236 return q->limits.physical_block_size;
1237 }
1238
1239 static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1240 {
1241 return queue_physical_block_size(bdev_get_queue(bdev));
1242 }
1243
1244 static inline unsigned int queue_io_min(struct request_queue *q)
1245 {
1246 return q->limits.io_min;
1247 }
1248
1249 static inline int bdev_io_min(struct block_device *bdev)
1250 {
1251 return queue_io_min(bdev_get_queue(bdev));
1252 }
1253
1254 static inline unsigned int queue_io_opt(struct request_queue *q)
1255 {
1256 return q->limits.io_opt;
1257 }
1258
1259 static inline int bdev_io_opt(struct block_device *bdev)
1260 {
1261 return queue_io_opt(bdev_get_queue(bdev));
1262 }
1263
1264 static inline int queue_alignment_offset(struct request_queue *q)
1265 {
1266 if (q->limits.misaligned)
1267 return -1;
1268
1269 return q->limits.alignment_offset;
1270 }
1271
1272 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1273 {
1274 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1275 unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
1276
1277 return (granularity + lim->alignment_offset - alignment) % granularity;
1278 }
1279
1280 static inline int bdev_alignment_offset(struct block_device *bdev)
1281 {
1282 struct request_queue *q = bdev_get_queue(bdev);
1283
1284 if (q->limits.misaligned)
1285 return -1;
1286
1287 if (bdev != bdev->bd_contains)
1288 return bdev->bd_part->alignment_offset;
1289
1290 return q->limits.alignment_offset;
1291 }
1292
1293 static inline int queue_discard_alignment(struct request_queue *q)
1294 {
1295 if (q->limits.discard_misaligned)
1296 return -1;
1297
1298 return q->limits.discard_alignment;
1299 }
1300
1301 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1302 {
1303 unsigned int alignment, granularity, offset;
1304
1305 if (!lim->max_discard_sectors)
1306 return 0;
1307
1308 /* Why are these in bytes, not sectors? */
1309 alignment = lim->discard_alignment >> 9;
1310 granularity = lim->discard_granularity >> 9;
1311 if (!granularity)
1312 return 0;
1313
1314 /* Offset of the partition start in 'granularity' sectors */
1315 offset = sector_div(sector, granularity);
1316
1317 /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1318 offset = (granularity + alignment - offset) % granularity;
1319
1320 /* Turn it back into bytes, gaah */
1321 return offset << 9;
1322 }
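/*
 * Worked example: discard_granularity 4096 bytes (8 sectors), alignment 0,
 * partition starting at sector 34: offset = 34 % 8 == 2, so the result is
 * ((8 + 0 - 2) % 8) << 9 == 3072 bytes to the next discard boundary.
 */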
1323
1324 static inline int bdev_discard_alignment(struct block_device *bdev)
1325 {
1326 struct request_queue *q = bdev_get_queue(bdev);
1327
1328 if (bdev != bdev->bd_contains)
1329 return bdev->bd_part->discard_alignment;
1330
1331 return q->limits.discard_alignment;
1332 }
1333
1334 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1335 {
1336 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
1337 return 1;
1338
1339 return 0;
1340 }
1341
1342 static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
1343 {
1344 return queue_discard_zeroes_data(bdev_get_queue(bdev));
1345 }
1346
1347 static inline unsigned int bdev_write_same(struct block_device *bdev)
1348 {
1349 struct request_queue *q = bdev_get_queue(bdev);
1350
1351 if (q)
1352 return q->limits.max_write_same_sectors;
1353
1354 return 0;
1355 }
1356
1357 static inline int queue_dma_alignment(struct request_queue *q)
1358 {
1359 return q ? q->dma_alignment : 511;
1360 }
1361
1362 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1363 unsigned int len)
1364 {
1365 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1366 return !(addr & alignment) && !(len & alignment);
1367 }
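/*
 * Worked example: with the default dma_alignment of 511 and a zero pad
 * mask, blk_rq_aligned() requires both addr and len to be multiples of 512.
 */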
1368
1369 /* assumes size > 256 */
1370 static inline unsigned int blksize_bits(unsigned int size)
1371 {
1372 unsigned int bits = 8;
1373 do {
1374 bits++;
1375 size >>= 1;
1376 } while (size > 256);
1377 return bits;
1378 }
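/* e.g. blksize_bits(512) == 9, blksize_bits(4096) == 12 */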
1379
1380 static inline unsigned int block_size(struct block_device *bdev)
1381 {
1382 return bdev->bd_block_size;
1383 }
1384
1385 static inline bool queue_flush_queueable(struct request_queue *q)
1386 {
1387 return !q->flush_not_queueable;
1388 }
1389
1390 typedef struct {struct page *v;} Sector;
1391
1392 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1393
1394 static inline void put_dev_sector(Sector p)
1395 {
1396 page_cache_release(p.v);
1397 }
1398
1399 struct work_struct;
1400 int kblockd_schedule_work(struct work_struct *work);
1401 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
1402 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1403
1404 #ifdef CONFIG_BLK_CGROUP
1405 /*
1406 * This should not be using sched_clock(). A real patch is in progress
1407  * to fix this up; until that is in place we need to disable preemption
1408 * around sched_clock() in this function and set_io_start_time_ns().
1409 */
1410 static inline void set_start_time_ns(struct request *req)
1411 {
1412 preempt_disable();
1413 req->start_time_ns = sched_clock();
1414 preempt_enable();
1415 }
1416
1417 static inline void set_io_start_time_ns(struct request *req)
1418 {
1419 preempt_disable();
1420 req->io_start_time_ns = sched_clock();
1421 preempt_enable();
1422 }
1423
1424 static inline uint64_t rq_start_time_ns(struct request *req)
1425 {
1426 return req->start_time_ns;
1427 }
1428
1429 static inline uint64_t rq_io_start_time_ns(struct request *req)
1430 {
1431 return req->io_start_time_ns;
1432 }
1433 #else
1434 static inline void set_start_time_ns(struct request *req) {}
1435 static inline void set_io_start_time_ns(struct request *req) {}
1436 static inline uint64_t rq_start_time_ns(struct request *req)
1437 {
1438 return 0;
1439 }
1440 static inline uint64_t rq_io_start_time_ns(struct request *req)
1441 {
1442 return 0;
1443 }
1444 #endif
1445
1446 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1447 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1448 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1449 MODULE_ALIAS("block-major-" __stringify(major) "-*")
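/*
 * For example, MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR) expands to
 * MODULE_ALIAS("block-major-7-*"), so the module is loaded on access to
 * any minor of that major.
 */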
1450
1451 #if defined(CONFIG_BLK_DEV_INTEGRITY)
1452
1453 enum blk_integrity_flags {
1454 BLK_INTEGRITY_VERIFY = 1 << 0,
1455 BLK_INTEGRITY_GENERATE = 1 << 1,
1456 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
1457 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
1458 };
1459
1460 struct blk_integrity_iter {
1461 void *prot_buf;
1462 void *data_buf;
1463 sector_t seed;
1464 unsigned int data_size;
1465 unsigned short interval;
1466 const char *disk_name;
1467 };
1468
1469 typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
1470
1471 struct blk_integrity {
1472 integrity_processing_fn *generate_fn;
1473 integrity_processing_fn *verify_fn;
1474
1475 unsigned short flags;
1476 unsigned short tuple_size;
1477 unsigned short interval;
1478 unsigned short tag_size;
1479
1480 const char *name;
1481
1482 struct kobject kobj;
1483 };
1484
1485 extern bool blk_integrity_is_initialized(struct gendisk *);
1486 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
1487 extern void blk_integrity_unregister(struct gendisk *);
1488 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1489 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1490 struct scatterlist *);
1491 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1492 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1493 struct request *);
1494 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1495 struct bio *);
1496
1497 static inline
1498 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1499 {
1500 return bdev->bd_disk->integrity;
1501 }
1502
1503 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1504 {
1505 return disk->integrity;
1506 }
1507
1508 static inline bool blk_integrity_rq(struct request *rq)
1509 {
1510 return rq->cmd_flags & REQ_INTEGRITY;
1511 }
1512
1513 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1514 unsigned int segs)
1515 {
1516 q->limits.max_integrity_segments = segs;
1517 }
1518
1519 static inline unsigned short
1520 queue_max_integrity_segments(struct request_queue *q)
1521 {
1522 return q->limits.max_integrity_segments;
1523 }
1524
1525 #else /* CONFIG_BLK_DEV_INTEGRITY */
1526
1527 struct bio;
1528 struct block_device;
1529 struct gendisk;
1530 struct blk_integrity;
1531
1532 static inline int blk_integrity_rq(struct request *rq)
1533 {
1534 return 0;
1535 }
1536 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1537 struct bio *b)
1538 {
1539 return 0;
1540 }
1541 static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1542 struct bio *b,
1543 struct scatterlist *s)
1544 {
1545 return 0;
1546 }
1547 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1548 {
1549 return NULL;
1550 }
1551 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1552 {
1553 return NULL;
1554 }
1555 static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1556 {
1557 return 0;
1558 }
1559 static inline int blk_integrity_register(struct gendisk *d,
1560 struct blk_integrity *b)
1561 {
1562 return 0;
1563 }
1564 static inline void blk_integrity_unregister(struct gendisk *d)
1565 {
1566 }
1567 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1568 unsigned int segs)
1569 {
1570 }
1571 static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1572 {
1573 return 0;
1574 }
1575 static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1576 struct request *r1,
1577 struct request *r2)
1578 {
1579 return true;
1580 }
1581 static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1582 struct request *r,
1583 struct bio *b)
1584 {
1585 return true;
1586 }
1587 static inline bool blk_integrity_is_initialized(struct gendisk *g)
1588 {
1589 	return false;
1590 }
1591
1592 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1593
1594 struct block_device_operations {
1595 int (*open) (struct block_device *, fmode_t);
1596 void (*release) (struct gendisk *, fmode_t);
1597 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
1598 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1599 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1600 long (*direct_access)(struct block_device *, sector_t,
1601 void **, unsigned long *pfn, long size);
1602 unsigned int (*check_events) (struct gendisk *disk,
1603 unsigned int clearing);
1604 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
1605 int (*media_changed) (struct gendisk *);
1606 void (*unlock_native_capacity) (struct gendisk *);
1607 int (*revalidate_disk) (struct gendisk *);
1608 int (*getgeo)(struct block_device *, struct hd_geometry *);
1609 /* this callback is with swap_lock and sometimes page table lock held */
1610 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1611 struct module *owner;
1612 };
1613
1614 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1615 unsigned long);
1616 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1617 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1618 struct writeback_control *);
1619 extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
1620 unsigned long *pfn, long size);
1621 #else /* CONFIG_BLOCK */
1622
1623 struct block_device;
1624
1625 /*
1626 * stubs for when the block layer is configured out
1627 */
1628 #define buffer_heads_over_limit 0
1629
1630 static inline long nr_blockdev_pages(void)
1631 {
1632 return 0;
1633 }
1634
1635 struct blk_plug {
1636 };
1637
1638 static inline void blk_start_plug(struct blk_plug *plug)
1639 {
1640 }
1641
1642 static inline void blk_finish_plug(struct blk_plug *plug)
1643 {
1644 }
1645
1646 static inline void blk_flush_plug(struct task_struct *task)
1647 {
1648 }
1649
1650 static inline void blk_schedule_flush_plug(struct task_struct *task)
1651 {
1652 }
1653
1654
1655 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1656 {
1657 return false;
1658 }
1659
1660 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1661 sector_t *error_sector)
1662 {
1663 return 0;
1664 }
1665
1666 #endif /* CONFIG_BLOCK */
1667
1668 #endif /* _LINUX_BLKDEV_H */