#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	u64 cmd_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
	WARN_ON(op >= (1 << REQ_OP_BITS));			\
	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
	req_set_op(req, op);			\
	(req)->cmd_flags |= flags;		\
} while (0)
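
/*
 * Illustrative use of the helpers above (editorial sketch, not part of the
 * original header): a driver preparing a synchronous write sets the
 * operation and its attribute flags in one step.  "rq" is a hypothetical
 * struct request the caller already owns:
 *
 *	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);
 *	WARN_ON(req_op(rq) != REQ_OP_WRITE);
 *
 * Note that req_set_op() masks out the old op bits before or-ing in the
 * new one, so calling it repeatedly does not accumulate stale op bits;
 * the attribute flags or-ed in by req_set_op_attrs(), however, do
 * accumulate across calls.
 */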
static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject		mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity	integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait.  Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * A driver can handle struct request if it either has an old-style
 * request_fn defined or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(req_op(rq), rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
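
/*
 * Illustrative iteration (editorial sketch, not part of the original
 * header): a driver walking every segment of a request, e.g. to program
 * a DMA engine.  "rq" is a request handed to the driver's queue handler
 * and do_something() is a hypothetical per-segment handler:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = page_address(bvec.bv_page) + bvec.bv_offset;
 *
 *		do_something(buf, bvec.bv_len);
 *	}
 *
 * Real drivers must also cope with highmem pages, which page_address()
 * cannot map directly.
 */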
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}
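
/*
 * Worked example (editorial note, not part of the original header): with
 * chunk_sectors = 256 (a 128K chunk boundary; chunk_sectors is a power of
 * two, which is what makes the mask above valid) and offset = 300, the
 * expression yields 256 - (300 & 255) = 256 - 44 = 212 sectors, i.e.
 * exactly enough to reach the next chunk boundary without crossing it.
 */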
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
	struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself.  For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list; /* requests */
	struct list_head mq_list; /* blk-mq requests */
	struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
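
/*
 * Illustrative plugging sequence (editorial sketch, not part of the
 * original header): a submitter batches several bios so the block layer
 * can merge and dispatch them together.  submit_one_bio() is a
 * hypothetical stand-in for whatever builds and sends each bio:
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_one_bio(i);
 *	blk_finish_plug(&plug);
 *
 * blk_finish_plug() flushes anything still held on the per-task lists
 * declared in struct blk_plug above; the legacy path also flushes early
 * once more than BLK_MAX_REQUEST_COUNT requests accumulate.
 */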
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}
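
/*
 * Editorial note on the shifts above: blkdev_issue_discard() and
 * blkdev_issue_zeroout() take 512-byte sectors, while the filesystem
 * callers work in s_blocksize units, so "block << (s_blocksize_bits - 9)"
 * converts blocks to sectors.  For example, with 4096-byte blocks
 * (s_blocksize_bits = 12) the shift is 3, i.e. 8 sectors per block.
 */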
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
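
/*
 * Worked example (editorial note, not part of the original header): with
 * physical_block_size = 4096, io_min = 4096 and alignment_offset = 0,
 * granularity is 4096 bytes (8 sectors).  For sector = 20,
 * sector_div(20, 8) leaves a remainder of 4, so alignment = 4 << 9 = 2048
 * and the function returns (4096 + 0 - 2048) % 4096 = 2048: the I/O
 * starts 2048 bytes past a physical block boundary, and 2048 more bytes
 * are needed to reach the next aligned position.
 */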
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
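
/*
 * Editorial note: blksize_bits() is log2 for the legal block sizes, e.g.
 * blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12, matching the "size > 256" assumption above.
 */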
2befb9e3 1378static inline unsigned int block_size(struct block_device *bdev)
1da177e4
LT
1379{
1380 return bdev->bd_block_size;
1381}
1382
f3876930 1383static inline bool queue_flush_queueable(struct request_queue *q)
1384{
c888a8f9 1385 return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
f3876930 1386}
1387
1da177e4
LT
1388typedef struct {struct page *v;} Sector;
1389
1390unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1391
1392static inline void put_dev_sector(Sector p)
1393{
09cbfeaf 1394 put_page(p.v);
1da177e4
LT
1395}
1396
e0af2917
ML
1397static inline bool __bvec_gap_to_prev(struct request_queue *q,
1398 struct bio_vec *bprv, unsigned int offset)
1399{
1400 return offset ||
1401 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1402}
1403
03100aad
KB
1404/*
1405 * Check if adding a bio_vec after bprv with offset would create a gap in
1406 * the SG list. Most drivers don't care about this, but some do.
1407 */
1408static inline bool bvec_gap_to_prev(struct request_queue *q,
1409 struct bio_vec *bprv, unsigned int offset)
1410{
1411 if (!queue_virt_boundary(q))
1412 return false;
e0af2917 1413 return __bvec_gap_to_prev(q, bprv, offset);
03100aad
KB
1414}
1415
5e7c4274
JA
1416static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1417 struct bio *next)
1418{
25e71a99
ML
1419 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1420 struct bio_vec pb, nb;
1421
1422 bio_get_last_bvec(prev, &pb);
1423 bio_get_first_bvec(next, &nb);
5e7c4274 1424
25e71a99
ML
1425 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1426 }
1427
1428 return false;
5e7c4274
JA
1429}
1430
1431static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1432{
1433 return bio_will_gap(req->q, req->biotail, bio);
1434}
1435
1436static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1437{
1438 return bio_will_gap(req->q, bio, req->bio);
1439}
1440
1da177e4 1441struct work_struct;
59c3d45e
JA
1442int kblockd_schedule_work(struct work_struct *work);
1443int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
8ab14595 1444int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1da177e4 1445
9195291e 1446#ifdef CONFIG_BLK_CGROUP
28f4197e
JA
1447/*
1448 * This should not be using sched_clock(). A real patch is in progress
1449 * to fix this up, until that is in place we need to disable preemption
1450 * around sched_clock() in this function and set_io_start_time_ns().
1451 */
9195291e
DS
1452static inline void set_start_time_ns(struct request *req)
1453{
28f4197e 1454 preempt_disable();
9195291e 1455 req->start_time_ns = sched_clock();
28f4197e 1456 preempt_enable();
9195291e
DS
1457}
1458
1459static inline void set_io_start_time_ns(struct request *req)
1460{
28f4197e 1461 preempt_disable();
9195291e 1462 req->io_start_time_ns = sched_clock();
28f4197e 1463 preempt_enable();
9195291e 1464}
84c124da
DS
1465
1466static inline uint64_t rq_start_time_ns(struct request *req)
1467{
1468 return req->start_time_ns;
1469}
1470
1471static inline uint64_t rq_io_start_time_ns(struct request *req)
1472{
1473 return req->io_start_time_ns;
1474}
9195291e
DS
1475#else
1476static inline void set_start_time_ns(struct request *req) {}
1477static inline void set_io_start_time_ns(struct request *req) {}
84c124da
DS
1478static inline uint64_t rq_start_time_ns(struct request *req)
1479{
1480 return 0;
1481}
1482static inline uint64_t rq_io_start_time_ns(struct request *req)
1483{
1484 return 0;
1485}
9195291e
DS
1486#endif
1487
1da177e4
LT
1488#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1489 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1490#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1491 MODULE_ALIAS("block-major-" __stringify(major) "-*")
1492
7ba1ba12
MP
1493#if defined(CONFIG_BLK_DEV_INTEGRITY)
1494
8288f496
MP
1495enum blk_integrity_flags {
1496 BLK_INTEGRITY_VERIFY = 1 << 0,
1497 BLK_INTEGRITY_GENERATE = 1 << 1,
3aec2f41 1498 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
aae7df50 1499 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
8288f496 1500};
7ba1ba12 1501
18593088 1502struct blk_integrity_iter {
7ba1ba12
MP
1503 void *prot_buf;
1504 void *data_buf;
3be91c4a 1505 sector_t seed;
7ba1ba12 1506 unsigned int data_size;
3be91c4a 1507 unsigned short interval;
7ba1ba12
MP
1508 const char *disk_name;
1509};
1510
18593088 1511typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
7ba1ba12 1512
0f8087ec
MP
1513struct blk_integrity_profile {
1514 integrity_processing_fn *generate_fn;
1515 integrity_processing_fn *verify_fn;
1516 const char *name;
1517};
7ba1ba12 1518
extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

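/*
 * Illustrative sketch (hypothetical driver code): registering the example
 * profile above for a disk. This assumes the blk_integrity fields
 * (profile, flags, tuple_size, interval_exp) declared earlier in this
 * header; the 8-byte tuple / 512-byte interval values mirror a
 * T10-PI-like layout and are assumptions, not requirements.
 */
static inline void example_register_integrity(struct gendisk *disk)
{
	struct blk_integrity bi = {
		.profile	= &example_integrity_profile,
		.flags		= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
		.tuple_size	= 8,
		.interval_exp	= 9,	/* protection interval = 1 << 9 bytes */
	};

	blk_integrity_register(disk, &bi);
}
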
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

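/*
 * Illustrative sketch (hypothetical): a prep or completion path might
 * branch on whether the request actually carries integrity metadata and
 * whether its disk has a registered profile. Assumes the rq_disk field
 * of struct request declared earlier in this header.
 */
static inline bool example_rq_needs_verify(struct request *rq)
{
	struct blk_integrity *bi = blk_get_integrity(rq->rq_disk);

	return bi && blk_integrity_rq(rq);
}
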
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

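/*
 * Illustrative sketch (hypothetical controller limit): a driver whose DMA
 * engine can map at most four metadata segments would cap the queue limit
 * during setup and honor it when building integrity scatterlists.
 */
static inline void example_integrity_queue_setup(struct request_queue *q)
{
	blk_queue_max_integrity_segments(q, 4);
}
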
static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline bool blk_integrity_rq(struct request *rq)
{
	return false;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *q,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *q,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 */
struct blk_dax_ctl {
	sector_t sector;
	void *addr;
	long size;
	pfn_t pfn;
};

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, int op);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
			long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is called with swap_lock and sometimes the page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};

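/*
 * Illustrative sketch (hypothetical driver): a minimal operations table.
 * All example_* names are made up; only .owner, .open, .release and
 * .getgeo are wired, which is enough for a simple virtual disk.
 */
static int example_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void example_release(struct gendisk *disk, fmode_t mode)
{
}

static int example_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Fake CHS geometry for legacy tools: 4 heads * 16 sectors = 64. */
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 6;
	geo->start = 0;
	return 0;
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.release	= example_release,
	.getgeo		= example_getgeo,
};
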
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
extern bool bdev_dax_capable(struct block_device *);
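
/*
 * Illustrative sketch (hypothetical caller): mapping one 512-byte sector
 * of a DAX-capable device. Per the blk_dax_ctl documentation above,
 * @sector and @size are inputs; @addr and @pfn are filled in by the
 * driver on success.
 */
static inline long example_dax_peek(struct block_device *bdev, sector_t sector)
{
	struct blk_dax_ctl dax = {
		.sector	= sector,
		.size	= 512,
	};

	return bdev_direct_access(bdev, &dax);
}
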
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_BLKDEV_H */