#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;

#define BLKDEV_MIN_RQ   4
#define BLKDEV_MAX_RQ   128     /* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
        /*
         * count[], starved[], and wait[] are indexed by
         * BLK_RW_SYNC/BLK_RW_ASYNC
         */
        int count[2];
        int starved[2];
        int elvpriv;
        mempool_t *rq_pool;
        wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
        REQ_TYPE_FS = 1,        /* fs request */
        REQ_TYPE_BLOCK_PC,      /* scsi command */
        REQ_TYPE_SENSE,         /* sense request */
        REQ_TYPE_PM_SUSPEND,    /* suspend request */
        REQ_TYPE_PM_RESUME,     /* resume request */
        REQ_TYPE_PM_SHUTDOWN,   /* shutdown request */
        REQ_TYPE_SPECIAL,       /* driver defined type */
        /*
         * for ATA/ATAPI devices. This really doesn't belong here; IDE should
         * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
         * private REQ_LB opcodes to differentiate what type of request this is.
         */
        REQ_TYPE_ATA_TASKFILE,
        REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB     16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 * If you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
        struct list_head queuelist;
        struct call_single_data csd;

        struct request_queue *q;

        unsigned int cmd_flags;
        enum rq_cmd_type_bits cmd_type;
        unsigned long atomic_flags;

        int cpu;

        /* the following two fields are internal, NEVER access directly */
        unsigned int __data_len;        /* total data len */
        sector_t __sector;              /* sector cursor */

        struct bio *bio;
        struct bio *biotail;

        struct hlist_node hash; /* merge hash */
        /*
         * The rb_node is only used inside the io scheduler, requests
         * are pruned when moved to the dispatch queue. So let the
         * completion_data share space with the rb_node.
         */
        union {
                struct rb_node rb_node; /* sort/lookup */
                void *completion_data;
        };

        /*
         * Three pointers are available for the IO schedulers; if they need
         * more they have to dynamically allocate it. Flush requests are
         * never put on the IO scheduler, so let the flush fields share
         * space with the elevator data.
         */
        union {
                struct {
                        struct io_cq *icq;
                        void *priv[2];
                } elv;

                struct {
                        unsigned int seq;
                        struct list_head list;
                        rq_end_io_fn *saved_end_io;
                } flush;
        };

        struct gendisk *rq_disk;
        struct hd_struct *part;
        unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
        unsigned long long start_time_ns;
        unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
        /* Number of scatter-gather DMA addr+len pairs after
         * physical address coalescing is performed.
         */
        unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        unsigned short nr_integrity_segments;
#endif

        unsigned short ioprio;

        int ref_count;

        void *special;          /* opaque pointer available for LLD use */
        char *buffer;           /* kaddr of the current segment if available */

        int tag;
        int errors;

        /*
         * when request is used as a packet command carrier
         */
        unsigned char __cmd[BLK_MAX_CDB];
        unsigned char *cmd;
        unsigned short cmd_len;

        unsigned int extra_len; /* length of alignment and padding */
        unsigned int sense_len;
        unsigned int resid_len; /* residual count */
        void *sense;

        unsigned long deadline;
        struct list_head timeout_list;
        unsigned int timeout;
        int retries;

        /*
         * completion callback.
         */
        rq_end_io_fn *end_io;
        void *end_io_data;

        /* for bidi */
        struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
        return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
        /* PM state machine step value, currently driver specific */
        int pm_step;
        /* requested PM state value (S1, S2, S3, S4, ...) */
        u32 pm_state;
        void *data;             /* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
        struct block_device *bi_bdev;
        sector_t bi_sector;
        unsigned bi_size;
        unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
                             struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
        BLK_EH_NOT_HANDLED,
        BLK_EH_HANDLED,
        BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
        Queue_down,
        Queue_up,
};

struct blk_queue_tag {
        struct request **tag_index;     /* map of busy tags */
        unsigned long *tag_map;         /* bit map of free/busy tags */
        int busy;                       /* current depth */
        int max_depth;                  /* what we will send to device */
        int real_max_depth;             /* what the array can hold */
        atomic_t refcnt;                /* map can be shared */
};

#define BLK_SCSI_MAX_CMDS       (256)
#define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
        unsigned long bounce_pfn;
        unsigned long seg_boundary_mask;

        unsigned int max_hw_sectors;
        unsigned int max_sectors;
        unsigned int max_segment_size;
        unsigned int physical_block_size;
        unsigned int alignment_offset;
        unsigned int io_min;
        unsigned int io_opt;
        unsigned int max_discard_sectors;
        unsigned int discard_granularity;
        unsigned int discard_alignment;

        unsigned short logical_block_size;
        unsigned short max_segments;
        unsigned short max_integrity_segments;

        unsigned char misaligned;
        unsigned char discard_misaligned;
        unsigned char cluster;
        unsigned char discard_zeroes_data;
};

struct request_queue {
        /*
         * Together with queue_head for cacheline sharing
         */
        struct list_head queue_head;
        struct request *last_merge;
        struct elevator_queue *elevator;

        /*
         * the queue request freelist, one for reads and one for writes
         */
        struct request_list rq;

        request_fn_proc *request_fn;
        make_request_fn *make_request_fn;
        prep_rq_fn *prep_rq_fn;
        unprep_rq_fn *unprep_rq_fn;
        merge_bvec_fn *merge_bvec_fn;
        softirq_done_fn *softirq_done_fn;
        rq_timed_out_fn *rq_timed_out_fn;
        dma_drain_needed_fn *dma_drain_needed;
        lld_busy_fn *lld_busy_fn;

        /*
         * Dispatch queue sorting
         */
        sector_t end_sector;
        struct request *boundary_rq;

        /*
         * Delayed queue handling
         */
        struct delayed_work delay_work;

        struct backing_dev_info backing_dev_info;

        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
         */
        void *queuedata;

        /*
         * various queue flags, see QUEUE_* below
         */
        unsigned long queue_flags;

        /*
         * ida allocated id for this queue. Used to index queues from
         * ioctx.
         */
        int id;

        /*
         * queue needs bounce pages for pages above this limit
         */
        gfp_t bounce_gfp;

        /*
         * protects queue structures from reentrancy. ->__queue_lock should
         * _never_ be used directly, it is queue private. always use
         * ->queue_lock.
         */
        spinlock_t __queue_lock;
        spinlock_t *queue_lock;

        /*
         * queue kobject
         */
        struct kobject kobj;

        /*
         * queue settings
         */
        unsigned long nr_requests;      /* Max # of requests */
        unsigned int nr_congestion_on;
        unsigned int nr_congestion_off;
        unsigned int nr_batching;

        unsigned int dma_drain_size;
        void *dma_drain_buffer;
        unsigned int dma_pad_mask;
        unsigned int dma_alignment;

        struct blk_queue_tag *queue_tags;
        struct list_head tag_busy_list;

        unsigned int nr_sorted;
        unsigned int in_flight[2];

        unsigned int rq_timeout;
        struct timer_list timeout;
        struct list_head timeout_list;

        struct list_head icq_list;

        struct queue_limits limits;

        /*
         * sg stuff
         */
        unsigned int sg_timeout;
        unsigned int sg_reserved_size;
        int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace *blk_trace;
#endif
        /*
         * for flush operations
         */
        unsigned int flush_flags;
        unsigned int flush_not_queueable:1;
        unsigned int flush_queue_delayed:1;
        unsigned int flush_pending_idx:1;
        unsigned int flush_running_idx:1;
        unsigned long flush_pending_since;
        struct list_head flush_queue[2];
        struct list_head flush_data_in_flight;
        struct request flush_rq;

        struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
        bsg_job_fn *bsg_job_fn;
        int bsg_job_size;
        struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED       1       /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED      2       /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL    4       /* write queue has been filled */
#define QUEUE_FLAG_DEAD         5       /* queue being torn down */
#define QUEUE_FLAG_ELVSWITCH    6       /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI         7       /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES     8       /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP    9       /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO      10      /* fake timeout */
#define QUEUE_FLAG_STACKABLE    11      /* supports request stacking */
#define QUEUE_FLAG_NONROT       12      /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT         QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT      13      /* do IO stats */
#define QUEUE_FLAG_DISCARD      14      /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES    15      /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM   16      /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD   17      /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE   18      /* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE) |          \
                                 (1 << QUEUE_FLAG_SAME_COMP) |          \
                                 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
        if (q->queue_lock)
                lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
                                           struct request_queue *q)
{
        __set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
                                            struct request_queue *q)
{
        queue_lockdep_assert_held(q);

        if (test_bit(flag, &q->queue_flags)) {
                __clear_bit(flag, &q->queue_flags);
                return 1;
        }

        return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
                                          struct request_queue *q)
{
        queue_lockdep_assert_held(q);

        if (!test_bit(flag, &q->queue_flags)) {
                __set_bit(flag, &q->queue_flags);
                return 0;
        }

        return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
        queue_lockdep_assert_held(q);
        __set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
                                             struct request_queue *q)
{
        __clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
        return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        queue_lockdep_assert_held(q);
        __clear_bit(flag, &q->queue_flags);
}

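/*
 * Illustrative sketch (not part of the original header): the locked
 * queue_flag_* helpers assert that ->queue_lock is held, so a driver
 * toggling a flag after queue setup would typically do something like
 * the following (queue "q" assumed to come from blk_init_queue()):
 *
 *      spin_lock_irq(q->queue_lock);
 *      queue_flag_set(QUEUE_FLAG_NONROT, q);   // e.g. an SSD-backed device
 *      queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *      spin_unlock_irq(q->queue_lock);
 *
 * The *_unlocked variants skip the lockdep assertion and are meant for
 * contexts where the queue is not yet visible to anyone else.
 */
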
#define blk_queue_tagged(q)     test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)       test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)  \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)  \
        test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
        test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                            REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
        (((rq)->cmd_flags & REQ_STARTED) && \
         ((rq)->cmd_type == REQ_TYPE_FS || \
          ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq) \
        ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
         (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)    ((rq)->cpu != -1)
#define blk_bidi_rq(rq)         ((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)       (!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)      list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)         ((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
        return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
        return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
        return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
        if (sync)
                return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
        return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
        if (sync)
                queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
        else
                queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
        if (sync)
                queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
        else
                queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}

/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor
 * may it already be started by the driver.
 */
#define RQ_NOMERGE_FLAGS        \
        (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)        \
        (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
         (((rq)->cmd_flags & REQ_DISCARD) || \
          (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK              0       /* serve it */
#define BLKPREP_KILL            1       /* fatal error, kill */
#define BLKPREP_DEFER           2       /* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH      : bounce all highmem pages
 * BLK_BOUNCE_ANY       : don't bounce anything
 * BLK_BOUNCE_ISA       : bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH         ((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH         -1ULL
#endif
#define BLK_BOUNCE_ANY          (-1ULL)
#define BLK_BOUNCE_ISA          (DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT  (60 * HZ)
#define BLK_MIN_SG_TIMEOUT      (7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
        return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
        struct page **pages;
        int page_order;
        int nr_entries;
        unsigned long offset;
        int null_mapped;
        int from_user;
};

struct req_iterator {
        int i;
        struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)              \
        for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)     \
        if ((rq->bio))                  \
                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)                    \
        __rq_for_each_bio(_iter.bio, _rq)                       \
                bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)                                 \
        (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt - 1)

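/*
 * Illustrative sketch (not part of the original header): walking the
 * data segments of a request, e.g. in a simple memory-backed driver.
 * "do_transfer" is a hypothetical per-segment helper:
 *
 *      struct req_iterator iter;
 *      struct bio_vec *bvec;
 *
 *      rq_for_each_segment(bvec, rq, iter) {
 *              void *kaddr = kmap_atomic(bvec->bv_page);
 *
 *              do_transfer(kaddr + bvec->bv_offset, bvec->bv_len);
 *              kunmap_atomic(kaddr);
 *      }
 */
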
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
                                        gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
                                    unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                             struct bio_set *bs, gfp_t gfp_mask,
                             int (*bio_ctr)(struct bio *, struct bio *, void *),
                             void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
                              unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                          unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
        clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
        set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
                               struct rq_map_data *, struct sg_iovec *, int,
                               unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                          struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
        return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()         : the current sector
 * blk_rq_bytes()       : bytes left in the entire request
 * blk_rq_cur_bytes()   : bytes left in the current segment
 * blk_rq_err_bytes()   : bytes left till the next error boundary
 * blk_rq_sectors()     : sectors left in the entire request
 * blk_rq_cur_sectors() : sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
        return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
        return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
        return blk_rq_cur_bytes(rq) >> 9;
}

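/*
 * Illustrative sketch (not part of the original header): a driver that
 * services one segment at a time might read the cursor like this
 * ("dev" and its members are hypothetical):
 *
 *      sector_t start = blk_rq_pos(rq);                // first sector
 *      unsigned int len = blk_rq_cur_bytes(rq);        // bytes in this segment
 *
 *      memcpy(dev->data + (start << 9), rq->buffer, len);
 *
 * Note that the __sector/__data_len fields themselves are internal;
 * only these accessors should be used.
 */
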
/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

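/*
 * Illustrative sketch (not part of the original header): the canonical
 * request_fn dispatch loop. blk_fetch_request() is blk_peek_request()
 * plus blk_start_request(), i.e. it dequeues the request and starts the
 * timeout timer. "handle_one_request" is a hypothetical driver helper;
 * the request_fn is invoked with q->queue_lock held:
 *
 *      static void my_request_fn(struct request_queue *q)
 *      {
 *              struct request *rq;
 *
 *              while ((rq = blk_fetch_request(q)) != NULL) {
 *                      if (rq->cmd_type != REQ_TYPE_FS) {
 *                              __blk_end_request_all(rq, -EIO);
 *                              continue;
 *                      }
 *                      handle_one_request(rq);
 *              }
 *      }
 */
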
/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
                               unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
                            unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
                              unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);

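/*
 * Illustrative sketch (not part of the original header): partial
 * completion without the queue lock held. blk_end_request() returns
 * true while request bytes remain pending:
 *
 *      if (blk_end_request(rq, 0, bytes_done))
 *              return;         // more segments pending, rq still owned
 *      // rq is now fully completed and has been freed
 *
 * Use the __blk_end_request*() variants instead when the caller already
 * holds q->queue_lock (e.g. from within the request_fn).
 */
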
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                                 spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                                      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
                                          unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                             sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                              sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
        unsigned long magic;            /* detect uninitialized use-cases */
        struct list_head list;          /* requests */
        struct list_head cb_list;       /* md requires an unplug callback */
        unsigned int should_sort;       /* list to be sorted before flushing? */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb {
        struct list_head list;
        void (*callback)(struct blk_plug_cb *);
};

extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        if (plug)
                blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        struct blk_plug *plug = tsk->plug;

        return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}

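/*
 * Illustrative sketch (not part of the original header): typical
 * submitter-side plugging around a batch of bios, as done in e.g. the
 * readahead and writeback paths:
 *
 *      struct blk_plug plug;
 *
 *      blk_start_plug(&plug);
 *      for (i = 0; i < nr_bios; i++)
 *              submit_bio(READ, bios[i]);      // queued on the per-task list
 *      blk_finish_plug(&plug);         // requests hit the queue in one batch
 *
 * If the task sleeps before blk_finish_plug(), the scheduler flushes the
 * plug on its behalf via blk_schedule_flush_plug().
 */
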
/*
 * tag stuff
 */
#define blk_rq_tagged(rq)       ((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
                                                     int tag)
{
        if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
                return NULL;
        return bqt->tag_index[tag];
}
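
/*
 * Illustrative sketch (not part of the original header): a driver using
 * generic tag queueing. The depth and the issue helper are assumptions:
 *
 *      blk_queue_init_tags(q, 64, NULL);       // up to 64 outstanding tags
 *
 *      // in the request_fn, with q->queue_lock held:
 *      if (blk_queue_start_tag(q, rq))
 *              break;                  // out of tags, retry later
 *      issue_to_hardware(rq);          // hypothetical; rq->tag is now valid
 *
 *      // on completion:
 *      blk_queue_end_tag(q, rq);
 */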

#define BLKDEV_DISCARD_SECURE   0x01    /* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
        return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
{
        return blkdev_issue_zeroout(sb->s_bdev,
                                    block << (sb->s_blocksize_bits - 9),
                                    nr_blocks << (sb->s_blocksize_bits - 9),
                                    gfp_mask);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,
        BLK_SAFE_MAX_SECTORS    = 255,
        BLK_DEF_MAX_SECTORS     = 1024,
        BLK_MAX_SEGMENT_SIZE    = 65536,
        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
        return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
        return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
        return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
        return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
        return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
        return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
        int retval = 512;

        if (q && q->limits.logical_block_size)
                retval = q->limits.logical_block_size;

        return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
        return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
        return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
        return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
        return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
        return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
        return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
        return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
        if (q->limits.misaligned)
                return -1;

        return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = (sector << 9) & (granularity - 1);

        return (granularity + lim->alignment_offset - alignment)
                & (granularity - 1);
}

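/*
 * Worked example (illustrative, not part of the original header): with a
 * 4096-byte granularity and alignment_offset = 0, a request starting at
 * sector 7 sits 7 * 512 = 3584 bytes into the 4096-byte window, so the
 * helper above returns (4096 + 0 - 3584) & 4095 = 512: the request start
 * is 512 bytes short of the next aligned boundary.
 */
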
static inline int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.misaligned)
                return -1;

        if (bdev != bdev->bd_contains)
                return bdev->bd_part->alignment_offset;

        return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
        if (q->limits.discard_misaligned)
                return -1;

        return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

        if (!lim->max_discard_sectors)
                return 0;

        return (lim->discard_granularity + lim->discard_alignment - alignment)
                & (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
        if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
                return 1;

        return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
        return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
        return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                 unsigned int len)
{
        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
        unsigned int bits = 8;
        do {
                bits++;
                size >>= 1;
        } while (size > 256);
        return bits;
}

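/*
 * Worked example (illustrative, not part of the original header):
 * blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12, i.e. the function returns log2 of the
 * block size for the power-of-two sizes the block layer actually uses.
 */
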
static inline unsigned int block_size(struct block_device *bdev)
{
        return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
        return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
        page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up; until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
        preempt_disable();
        req->start_time_ns = sched_clock();
        preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
        preempt_disable();
        req->io_start_time_ns = sched_clock();
        preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
        return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
        return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
        return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
        MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ     2       /* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE    4       /* generate data integrity on write */

struct blk_integrity_exchg {
        void                    *prot_buf;
        void                    *data_buf;
        sector_t                sector;
        unsigned int            data_size;
        unsigned short          sector_size;
        const char              *disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
        integrity_gen_fn        *generate_fn;
        integrity_vrfy_fn       *verify_fn;
        integrity_set_tag_fn    *set_tag_fn;
        integrity_get_tag_fn    *get_tag_fn;

        unsigned short          flags;
        unsigned short          tuple_size;
        unsigned short          sector_size;
        unsigned short          tag_size;

        const char              *name;

        struct kobject          kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
                                   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
                                  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
                                   struct bio *);

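/*
 * Illustrative sketch (not part of the original header): registering an
 * integrity profile for a disk. The callback names and values here are
 * assumptions, loosely modelled on the T10 DIF style of profile:
 *
 *      static struct blk_integrity my_integrity = {
 *              .name           = "EXAMPLE-CSUM",       // hypothetical
 *              .generate_fn    = my_generate_fn,       // fills prot_buf
 *              .verify_fn      = my_verify_fn,         // checks prot_buf
 *              .tuple_size     = 8,                    // PI bytes per sector
 *              .tag_size       = 0,
 *      };
 *
 *      blk_integrity_register(disk, &my_integrity);
 *
 * After registration the block layer generates/verifies the protection
 * information according to INTEGRITY_FLAG_READ/WRITE in ->flags.
 */
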
static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
        return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
        if (rq->bio == NULL)
                return 0;

        return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
        q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
        return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
        return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
                                            struct bio *b)
{
        return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
                                          struct bio *b,
                                          struct scatterlist *s)
{
        return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
        return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
        return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
                                         struct blk_integrity *b)
{
        return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
        return 0;
}
static inline int blk_integrity_merge_rq(struct request_queue *rq,
                                         struct request *r1,
                                         struct request *r2)
{
        return 0;
}
static inline int blk_integrity_merge_bio(struct request_queue *rq,
                                          struct request *r,
                                          struct bio *b)
{
        return 0;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
        return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        int (*release) (struct gendisk *, fmode_t);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*direct_access) (struct block_device *, sector_t,
                              void **, unsigned long *);
        unsigned int (*check_events) (struct gendisk *disk,
                                      unsigned int clearing);
        /* ->media_changed() is DEPRECATED, use ->check_events() instead */
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        struct module *owner;
};

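/*
 * Illustrative sketch (not part of the original header): a minimal ops
 * table for a simple driver. The my_* callbacks are hypothetical:
 *
 *      static const struct block_device_operations my_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = my_open,
 *              .release        = my_release,
 *              .getgeo         = my_getgeo,    // legacy HDIO_GETGEO support
 *      };
 *
 *      disk->fops = &my_fops;  // before add_disk()
 */
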
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
                                 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
        return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        return false;
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_BLKDEV_H */