/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

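/*
 * Book-keeping for the flush/FUA sequencing state machine driven by
 * blk-flush.c.
 */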
struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

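/*
 * Queue flag helpers. The *_unlocked variants only demand the queue_lock
 * once the queue is fully initialized (QUEUE_FLAG_INIT_DONE) and still holds
 * a kobject reference; the remaining helpers go through
 * queue_lockdep_assert_held().
 */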
static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

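/*
 * Two adjacent bio_vecs may be collapsed into one physical segment only if
 * they are physically contiguous and the combined range does not straddle
 * the queue's segment boundary mask.
 */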
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

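/*
 * Integrity metadata has to honour the queue's virt boundary just like the
 * data payload; these helpers report whether a merge would leave a gap in
 * the integrity scatterlist.
 */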
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds. Steal the bottom bit of the
 * __deadline field for this.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(0, &rq->__deadline);
}

static inline bool blk_rq_is_complete(struct request *rq)
{
	return test_bit(0, &rq->__deadline);
}

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

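/* Request/bio merge helpers implemented in blk-merge.c. */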
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
	       !blk_rq_is_passthrough(rq);
}

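/* Mark @req ineligible for further merging and drop it as the cached merge hint. */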
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 * setting the deadline clears the bottom bit, potentially clearing the
 * completed bit. The user has to be OK with this (current ones are fine).
 */
static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
	rq->__deadline = time & ~0x1UL;
}

static inline unsigned long blk_rq_deadline(struct request *rq)
{
	return rq->__deadline & ~0x1UL;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio. May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it. Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */