/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)
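/* (HZ/50 jiffies works out to roughly 20 ms for the usual HZ settings.) */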

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

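/*
 * Flush machinery state, one instance per queue (or per hardware context
 * with blk-mq).  As a rough sketch of how blk-flush.c uses it: pending
 * flush requests are staged on flush_queue[flush_pending_idx] and issued
 * from flush_queue[flush_running_idx]; the two single-bit indices let the
 * code double-buffer between the lists, and flush_rq is the preallocated
 * request that carries the actual flush to the driver.
 */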
struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

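/*
 * Pick the flush queue that goes with @ctx: the per-hardware-context one
 * for blk-mq, or the single queue-wide one for the legacy request path.
 */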
static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops)
		return blk_mq_map_queue(q, ctx->cpu)->fq;
	return q->fq;
}

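/*
 * Take an additional reference on a queue; unlike blk_get_queue(), this
 * does not check whether the queue is dying.
 */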
static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

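/*
 * Integrity completion: only bios that actually carry an integrity payload
 * take the out-of-line __bio_integrity_endio() path; everything else (and
 * all bios when CONFIG_BLK_DEV_INTEGRITY is off) completes straight away,
 * signalled by returning true.
 */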
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}
#else
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


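/*
 * bio/request merge helpers used by the submission path.  The
 * blk_attempt_plug_merge() variant tries to fold a bio into a request that
 * is still sitting on the current task's plug list, so the common
 * sequential-IO case can merge without taking the queue lock.
 */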
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
			       struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	/*
	 * Keep these two bits first - not because we depend on the
	 * value of them, but we do depend on them being in the same
	 * byte of storage to ensure ordering on writes. Keeping them
	 * first will achieve that nicely.
	 */
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,

	REQ_ATOM_POLL_SLEPT,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

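/*
 * Thin wrappers around the legacy (single-queue) elevator's optional
 * activate/deactivate hooks, called when a request is handed to the driver
 * or pulled back off the dispatch list.
 */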
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

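/*
 * Timeout fault injection: with CONFIG_FAIL_IO_TIMEOUT, completions can be
 * deliberately dropped so that the timeout/error-handling path gets
 * exercised; blk_should_fake_timeout() decides whether to drop a given one.
 */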
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
			   const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
	       !blk_rq_is_passthrough(rq);
}

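/*
 * Mark @req as no longer a merge candidate and, if it was the queue's
 * cached "last merge" hint, forget that hint as well.
 */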
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
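
/*
 * Rough usage sketch (as on the legacy request-allocation path): callers
 * first make sure an io_context exists and then look up the one relevant
 * for the bio being submitted, e.g.
 *
 *	create_io_context(GFP_KERNEL, q->node);
 *	ioc = rq_ioc(bio);
 *
 * and simply cope with a NULL result if allocation failed.
 */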

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

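/*
 * Bounce buffering: blk_queue_bounce() copies bio data that sits above the
 * queue's bounce limit (e.g. highmem or ISA-DMA-unreachable pages) into
 * low-memory bounce pages before the driver sees it.  With CONFIG_BOUNCE
 * disabled it compiles away to nothing.
 */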
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

extern void blk_drain_queue(struct request_queue *q);

#endif /* BLK_INTERNAL_H */