// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

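/*
 * Illustrative note (not part of the scheduler proper): the four values above
 * are only built-in defaults. Each is exposed as a sysfs attribute and can be
 * tuned at run time, e.g. (assuming a disk named sda):
 *
 *      echo 100 > /sys/block/sda/queue/iosched/read_expire    (milliseconds)
 *      echo 32  > /sys/block/sda/queue/iosched/fifo_batch
 *
 * The SHOW_*()/STORE_*() macros near the end of this file implement these
 * attributes, converting the expiry values between milliseconds (user space)
 * and jiffies (kernel internal).
 */
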
enum dd_data_dir {
        DD_READ         = READ,
        DD_WRITE        = WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
        DD_RT_PRIO      = 0,
        DD_BE_PRIO      = 1,
        DD_IDLE_PRIO    = 2,
        DD_PRIO_MAX     = 2,
};

enum { DD_PRIO_COUNT = 3 };

/* I/O statistics per I/O priority. */
struct io_stats_per_prio {
        local_t inserted;
        local_t merged;
        local_t dispatched;
        local_t completed;
};

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
        struct io_stats_per_prio stats[DD_PRIO_COUNT];
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
        struct list_head dispatch;
        struct rb_root sort_list[DD_DIR_COUNT];
        struct list_head fifo_list[DD_DIR_COUNT];
        /* Next request in sector sort order. Read, write or both are NULL. */
        struct request *next_rq[DD_DIR_COUNT];
};

struct deadline_data {
        /*
         * run time data
         */

        struct dd_per_prio per_prio[DD_PRIO_COUNT];

        /* Data direction of latest dispatched request. */
        enum dd_data_dir last_dir;
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        struct io_stats __percpu *stats;

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[DD_DIR_COUNT];
        int fifo_batch;
        int writes_starved;
        int front_merges;
        u32 async_depth;

        spinlock_t lock;
        spinlock_t zone_lock;
};

/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {                             \
        struct io_stats *io_stats = get_cpu_ptr((dd)->stats);           \
                                                                        \
        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
        local_inc(&io_stats->stats[(prio)].event_type);                 \
        put_cpu_ptr(io_stats);                                          \
} while (0)

/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({                                 \
        unsigned int cpu;                                               \
        u32 sum = 0;                                                    \
                                                                        \
        BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));       \
        BUILD_BUG_ON(!__same_type((prio), enum dd_prio));               \
        for_each_present_cpu(cpu)                                       \
                sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->      \
                                  stats[(prio)].event_type);            \
        sum;                                                            \
})

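/*
 * Illustrative note: dd_count()/dd_sum() follow the usual per-CPU counter
 * pattern: the hot path does a cheap local_t increment on the current CPU,
 * and the read side sums all per-CPU values without locking, tolerating a
 * slightly stale result. A typical pairing, as used by the debugfs code
 * later in this file:
 *
 *      dd_count(dd, inserted, prio);
 *      queued = dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
 *
 * dd_queued() below is implemented as exactly this difference.
 */
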
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
        [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
        [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
        [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
        [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
        return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
        return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

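/*
 * Illustrative note: the idiom used throughout this file to map a request to
 * its scheduler priority level combines the helper above with the
 * ioprio_class_to_prio[] table:
 *
 *      enum dd_prio prio = ioprio_class_to_prio[dd_rq_ioclass(rq)];
 *
 * Requests without an explicit I/O priority (IOPRIO_CLASS_NONE) are treated
 * as best-effort and land in DD_BE_PRIO.
 */
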
/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(per_prio, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        if (per_prio->next_rq[data_dir] == rq)
                per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
                                    struct dd_per_prio *per_prio,
                                    struct request *rq)
{
        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(per_prio, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(req);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(per_prio, req), req);
                deadline_add_rq_rb(per_prio, req);
        }
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(next);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

        dd_count(dd, merged, prio);

        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, per_prio, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir]).
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
                                      enum dd_data_dir data_dir)
{
        struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

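/*
 * Illustrative note: expiry relies on jiffies arithmetic. dd_insert_request()
 * stamps each request with
 *
 *      rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
 *
 * so with the default read_expire of HZ / 2, a read "expires" half a second
 * after insertion. time_after_eq() is used above instead of a plain >=
 * comparison so that jiffies wrap-around is handled correctly.
 */
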
/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (list_empty(&per_prio->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        rq = per_prio->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

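/*
 * Illustrative note: on zoned block devices (e.g. host-managed SMR drives),
 * writes to a sequential-write-required zone must be issued one at a time.
 * Both helpers above therefore skip write requests whose target zone is
 * still locked by an in-flight write; holding dd->zone_lock keeps this check
 * consistent with the zone unlock performed in dd_finish_request().
 */
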
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
                                             struct dd_per_prio *per_prio)
{
        struct request *rq, *next_rq;
        enum dd_data_dir data_dir;
        enum dd_prio prio;
        u8 ioprio_class;

        lockdep_assert_held(&dd->lock);

        if (!list_empty(&per_prio->dispatch)) {
                rq = list_first_entry(&per_prio->dispatch, struct request,
                                      queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, per_prio, dd->last_dir);
        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (!list_empty(&per_prio->fifo_list[DD_READ])) {
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

                if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = DD_READ;

                goto dispatch_find_request;
        }

        /*
         * either there are no reads, or writes have been starved for too long
         */

        if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

                dd->starved = 0;

                data_dir = DD_WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, per_prio, data_dir);
        if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, per_prio, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->last_dir = data_dir;
        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, per_prio, rq);
done:
        ioprio_class = dd_rq_ioclass(rq);
        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, dispatched, prio);
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;
        enum dd_prio prio;

        spin_lock(&dd->lock);
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
                if (rq)
                        break;
        }
        spin_unlock(&dd->lock);

        return rq;
}

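/*
 * Illustrative note: the loop above implements strict priority ordering
 * between the three levels in dd->per_prio[]. Because lower dd_prio values
 * are tried first and the loop stops at the first level that yields a
 * request, a queued RT request is always dispatched before a BE request,
 * and BE before IDLE.
 */
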
/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        struct deadline_data *dd = data->q->elevator->elevator_data;

        /* Do not throttle synchronous reads. */
        if (op_is_sync(op) && !op_is_write(op))
                return;

        /*
         * Throttle asynchronous requests and writes such that these requests
         * do not block the allocation of synchronous requests.
         */
        data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;

        dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

        sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}

/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        dd_depth_updated(hctx);
        return 0;
}

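/*
 * Illustrative note: dd_depth_updated() limits asynchronous requests to
 * three quarters of the scheduler tags. With q->nr_requests == 256, for
 * example, async_depth becomes 3 * 256 / 4 == 192, leaving 64 tags that
 * asynchronous requests and writes can never occupy, so they cannot starve
 * synchronous reads of tag allocations.
 */
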
static void dd_exit_sched(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
        }

        free_percpu(dd->stats);

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;
        enum dd_prio prio;
        int ret = -ENOMEM;

        eq = elevator_alloc(q, e);
        if (!eq)
                return ret;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                goto put_eq;

        eq->elevator_data = dd;

        dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
                                     GFP_KERNEL | __GFP_ZERO);
        if (!dd->stats)
                goto free_dd;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                INIT_LIST_HEAD(&per_prio->dispatch);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
                per_prio->sort_list[DD_READ] = RB_ROOT;
                per_prio->sort_list[DD_WRITE] = RB_ROOT;
        }
        dd->fifo_expire[DD_READ] = read_expire;
        dd->fifo_expire[DD_WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->last_dir = DD_WRITE;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);

        q->elevator = eq;
        return 0;

free_dd:
        kfree(dd);

put_eq:
        kobject_put(&eq->kobj);
        return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        if (blk_discard_mergable(__rq))
                                return ELEVATOR_DISCARD_MERGE;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
                         unsigned int nr_segs)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const enum dd_data_dir data_dir = rq_data_dir(rq);
        u16 ioprio = req_get_ioprio(rq);
        u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
        struct dd_per_prio *per_prio;
        enum dd_prio prio;
        LIST_HEAD(free);

        lockdep_assert_held(&dd->lock);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        prio = ioprio_class_to_prio[ioprio_class];
        dd_count(dd, inserted, prio);
        rq->elv.priv[0] = (void *)(uintptr_t)1;

        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                blk_mq_free_requests(&free);
                return;
        }

        trace_block_rq_insert(rq);

        per_prio = &dd->per_prio[prio];
        if (at_head) {
                list_add(&rq->queuelist, &per_prio->dispatch);
        } else {
                deadline_add_rq_rb(per_prio, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
        }
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
        rq->elv.priv[0] = NULL;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(rq);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * The block layer core may call dd_finish_request() without having
         * called dd_insert_requests(). Hence only update statistics for
         * requests for which dd_insert_requests() has been called. See also
         * blk_mq_request_bypass_insert().
         */
        if (rq->elv.priv[0])
                dd_count(dd, completed, prio);

        if (blk_queue_is_zoned(q)) {
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
        return !list_empty_careful(&per_prio->dispatch) ||
                !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
                !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++)
                if (dd_has_work_for_prio(&dd->per_prio[prio]))
                        return true;

        return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)                                         \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
                                                                        \
        return sysfs_emit(page, "%d\n", __VAR);                         \
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data, __ret;                                              \
                                                                        \
        __ret = kstrtoint(page, 0, &__data);                            \
        if (__ret < 0)                                                  \
                return __ret;                                           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        *(__PTR) = __CONV(__data);                                      \
        return count;                                                   \
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)                              \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                          \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

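/*
 * Illustrative note: for a single attribute, the macros above expand to
 * roughly the following (shown for read_expire):
 *
 *      static ssize_t deadline_read_expire_show(struct elevator_queue *e,
 *                                               char *page)
 *      {
 *              struct deadline_data *dd = e->elevator_data;
 *
 *              return sysfs_emit(page, "%d\n",
 *                                jiffies_to_msecs(dd->fifo_expire[DD_READ]));
 *      }
 *
 * i.e. values are stored internally in jiffies but shown in milliseconds;
 * the matching store handler converts back with msecs_to_jiffies().
 */
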
#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(async_depth),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)               \
static void *deadline_##name##_fifo_start(struct seq_file *m,          \
                                          loff_t *pos)                 \
        __acquires(&dd->lock)                                          \
{                                                                      \
        struct request_queue *q = m->private;                          \
        struct deadline_data *dd = q->elevator->elevator_data;         \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];            \
                                                                       \
        spin_lock(&dd->lock);                                          \
        return seq_list_start(&per_prio->fifo_list[data_dir], *pos);   \
}                                                                      \
                                                                       \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,  \
                                         loff_t *pos)                  \
{                                                                      \
        struct request_queue *q = m->private;                          \
        struct deadline_data *dd = q->elevator->elevator_data;         \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];            \
                                                                       \
        return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);  \
}                                                                      \
                                                                       \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)   \
        __releases(&dd->lock)                                          \
{                                                                      \
        struct request_queue *q = m->private;                          \
        struct deadline_data *dd = q->elevator->elevator_data;         \
                                                                       \
        spin_unlock(&dd->lock);                                        \
}                                                                      \
                                                                       \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {  \
        .start = deadline_##name##_fifo_start,                         \
        .next = deadline_##name##_fifo_next,                           \
        .stop = deadline_##name##_fifo_stop,                           \
        .show = blk_mq_debugfs_rq_show,                                \
};                                                                     \
                                                                       \
static int deadline_##name##_next_rq_show(void *data,                  \
                                          struct seq_file *m)          \
{                                                                      \
        struct request_queue *q = data;                                \
        struct deadline_data *dd = q->elevator->elevator_data;         \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];            \
        struct request *rq = per_prio->next_rq[data_dir];              \
                                                                       \
        if (rq)                                                        \
                __blk_mq_debugfs_rq_show(m, rq);                       \
        return 0;                                                      \
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->async_depth);
        return 0;
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
        return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

static int dd_queued_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
                   dd_queued(dd, DD_BE_PRIO),
                   dd_queued(dd, DD_IDLE_PRIO));
        return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
        return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
                - dd_sum(dd, completed, prio);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
                   dd_owned_by_driver(dd, DD_BE_PRIO),
                   dd_owned_by_driver(dd, DD_IDLE_PRIO));
        return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)                                    \
static void *deadline_dispatch##prio##_start(struct seq_file *m,       \
                                             loff_t *pos)              \
        __acquires(&dd->lock)                                          \
{                                                                      \
        struct request_queue *q = m->private;                          \
        struct deadline_data *dd = q->elevator->elevator_data;         \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];            \
                                                                       \
        spin_lock(&dd->lock);                                          \
        return seq_list_start(&per_prio->dispatch, *pos);              \
}                                                                      \
                                                                       \
static void *deadline_dispatch##prio##_next(struct seq_file *m,        \
                                            void *v, loff_t *pos)      \
{                                                                      \
        struct request_queue *q = m->private;                          \
        struct deadline_data *dd = q->elevator->elevator_data;         \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];            \
                                                                       \
        return seq_list_next(v, &per_prio->dispatch, pos);             \
}                                                                      \
                                                                       \
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
        __releases(&dd->lock)                                          \
{                                                                      \
        struct request_queue *q = m->private;                          \
        struct deadline_data *dd = q->elevator->elevator_data;         \
                                                                       \
        spin_unlock(&dd->lock);                                        \
}                                                                      \
                                                                       \
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
        .start = deadline_dispatch##prio##_start,                      \
        .next = deadline_dispatch##prio##_next,                        \
        .stop = deadline_dispatch##prio##_stop,                        \
        .show = blk_mq_debugfs_rq_show,                                \
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                 \
        {#name "_fifo_list", 0400,                                      \
         .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)                                     \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read0),
        DEADLINE_QUEUE_DDIR_ATTRS(write0),
        DEADLINE_QUEUE_DDIR_ATTRS(read1),
        DEADLINE_QUEUE_DDIR_ATTRS(write1),
        DEADLINE_QUEUE_DDIR_ATTRS(read2),
        DEADLINE_QUEUE_DDIR_ATTRS(write2),
        DEADLINE_NEXT_RQ_ATTR(read0),
        DEADLINE_NEXT_RQ_ATTR(write0),
        DEADLINE_NEXT_RQ_ATTR(read1),
        DEADLINE_NEXT_RQ_ATTR(write1),
        DEADLINE_NEXT_RQ_ATTR(read2),
        DEADLINE_NEXT_RQ_ATTR(write2),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"async_depth", 0400, dd_async_depth_show},
        {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
        {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
        {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
        {"owned_by_driver", 0400, dd_owned_by_driver_show},
        {"queued", 0400, dd_queued_show},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
        .ops = {
                .depth_updated = dd_depth_updated,
                .limit_depth = dd_limit_depth,
                .insert_requests = dd_insert_requests,
                .dispatch_request = dd_dispatch_request,
                .prepare_request = dd_prepare_request,
                .finish_request = dd_finish_request,
                .next_request = elv_rb_latter_request,
                .former_request = elv_rb_former_request,
                .bio_merge = dd_bio_merge,
                .request_merge = dd_request_merge,
                .requests_merged = dd_merged_requests,
                .request_merged = dd_request_merged,
                .has_work = dd_has_work,
                .init_sched = dd_init_sched,
                .exit_sched = dd_exit_sched,
                .init_hctx = dd_init_hctx,
        },

#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");