block/blk-core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
5 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
7 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
8 * - July 2000
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
10 */
11
12 /*
13 * This handles all read/write requests to block devices
14 */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-mq.h>
20 #include <linux/blk-pm.h>
21 #include <linux/highmem.h>
22 #include <linux/mm.h>
23 #include <linux/pagemap.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/string.h>
26 #include <linux/init.h>
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/fault-inject.h>
33 #include <linux/list_sort.h>
34 #include <linux/delay.h>
35 #include <linux/ratelimit.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/blk-cgroup.h>
38 #include <linux/t10-pi.h>
39 #include <linux/debugfs.h>
40 #include <linux/bpf.h>
41 #include <linux/psi.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/blk-crypto.h>
44
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/block.h>
47
48 #include "blk.h"
49 #include "blk-mq.h"
50 #include "blk-mq-sched.h"
51 #include "blk-pm.h"
52 #include "blk-rq-qos.h"
53
54 struct dentry *blk_debugfs_root;
55
56 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
57 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
58 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
59 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
60 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
61 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
62
63 DEFINE_IDA(blk_queue_ida);
64
65 /*
66 * For queue allocation
67 */
68 struct kmem_cache *blk_requestq_cachep;
69
70 /*
71 * Controlling structure to kblockd
72 */
73 static struct workqueue_struct *kblockd_workqueue;
74
75 /**
76 * blk_queue_flag_set - atomically set a queue flag
77 * @flag: flag to be set
78 * @q: request queue
79 */
80 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
81 {
82 set_bit(flag, &q->queue_flags);
83 }
84 EXPORT_SYMBOL(blk_queue_flag_set);
85
86 /**
87 * blk_queue_flag_clear - atomically clear a queue flag
88 * @flag: flag to be cleared
89 * @q: request queue
90 */
91 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
92 {
93 clear_bit(flag, &q->queue_flags);
94 }
95 EXPORT_SYMBOL(blk_queue_flag_clear);
96
97 /**
98 * blk_queue_flag_test_and_set - atomically test and set a queue flag
99 * @flag: flag to be set
100 * @q: request queue
101 *
102 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
103 * the flag was already set.
104 */
105 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
106 {
107 return test_and_set_bit(flag, &q->queue_flags);
108 }
109 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
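
/*
 * Editor's note: a minimal usage sketch, not part of blk-core.c. It shows
 * how the flag helpers above are typically combined; the function name is
 * hypothetical, QUEUE_FLAG_REGISTERED is a real queue flag.
 */
static inline void example_mark_registered_once(struct request_queue *q)
{
	/* test_and_set returns the previous value, so this fires only once */
	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_REGISTERED, q))
		pr_info("queue %d registered for the first time\n", q->id);
}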
110
111 void blk_rq_init(struct request_queue *q, struct request *rq)
112 {
113 memset(rq, 0, sizeof(*rq));
114
115 INIT_LIST_HEAD(&rq->queuelist);
116 rq->q = q;
117 rq->__sector = (sector_t) -1;
118 INIT_HLIST_NODE(&rq->hash);
119 RB_CLEAR_NODE(&rq->rb_node);
120 rq->tag = BLK_MQ_NO_TAG;
121 rq->internal_tag = BLK_MQ_NO_TAG;
122 rq->start_time_ns = ktime_get_ns();
123 rq->part = NULL;
124 blk_crypto_rq_set_defaults(rq);
125 }
126 EXPORT_SYMBOL(blk_rq_init);
127
128 #define REQ_OP_NAME(name) [REQ_OP_##name] = #name
129 static const char *const blk_op_name[] = {
130 REQ_OP_NAME(READ),
131 REQ_OP_NAME(WRITE),
132 REQ_OP_NAME(FLUSH),
133 REQ_OP_NAME(DISCARD),
134 REQ_OP_NAME(SECURE_ERASE),
135 REQ_OP_NAME(ZONE_RESET),
136 REQ_OP_NAME(ZONE_RESET_ALL),
137 REQ_OP_NAME(ZONE_OPEN),
138 REQ_OP_NAME(ZONE_CLOSE),
139 REQ_OP_NAME(ZONE_FINISH),
140 REQ_OP_NAME(ZONE_APPEND),
141 REQ_OP_NAME(WRITE_SAME),
142 REQ_OP_NAME(WRITE_ZEROES),
143 REQ_OP_NAME(DRV_IN),
144 REQ_OP_NAME(DRV_OUT),
145 };
146 #undef REQ_OP_NAME
147
148 /**
149 * blk_op_str - Return the string XXX for a given REQ_OP_XXX.
150 * @op: REQ_OP_XXX.
151 *
152 * Description: Centralized block layer helper to convert a REQ_OP_XXX value
153 * into its string form. Useful when debugging or tracing a bio or request.
154 * For an invalid REQ_OP_XXX it returns the string "UNKNOWN".
155 */
156 inline const char *blk_op_str(unsigned int op)
157 {
158 const char *op_str = "UNKNOWN";
159
160 if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
161 op_str = blk_op_name[op];
162
163 return op_str;
164 }
165 EXPORT_SYMBOL_GPL(blk_op_str);
166
167 static const struct {
168 int errno;
169 const char *name;
170 } blk_errors[] = {
171 [BLK_STS_OK] = { 0, "" },
172 [BLK_STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
173 [BLK_STS_TIMEOUT] = { -ETIMEDOUT, "timeout" },
174 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
175 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
176 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
177 [BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
178 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
179 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
180 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
181 [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
182 [BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
183
184 /* device mapper special case, should not leak out: */
185 [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
186
187 /* zone device specific errors */
188 [BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
189 [BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
190
191 /* everything else not covered above: */
192 [BLK_STS_IOERR] = { -EIO, "I/O" },
193 };
194
195 blk_status_t errno_to_blk_status(int errno)
196 {
197 int i;
198
199 for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
200 if (blk_errors[i].errno == errno)
201 return (__force blk_status_t)i;
202 }
203
204 return BLK_STS_IOERR;
205 }
206 EXPORT_SYMBOL_GPL(errno_to_blk_status);
207
208 int blk_status_to_errno(blk_status_t status)
209 {
210 int idx = (__force int)status;
211
212 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
213 return -EIO;
214 return blk_errors[idx].errno;
215 }
216 EXPORT_SYMBOL_GPL(blk_status_to_errno);
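
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c. The two
 * helpers above form a (lossy) round trip; any errno without a dedicated
 * entry in blk_errors[] collapses to BLK_STS_IOERR. Hypothetical caller:
 */
static inline void example_status_roundtrip(void)
{
	blk_status_t sts = errno_to_blk_status(-ENOSPC);	/* BLK_STS_NOSPC */

	WARN_ON(blk_status_to_errno(sts) != -ENOSPC);
}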
217
218 static void print_req_error(struct request *req, blk_status_t status,
219 const char *caller)
220 {
221 int idx = (__force int)status;
222
223 if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
224 return;
225
226 printk_ratelimited(KERN_ERR
227 "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
228 "phys_seg %u prio class %u\n",
229 caller, blk_errors[idx].name,
230 req->rq_disk ? req->rq_disk->disk_name : "?",
231 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
232 req->cmd_flags & ~REQ_OP_MASK,
233 req->nr_phys_segments,
234 IOPRIO_PRIO_CLASS(req->ioprio));
235 }
236
237 static void req_bio_endio(struct request *rq, struct bio *bio,
238 unsigned int nbytes, blk_status_t error)
239 {
240 if (error)
241 bio->bi_status = error;
242
243 if (unlikely(rq->rq_flags & RQF_QUIET))
244 bio_set_flag(bio, BIO_QUIET);
245
246 bio_advance(bio, nbytes);
247
248 if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
249 /*
250 * Partial zone append completions cannot be supported as the
251 * BIO fragments may end up not being written sequentially.
252 */
253 if (bio->bi_iter.bi_size)
254 bio->bi_status = BLK_STS_IOERR;
255 else
256 bio->bi_iter.bi_sector = rq->__sector;
257 }
258
259 /* don't actually finish bio if it's part of flush sequence */
260 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
261 bio_endio(bio);
262 }
263
264 void blk_dump_rq_flags(struct request *rq, char *msg)
265 {
266 printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
267 rq->rq_disk ? rq->rq_disk->disk_name : "?",
268 (unsigned long long) rq->cmd_flags);
269
270 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
271 (unsigned long long)blk_rq_pos(rq),
272 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
273 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
274 rq->bio, rq->biotail, blk_rq_bytes(rq));
275 }
276 EXPORT_SYMBOL(blk_dump_rq_flags);
277
278 /**
279 * blk_sync_queue - cancel any pending callbacks on a queue
280 * @q: the queue
281 *
282 * Description:
283 * The block layer may perform asynchronous callback activity
284 * on a queue, such as calling the unplug function after a timeout.
285 * A block device may call blk_sync_queue to ensure that any
286 * such activity is cancelled, thus allowing it to release resources
287 * that the callbacks might use. The caller must already have made sure
288 * that its ->submit_bio will not re-add plugging prior to calling
289 * this function.
290 *
291 * This function does not cancel any asynchronous activity arising
292 * out of elevator or throttling code. That would require elevator_exit()
293 * and blkcg_exit_queue() to be called with queue lock initialized.
294 *
295 */
296 void blk_sync_queue(struct request_queue *q)
297 {
298 del_timer_sync(&q->timeout);
299 cancel_work_sync(&q->timeout_work);
300 }
301 EXPORT_SYMBOL(blk_sync_queue);
302
303 /**
304 * blk_set_pm_only - increment pm_only counter
305 * @q: request queue pointer
306 */
307 void blk_set_pm_only(struct request_queue *q)
308 {
309 atomic_inc(&q->pm_only);
310 }
311 EXPORT_SYMBOL_GPL(blk_set_pm_only);
312
313 void blk_clear_pm_only(struct request_queue *q)
314 {
315 int pm_only;
316
317 pm_only = atomic_dec_return(&q->pm_only);
318 WARN_ON_ONCE(pm_only < 0);
319 if (pm_only == 0)
320 wake_up_all(&q->mq_freeze_wq);
321 }
322 EXPORT_SYMBOL_GPL(blk_clear_pm_only);
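
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c. The two
 * helpers above are meant to be used as a balanced pair around a span in
 * which only BLK_MQ_REQ_PM requests may enter the queue (see
 * blk_queue_enter() below). The function name is hypothetical.
 */
static inline void example_pm_only_span(struct request_queue *q)
{
	blk_set_pm_only(q);
	/* ... issue requests allocated with BLK_MQ_REQ_PM ... */
	blk_clear_pm_only(q);
}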
323
324 /**
325 * blk_put_queue - decrement the request_queue refcount
326 * @q: the request_queue structure to decrement the refcount for
327 *
328 * Decrements the refcount of the request_queue kobject. When this reaches 0
329 * we'll have blk_release_queue() called.
330 *
331 * Context: Any context, but the last reference must not be dropped from
332 * atomic context.
333 */
334 void blk_put_queue(struct request_queue *q)
335 {
336 kobject_put(&q->kobj);
337 }
338 EXPORT_SYMBOL(blk_put_queue);
339
340 void blk_queue_start_drain(struct request_queue *q)
341 {
342 /*
343 * When the queue DYING flag is set, we need to block new requests
344 * from entering the queue, so we call blk_freeze_queue_start() to
345 * prevent I/O from crossing blk_queue_enter().
346 */
347 blk_freeze_queue_start(q);
348 if (queue_is_mq(q))
349 blk_mq_wake_waiters(q);
350 /* Make blk_queue_enter() reexamine the DYING flag. */
351 wake_up_all(&q->mq_freeze_wq);
352 }
353
354 /**
355 * blk_cleanup_queue - shutdown a request queue
356 * @q: request queue to shutdown
357 *
358 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
359 * put it. All future requests will be failed immediately with -ENODEV.
360 *
361 * Context: can sleep
362 */
363 void blk_cleanup_queue(struct request_queue *q)
364 {
365 /* cannot be called from atomic context */
366 might_sleep();
367
368 WARN_ON_ONCE(blk_queue_registered(q));
369
370 /* mark @q DYING, no new request or merges will be allowed afterwards */
371 blk_queue_flag_set(QUEUE_FLAG_DYING, q);
372 blk_queue_start_drain(q);
373
374 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
375 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
376
377 /*
378 * Drain all requests queued before the DYING marking. Set the DEAD flag
379 * to prevent blk_mq_run_hw_queues() from accessing the hardware queues
380 * after draining has finished.
381 */
382 blk_freeze_queue(q);
383
384 /* cleanup rq qos structures for queue without disk */
385 rq_qos_exit(q);
386
387 blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
388
389 blk_sync_queue(q);
390 if (queue_is_mq(q)) {
391 blk_mq_cancel_work_sync(q);
392 blk_mq_exit_queue(q);
393 }
394
395 /*
396 * In theory, the sched_tags request pool belongs to the request queue.
397 * However, the current implementation requires the tag_set for freeing
398 * requests, so free the pool now.
399 *
400 * The queue has been frozen, so there can't be any in-queue requests and
401 * it is safe to free the requests now.
402 */
403 mutex_lock(&q->sysfs_lock);
404 if (q->elevator)
405 blk_mq_sched_free_requests(q);
406 mutex_unlock(&q->sysfs_lock);
407
408 percpu_ref_exit(&q->q_usage_counter);
409
410 /* @q is and will stay empty, shutdown and put */
411 blk_put_queue(q);
412 }
413 EXPORT_SYMBOL(blk_cleanup_queue);
414
415 static bool blk_try_enter_queue(struct request_queue *q, bool pm)
416 {
417 rcu_read_lock();
418 if (!percpu_ref_tryget_live(&q->q_usage_counter))
419 goto fail;
420
421 /*
422 * The code that increments the pm_only counter must ensure that the
423 * counter is globally visible before the queue is unfrozen.
424 */
425 if (blk_queue_pm_only(q) &&
426 (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
427 goto fail_put;
428
429 rcu_read_unlock();
430 return true;
431
432 fail_put:
433 percpu_ref_put(&q->q_usage_counter);
434 fail:
435 rcu_read_unlock();
436 return false;
437 }
438
439 /**
440 * blk_queue_enter() - try to increase q->q_usage_counter
441 * @q: request queue pointer
442 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
443 */
444 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
445 {
446 const bool pm = flags & BLK_MQ_REQ_PM;
447
448 while (!blk_try_enter_queue(q, pm)) {
449 if (flags & BLK_MQ_REQ_NOWAIT)
450 return -EBUSY;
451
452 /*
453 * This is the read pair of the barrier in blk_freeze_queue_start():
454 * we need to order reading the __PERCPU_REF_DEAD flag of
455 * .q_usage_counter against reading .mq_freeze_depth or the queue
456 * dying flag, otherwise the following wait may never return if the
457 * two reads are reordered.
458 */
459 smp_rmb();
460 wait_event(q->mq_freeze_wq,
461 (!q->mq_freeze_depth &&
462 blk_pm_resume_queue(pm, q)) ||
463 blk_queue_dying(q));
464 if (blk_queue_dying(q))
465 return -ENODEV;
466 }
467
468 return 0;
469 }
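
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c. Every
 * successful blk_queue_enter() must be balanced by blk_queue_exit()
 * (defined below). Hypothetical non-blocking caller:
 */
static inline int example_enter_exit(struct request_queue *q)
{
	int ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);

	if (ret)
		return ret;	/* -EBUSY: frozen, pm_only or dying */
	/* ... use the queue while q_usage_counter is held ... */
	blk_queue_exit(q);
	return 0;
}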
470
471 static inline int bio_queue_enter(struct bio *bio)
472 {
473 struct gendisk *disk = bio->bi_bdev->bd_disk;
474 struct request_queue *q = disk->queue;
475
476 while (!blk_try_enter_queue(q, false)) {
477 if (bio->bi_opf & REQ_NOWAIT) {
478 if (test_bit(GD_DEAD, &disk->state))
479 goto dead;
480 bio_wouldblock_error(bio);
481 return -EBUSY;
482 }
483
484 /*
485 * This is the read pair of the barrier in blk_freeze_queue_start():
486 * we need to order reading the __PERCPU_REF_DEAD flag of
487 * .q_usage_counter against reading .mq_freeze_depth or the queue
488 * dying flag, otherwise the following wait may never return if the
489 * two reads are reordered.
490 */
491 smp_rmb();
492 wait_event(q->mq_freeze_wq,
493 (!q->mq_freeze_depth &&
494 blk_pm_resume_queue(false, q)) ||
495 test_bit(GD_DEAD, &disk->state));
496 if (test_bit(GD_DEAD, &disk->state))
497 goto dead;
498 }
499
500 return 0;
501 dead:
502 bio_io_error(bio);
503 return -ENODEV;
504 }
505
506 void blk_queue_exit(struct request_queue *q)
507 {
508 percpu_ref_put(&q->q_usage_counter);
509 }
510
511 static void blk_queue_usage_counter_release(struct percpu_ref *ref)
512 {
513 struct request_queue *q =
514 container_of(ref, struct request_queue, q_usage_counter);
515
516 wake_up_all(&q->mq_freeze_wq);
517 }
518
519 static void blk_rq_timed_out_timer(struct timer_list *t)
520 {
521 struct request_queue *q = from_timer(q, t, timeout);
522
523 kblockd_schedule_work(&q->timeout_work);
524 }
525
526 static void blk_timeout_work(struct work_struct *work)
527 {
528 }
529
530 struct request_queue *blk_alloc_queue(int node_id)
531 {
532 struct request_queue *q;
533 int ret;
534
535 q = kmem_cache_alloc_node(blk_requestq_cachep,
536 GFP_KERNEL | __GFP_ZERO, node_id);
537 if (!q)
538 return NULL;
539
540 q->last_merge = NULL;
541
542 q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
543 if (q->id < 0)
544 goto fail_q;
545
546 ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
547 if (ret)
548 goto fail_id;
549
550 q->stats = blk_alloc_queue_stats();
551 if (!q->stats)
552 goto fail_split;
553
554 q->node = node_id;
555
556 atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
557
558 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
559 INIT_WORK(&q->timeout_work, blk_timeout_work);
560 INIT_LIST_HEAD(&q->icq_list);
561 #ifdef CONFIG_BLK_CGROUP
562 INIT_LIST_HEAD(&q->blkg_list);
563 #endif
564
565 kobject_init(&q->kobj, &blk_queue_ktype);
566
567 mutex_init(&q->debugfs_mutex);
568 mutex_init(&q->sysfs_lock);
569 mutex_init(&q->sysfs_dir_lock);
570 spin_lock_init(&q->queue_lock);
571
572 init_waitqueue_head(&q->mq_freeze_wq);
573 mutex_init(&q->mq_freeze_lock);
574
575 /*
576 * Init percpu_ref in atomic mode so that it's faster to shutdown.
577 * See blk_register_queue() for details.
578 */
579 if (percpu_ref_init(&q->q_usage_counter,
580 blk_queue_usage_counter_release,
581 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
582 goto fail_stats;
583
584 if (blkcg_init_queue(q))
585 goto fail_ref;
586
587 blk_queue_dma_alignment(q, 511);
588 blk_set_default_limits(&q->limits);
589 q->nr_requests = BLKDEV_MAX_RQ;
590
591 return q;
592
593 fail_ref:
594 percpu_ref_exit(&q->q_usage_counter);
595 fail_stats:
596 blk_free_queue_stats(q->stats);
597 fail_split:
598 bioset_exit(&q->bio_split);
599 fail_id:
600 ida_simple_remove(&blk_queue_ida, q->id);
601 fail_q:
602 kmem_cache_free(blk_requestq_cachep, q);
603 return NULL;
604 }
605
606 /**
607 * blk_get_queue - increment the request_queue refcount
608 * @q: the request_queue structure to increment the refcount for
609 *
610 * Increment the refcount of the request_queue kobject.
611 *
612 * Context: Any context.
613 */
614 bool blk_get_queue(struct request_queue *q)
615 {
616 if (likely(!blk_queue_dying(q))) {
617 __blk_get_queue(q);
618 return true;
619 }
620
621 return false;
622 }
623 EXPORT_SYMBOL(blk_get_queue);
624
625 /**
626 * blk_get_request - allocate a request
627 * @q: request queue to allocate a request for
628 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
629 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
630 */
631 struct request *blk_get_request(struct request_queue *q, unsigned int op,
632 blk_mq_req_flags_t flags)
633 {
634 struct request *req;
635
636 WARN_ON_ONCE(op & REQ_NOWAIT);
637 WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
638
639 req = blk_mq_alloc_request(q, op, flags);
640 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
641 q->mq_ops->initialize_rq_fn(req);
642
643 return req;
644 }
645 EXPORT_SYMBOL(blk_get_request);
646
647 void blk_put_request(struct request *req)
648 {
649 blk_mq_free_request(req);
650 }
651 EXPORT_SYMBOL(blk_put_request);
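
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c, showing
 * the usual pairing of the two helpers above for a passthrough request
 * (error handling trimmed; the function name is hypothetical).
 */
static inline void example_get_put_request(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);

	if (IS_ERR(rq))
		return;
	/* ... set up and execute the passthrough request ... */
	blk_put_request(rq);
}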
652
653 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
654 {
655 char b[BDEVNAME_SIZE];
656
657 pr_info_ratelimited("attempt to access beyond end of device\n"
658 "%s: rw=%d, want=%llu, limit=%llu\n",
659 bio_devname(bio, b), bio->bi_opf,
660 bio_end_sector(bio), maxsector);
661 }
662
663 #ifdef CONFIG_FAIL_MAKE_REQUEST
664
665 static DECLARE_FAULT_ATTR(fail_make_request);
666
667 static int __init setup_fail_make_request(char *str)
668 {
669 return setup_fault_attr(&fail_make_request, str);
670 }
671 __setup("fail_make_request=", setup_fail_make_request);
672
673 static bool should_fail_request(struct block_device *part, unsigned int bytes)
674 {
675 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
676 }
677
678 static int __init fail_make_request_debugfs(void)
679 {
680 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
681 NULL, &fail_make_request);
682
683 return PTR_ERR_OR_ZERO(dir);
684 }
685
686 late_initcall(fail_make_request_debugfs);
687
688 #else /* CONFIG_FAIL_MAKE_REQUEST */
689
690 static inline bool should_fail_request(struct block_device *part,
691 unsigned int bytes)
692 {
693 return false;
694 }
695
696 #endif /* CONFIG_FAIL_MAKE_REQUEST */
697
698 static inline bool bio_check_ro(struct bio *bio)
699 {
700 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
701 char b[BDEVNAME_SIZE];
702
703 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
704 return false;
705
706 WARN_ONCE(1,
707 "Trying to write to read-only block-device %s (partno %d)\n",
708 bio_devname(bio, b), bio->bi_bdev->bd_partno);
709 /* Older lvm-tools actually trigger this */
710 return false;
711 }
712
713 return false;
714 }
715
716 static noinline int should_fail_bio(struct bio *bio)
717 {
718 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
719 return -EIO;
720 return 0;
721 }
722 ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
723
724 /*
725 * Check whether this bio extends beyond the end of the device or partition.
726 * This may well happen - the kernel calls bread() without checking the size of
727 * the device, e.g., when mounting a file system.
728 */
729 static inline int bio_check_eod(struct bio *bio)
730 {
731 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
732 unsigned int nr_sectors = bio_sectors(bio);
733
734 if (nr_sectors && maxsector &&
735 (nr_sectors > maxsector ||
736 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
737 handle_bad_sector(bio, maxsector);
738 return -EIO;
739 }
740 return 0;
741 }
742
743 /*
744 * Remap block n of partition p to block n+start(p) of the disk.
745 */
746 static int blk_partition_remap(struct bio *bio)
747 {
748 struct block_device *p = bio->bi_bdev;
749
750 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
751 return -EIO;
752 if (bio_sectors(bio)) {
753 bio->bi_iter.bi_sector += p->bd_start_sect;
754 trace_block_bio_remap(bio, p->bd_dev,
755 bio->bi_iter.bi_sector -
756 p->bd_start_sect);
757 }
758 bio_set_flag(bio, BIO_REMAPPED);
759 return 0;
760 }
761
762 /*
763 * Check write append to a zoned block device.
764 */
765 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
766 struct bio *bio)
767 {
768 sector_t pos = bio->bi_iter.bi_sector;
769 int nr_sectors = bio_sectors(bio);
770
771 /* Only applicable to zoned block devices */
772 if (!blk_queue_is_zoned(q))
773 return BLK_STS_NOTSUPP;
774
775 /* The bio sector must point to the start of a sequential zone */
776 if (pos & (blk_queue_zone_sectors(q) - 1) ||
777 !blk_queue_zone_is_seq(q, pos))
778 return BLK_STS_IOERR;
779
780 /*
781 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
782 * split and could result in non-contiguous sectors being written in
783 * different zones.
784 */
785 if (nr_sectors > q->limits.chunk_sectors)
786 return BLK_STS_IOERR;
787
788 /* Make sure the BIO is small enough and will not get split */
789 if (nr_sectors > q->limits.max_zone_append_sectors)
790 return BLK_STS_IOERR;
791
792 bio->bi_opf |= REQ_NOMERGE;
793
794 return BLK_STS_OK;
795 }
796
797 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
798 {
799 struct block_device *bdev = bio->bi_bdev;
800 struct request_queue *q = bdev->bd_disk->queue;
801 blk_status_t status = BLK_STS_IOERR;
802 struct blk_plug *plug;
803
804 might_sleep();
805
806 plug = blk_mq_plug(q, bio);
807 if (plug && plug->nowait)
808 bio->bi_opf |= REQ_NOWAIT;
809
810 /*
811 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
812 * if the queue does not support NOWAIT.
813 */
814 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
815 goto not_supported;
816
817 if (should_fail_bio(bio))
818 goto end_io;
819 if (unlikely(bio_check_ro(bio)))
820 goto end_io;
821 if (!bio_flagged(bio, BIO_REMAPPED)) {
822 if (unlikely(bio_check_eod(bio)))
823 goto end_io;
824 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
825 goto end_io;
826 }
827
828 /*
829 * Filter flush bio's early so that bio based drivers without flush
830 * support don't have to worry about them.
831 */
832 if (op_is_flush(bio->bi_opf) &&
833 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
834 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
835 if (!bio_sectors(bio)) {
836 status = BLK_STS_OK;
837 goto end_io;
838 }
839 }
840
841 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
842 bio_clear_hipri(bio);
843
844 switch (bio_op(bio)) {
845 case REQ_OP_DISCARD:
846 if (!blk_queue_discard(q))
847 goto not_supported;
848 break;
849 case REQ_OP_SECURE_ERASE:
850 if (!blk_queue_secure_erase(q))
851 goto not_supported;
852 break;
853 case REQ_OP_WRITE_SAME:
854 if (!q->limits.max_write_same_sectors)
855 goto not_supported;
856 break;
857 case REQ_OP_ZONE_APPEND:
858 status = blk_check_zone_append(q, bio);
859 if (status != BLK_STS_OK)
860 goto end_io;
861 break;
862 case REQ_OP_ZONE_RESET:
863 case REQ_OP_ZONE_OPEN:
864 case REQ_OP_ZONE_CLOSE:
865 case REQ_OP_ZONE_FINISH:
866 if (!blk_queue_is_zoned(q))
867 goto not_supported;
868 break;
869 case REQ_OP_ZONE_RESET_ALL:
870 if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
871 goto not_supported;
872 break;
873 case REQ_OP_WRITE_ZEROES:
874 if (!q->limits.max_write_zeroes_sectors)
875 goto not_supported;
876 break;
877 default:
878 break;
879 }
880
881 /*
882 * Various block parts want %current->io_context, so allocate it up
883 * front rather than dealing with lots of pain to allocate it only
884 * where needed. This may fail and the block layer knows how to live
885 * with it.
886 */
887 if (unlikely(!current->io_context))
888 create_task_io_context(current, GFP_ATOMIC, q->node);
889
890 if (blk_throtl_bio(bio))
891 return false;
892
893 blk_cgroup_bio_start(bio);
894 blkcg_bio_issue_init(bio);
895
896 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
897 trace_block_bio_queue(bio);
898 /* Now that enqueuing has been traced, we need to trace
899 * completion as well.
900 */
901 bio_set_flag(bio, BIO_TRACE_COMPLETION);
902 }
903 return true;
904
905 not_supported:
906 status = BLK_STS_NOTSUPP;
907 end_io:
908 bio->bi_status = status;
909 bio_endio(bio);
910 return false;
911 }
912
913 static blk_qc_t __submit_bio(struct bio *bio)
914 {
915 struct gendisk *disk = bio->bi_bdev->bd_disk;
916 blk_qc_t ret = BLK_QC_T_NONE;
917
918 if (unlikely(bio_queue_enter(bio) != 0))
919 return BLK_QC_T_NONE;
920
921 if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
922 goto queue_exit;
923 if (disk->fops->submit_bio) {
924 ret = disk->fops->submit_bio(bio);
925 goto queue_exit;
926 }
927 return blk_mq_submit_bio(bio);
928
929 queue_exit:
930 blk_queue_exit(disk->queue);
931 return ret;
932 }
933
934 /*
935 * The loop in this function may be a bit non-obvious, and so deserves some
936 * explanation:
937 *
938 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
939 * that), so we have a list with a single bio.
940 * - We pretend that we have just taken it off a longer list, so we assign
941 * bio_list to a pointer to the bio_list_on_stack, thus initialising the
942 * bio_list of new bios to be added. ->submit_bio() may indeed add some more
943 * bios through a recursive call to submit_bio_noacct. If it did, we find a
944 * non-NULL value in bio_list and re-enter the loop from the top.
945 * - In this case we really did just take the bio off the top of the list (no
946 *   pretending) and so remove it from bio_list, and call into ->submit_bio()
947 *   again.
948 *
949 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
950 * bio_list_on_stack[1] contains bios that were submitted before the current
951 *	->submit_bio, but that haven't been processed yet.
952 */
953 static blk_qc_t __submit_bio_noacct(struct bio *bio)
954 {
955 struct bio_list bio_list_on_stack[2];
956 blk_qc_t ret = BLK_QC_T_NONE;
957
958 BUG_ON(bio->bi_next);
959
960 bio_list_init(&bio_list_on_stack[0]);
961 current->bio_list = bio_list_on_stack;
962
963 do {
964 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
965 struct bio_list lower, same;
966
967 /*
968 * Create a fresh bio_list for all subordinate requests.
969 */
970 bio_list_on_stack[1] = bio_list_on_stack[0];
971 bio_list_init(&bio_list_on_stack[0]);
972
973 ret = __submit_bio(bio);
974
975 /*
976 * Sort new bios into those for a lower level and those for the
977 * same level.
978 */
979 bio_list_init(&lower);
980 bio_list_init(&same);
981 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
982 if (q == bio->bi_bdev->bd_disk->queue)
983 bio_list_add(&same, bio);
984 else
985 bio_list_add(&lower, bio);
986
987 /*
988 * Now assemble so we handle the lowest level first.
989 */
990 bio_list_merge(&bio_list_on_stack[0], &lower);
991 bio_list_merge(&bio_list_on_stack[0], &same);
992 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
993 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
994
995 current->bio_list = NULL;
996 return ret;
997 }
998
999 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
1000 {
1001 struct bio_list bio_list[2] = { };
1002 blk_qc_t ret;
1003
1004 current->bio_list = bio_list;
1005
1006 do {
1007 ret = __submit_bio(bio);
1008 } while ((bio = bio_list_pop(&bio_list[0])));
1009
1010 current->bio_list = NULL;
1011 return ret;
1012 }
1013
1014 /**
1015 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
1016 * @bio: The bio describing the location in memory and on the device.
1017 *
1018 * This is a version of submit_bio() that shall only be used for I/O that is
1019 * resubmitted to lower level drivers by stacking block drivers. All file
1020 * systems and other upper level users of the block layer should use
1021 * submit_bio() instead.
1022 */
1023 blk_qc_t submit_bio_noacct(struct bio *bio)
1024 {
1025 /*
1026 * We only want one ->submit_bio to be active at a time, else stack
1027 * usage with stacked devices could be a problem. Use current->bio_list
1028 * to collect a list of requests submitted by a ->submit_bio method while
1029 * it is active, and then process them after it returns.
1030 */
1031 if (current->bio_list) {
1032 bio_list_add(&current->bio_list[0], bio);
1033 return BLK_QC_T_NONE;
1034 }
1035
1036 if (!bio->bi_bdev->bd_disk->fops->submit_bio)
1037 return __submit_bio_noacct_mq(bio);
1038 return __submit_bio_noacct(bio);
1039 }
1040 EXPORT_SYMBOL(submit_bio_noacct);
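
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c. A stacking
 * (bio-based) driver typically calls submit_bio_noacct() from its
 * ->submit_bio to pass an already-accounted, remapped bio down to the
 * underlying device. Hypothetical 1:1 linear remap:
 */
static inline void example_stacked_submit(struct bio *bio,
					  struct block_device *lower)
{
	bio_set_dev(bio, lower);	/* redirect to the lower device */
	submit_bio_noacct(bio);		/* resubmit, not submit_bio() */
}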
1041
1042 /**
1043 * submit_bio - submit a bio to the block device layer for I/O
1044 * @bio: The &struct bio which describes the I/O
1045 *
1046 * submit_bio() is used to submit I/O requests to block devices. It is passed a
1047 * fully set up &struct bio that describes the I/O that needs to be done. The
1048 * bio will be sent to the device described by the bi_bdev field.
1049 *
1050 * The success/failure status of the request, along with notification of
1051 * completion, is delivered asynchronously through the ->bi_end_io() callback
1052 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
1053 * been called.
1054 */
1055 blk_qc_t submit_bio(struct bio *bio)
1056 {
1057 if (blkcg_punt_bio_submit(bio))
1058 return BLK_QC_T_NONE;
1059
1060 /*
1061 * If it's a regular read/write or a barrier with data attached,
1062 * go through the normal accounting stuff before submission.
1063 */
1064 if (bio_has_data(bio)) {
1065 unsigned int count;
1066
1067 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1068 count = queue_logical_block_size(
1069 bio->bi_bdev->bd_disk->queue) >> 9;
1070 else
1071 count = bio_sectors(bio);
1072
1073 if (op_is_write(bio_op(bio))) {
1074 count_vm_events(PGPGOUT, count);
1075 } else {
1076 task_io_account_read(bio->bi_iter.bi_size);
1077 count_vm_events(PGPGIN, count);
1078 }
1079 }
1080
1081 /*
1082 * If we're reading data that is part of the userspace workingset, count
1083 * submission time as memory stall. When the device is congested, or
1084 * the submitting cgroup is IO-throttled, submission can be a significant
1085 * part of overall IO time.
1086 */
1087 if (unlikely(bio_op(bio) == REQ_OP_READ &&
1088 bio_flagged(bio, BIO_WORKINGSET))) {
1089 unsigned long pflags;
1090 blk_qc_t ret;
1091
1092 psi_memstall_enter(&pflags);
1093 ret = submit_bio_noacct(bio);
1094 psi_memstall_leave(&pflags);
1095
1096 return ret;
1097 }
1098
1099 return submit_bio_noacct(bio);
1100 }
1101 EXPORT_SYMBOL(submit_bio);
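
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c, loosely
 * modeled on blkdev_issue_flush(): a synchronous one-page read using an
 * on-stack bio. submit_bio_wait() submits via submit_bio() and sleeps
 * until ->bi_end_io fires. The function name is hypothetical.
 */
static inline int example_sync_read_page(struct block_device *bdev,
					 struct page *page, sector_t sector)
{
	struct bio bio;
	struct bio_vec bvec;

	bio_init(&bio, &bvec, 1);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);
	return submit_bio_wait(&bio);
}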
1102
1103 /**
1104 * blk_cloned_rq_check_limits - Helper function to check a cloned request
1105 * for the new queue limits
1106 * @q: the queue
1107 * @rq: the request being checked
1108 *
1109 * Description:
1110 *    @rq may have been made based on the weaker limitations of upper-level
1111 *    queues in request stacking drivers, and it may violate the limitations
1112 *    of @q. Since the block layer and the underlying device driver trust
1113 *    @rq after it is inserted into @q, it should be checked against @q
1114 *    before the insertion using this generic function.
1115 *
1116 * Request stacking drivers like request-based dm may change the queue
1117 * limits when retrying requests on other queues. Those requests need
1118 * to be checked against the new queue limits again during dispatch.
1119 */
1120 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1121 struct request *rq)
1122 {
1123 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
1124
1125 if (blk_rq_sectors(rq) > max_sectors) {
1126 /*
1127 * A SCSI device does not have a good way to report whether
1128 * Write Same/Zero is actually supported. If a device rejects
1129 * a non-read/write command (discard, write same, etc.) the
1130 * low-level device driver will set the relevant queue limit to
1131 * 0 to prevent blk-lib from issuing more of the offending
1132 * operations. Commands queued prior to the queue limit being
1133 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
1134 * errors being propagated to upper layers.
1135 */
1136 if (max_sectors == 0)
1137 return BLK_STS_NOTSUPP;
1138
1139 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1140 __func__, blk_rq_sectors(rq), max_sectors);
1141 return BLK_STS_IOERR;
1142 }
1143
1144 /*
1145 * The queue settings related to segment counting may differ from the
1146 * original queue.
1147 */
1148 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1149 if (rq->nr_phys_segments > queue_max_segments(q)) {
1150 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1151 __func__, rq->nr_phys_segments, queue_max_segments(q));
1152 return BLK_STS_IOERR;
1153 }
1154
1155 return BLK_STS_OK;
1156 }
1157
1158 /**
1159 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1160 * @q: the queue to submit the request
1161 * @rq: the request being queued
1162 */
1163 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1164 {
1165 blk_status_t ret;
1166
1167 ret = blk_cloned_rq_check_limits(q, rq);
1168 if (ret != BLK_STS_OK)
1169 return ret;
1170
1171 if (rq->rq_disk &&
1172 should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
1173 return BLK_STS_IOERR;
1174
1175 if (blk_crypto_insert_cloned_request(rq))
1176 return BLK_STS_IOERR;
1177
1178 if (blk_queue_io_stat(q))
1179 blk_account_io_start(rq);
1180
1181 /*
1182 * Since we have a scheduler attached on the top device,
1183 * bypass a potential scheduler on the bottom device for
1184 * insert.
1185 */
1186 return blk_mq_request_issue_directly(rq, true);
1187 }
1188 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1189
1190 /**
1191 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1192 * @rq: request to examine
1193 *
1194 * Description:
1195 *     A request could be a merge of IOs which require different failure
1196 *     handling. This function determines the number of bytes which
1197 *     can be failed from the beginning of the request without
1198 *     crossing into an area which needs to be retried further.
1199 *
1200 * Return:
1201 * The number of bytes to fail.
1202 */
1203 unsigned int blk_rq_err_bytes(const struct request *rq)
1204 {
1205 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1206 unsigned int bytes = 0;
1207 struct bio *bio;
1208
1209 if (!(rq->rq_flags & RQF_MIXED_MERGE))
1210 return blk_rq_bytes(rq);
1211
1212 /*
1213 * Currently the only 'mixing' which can happen is between
1214 * different failfast types. We can safely fail portions
1215 * which have all the failfast bits that the first one has -
1216 * the ones which are at least as eager to fail as the first
1217 * one.
1218 */
1219 for (bio = rq->bio; bio; bio = bio->bi_next) {
1220 if ((bio->bi_opf & ff) != ff)
1221 break;
1222 bytes += bio->bi_iter.bi_size;
1223 }
1224
1225 /* this could lead to infinite loop */
1226 BUG_ON(blk_rq_bytes(rq) && !bytes);
1227 return bytes;
1228 }
1229 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1230
1231 static void update_io_ticks(struct block_device *part, unsigned long now,
1232 bool end)
1233 {
1234 unsigned long stamp;
1235 again:
1236 stamp = READ_ONCE(part->bd_stamp);
1237 if (unlikely(time_after(now, stamp))) {
1238 if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
1239 __part_stat_add(part, io_ticks, end ? now - stamp : 1);
1240 }
1241 if (part->bd_partno) {
1242 part = bdev_whole(part);
1243 goto again;
1244 }
1245 }
1246
1247 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1248 {
1249 if (req->part && blk_do_io_stat(req)) {
1250 const int sgrp = op_stat_group(req_op(req));
1251
1252 part_stat_lock();
1253 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
1254 part_stat_unlock();
1255 }
1256 }
1257
1258 void blk_account_io_done(struct request *req, u64 now)
1259 {
1260 /*
1261 * Account IO completion. flush_rq isn't accounted as a
1262 * normal IO on either queueing or completion. Accounting the
1263 * containing request is enough.
1264 */
1265 if (req->part && blk_do_io_stat(req) &&
1266 !(req->rq_flags & RQF_FLUSH_SEQ)) {
1267 const int sgrp = op_stat_group(req_op(req));
1268
1269 part_stat_lock();
1270 update_io_ticks(req->part, jiffies, true);
1271 part_stat_inc(req->part, ios[sgrp]);
1272 part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
1273 part_stat_unlock();
1274 }
1275 }
1276
1277 void blk_account_io_start(struct request *rq)
1278 {
1279 if (!blk_do_io_stat(rq))
1280 return;
1281
1282 /* passthrough requests can hold bios that do not have ->bi_bdev set */
1283 if (rq->bio && rq->bio->bi_bdev)
1284 rq->part = rq->bio->bi_bdev;
1285 else
1286 rq->part = rq->rq_disk->part0;
1287
1288 part_stat_lock();
1289 update_io_ticks(rq->part, jiffies, false);
1290 part_stat_unlock();
1291 }
1292
1293 static unsigned long __part_start_io_acct(struct block_device *part,
1294 unsigned int sectors, unsigned int op,
1295 unsigned long start_time)
1296 {
1297 const int sgrp = op_stat_group(op);
1298
1299 part_stat_lock();
1300 update_io_ticks(part, start_time, false);
1301 part_stat_inc(part, ios[sgrp]);
1302 part_stat_add(part, sectors[sgrp], sectors);
1303 part_stat_local_inc(part, in_flight[op_is_write(op)]);
1304 part_stat_unlock();
1305
1306 return start_time;
1307 }
1308
1309 /**
1310 * bio_start_io_acct_time - start I/O accounting for bio based drivers
1311 * @bio: bio to start account for
1312 * @start_time: start time that should be passed back to bio_end_io_acct().
1313 */
1314 void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
1315 {
1316 __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1317 bio_op(bio), start_time);
1318 }
1319 EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
1320
1321 /**
1322 * bio_start_io_acct - start I/O accounting for bio based drivers
1323 * @bio: bio to start account for
1324 *
1325 * Returns the start time that should be passed back to bio_end_io_acct().
1326 */
1327 unsigned long bio_start_io_acct(struct bio *bio)
1328 {
1329 return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
1330 bio_op(bio), jiffies);
1331 }
1332 EXPORT_SYMBOL_GPL(bio_start_io_acct);
1333
1334 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
1335 unsigned int op)
1336 {
1337 return __part_start_io_acct(disk->part0, sectors, op, jiffies);
1338 }
1339 EXPORT_SYMBOL(disk_start_io_acct);
1340
1341 static void __part_end_io_acct(struct block_device *part, unsigned int op,
1342 unsigned long start_time)
1343 {
1344 const int sgrp = op_stat_group(op);
1345 unsigned long now = READ_ONCE(jiffies);
1346 unsigned long duration = now - start_time;
1347
1348 part_stat_lock();
1349 update_io_ticks(part, now, true);
1350 part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1351 part_stat_local_dec(part, in_flight[op_is_write(op)]);
1352 part_stat_unlock();
1353 }
1354
1355 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1356 struct block_device *orig_bdev)
1357 {
1358 __part_end_io_acct(orig_bdev, bio_op(bio), start_time);
1359 }
1360 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
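
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c. A bio
 * based driver starts accounting before remapping and, on completion,
 * charges the I/O back to the original device. In real drivers the start
 * time travels in a per-bio context; everything here is hypothetical.
 */
static inline void example_acct_and_remap(struct bio *bio,
					  struct block_device *lower)
{
	struct block_device *orig_bdev = bio->bi_bdev;
	unsigned long start = bio_start_io_acct(bio);

	bio_set_dev(bio, lower);	/* remap to the lower device */
	/* ... later, from the driver's completion handler: */
	bio_end_io_acct_remapped(bio, start, orig_bdev);
}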
1361
1362 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
1363 unsigned long start_time)
1364 {
1365 __part_end_io_acct(disk->part0, op, start_time);
1366 }
1367 EXPORT_SYMBOL(disk_end_io_acct);
1368
1369 /*
1370 * Steal bios from a request and add them to a bio list.
1371 * The request must not have been partially completed before.
1372 */
1373 void blk_steal_bios(struct bio_list *list, struct request *rq)
1374 {
1375 if (rq->bio) {
1376 if (list->tail)
1377 list->tail->bi_next = rq->bio;
1378 else
1379 list->head = rq->bio;
1380 list->tail = rq->biotail;
1381
1382 rq->bio = NULL;
1383 rq->biotail = NULL;
1384 }
1385
1386 rq->__data_len = 0;
1387 }
1388 EXPORT_SYMBOL_GPL(blk_steal_bios);
1389
1390 /**
1391 * blk_update_request - Complete multiple bytes without completing the request
1392 * @req: the request being processed
1393 * @error: block status code
1394 * @nr_bytes: number of bytes to complete for @req
1395 *
1396 * Description:
1397 * Ends I/O on a number of bytes attached to @req, but doesn't complete
1398 * the request structure even if @req doesn't have leftover.
1399 * If @req has leftover, sets it up for the next range of segments.
1400 *
1401 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
1402 * %false return from this function.
1403 *
1404 * Note:
1405 * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
1406 * except in the consistency check at the end of this function.
1407 *
1408 * Return:
1409 * %false - this request doesn't have any more data
1410 * %true - this request has more data
1411 **/
1412 bool blk_update_request(struct request *req, blk_status_t error,
1413 unsigned int nr_bytes)
1414 {
1415 int total_bytes;
1416
1417 trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
1418
1419 if (!req->bio)
1420 return false;
1421
1422 #ifdef CONFIG_BLK_DEV_INTEGRITY
1423 if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
1424 error == BLK_STS_OK)
1425 req->q->integrity.profile->complete_fn(req, nr_bytes);
1426 #endif
1427
1428 if (unlikely(error && !blk_rq_is_passthrough(req) &&
1429 !(req->rq_flags & RQF_QUIET)))
1430 print_req_error(req, error, __func__);
1431
1432 blk_account_io_completion(req, nr_bytes);
1433
1434 total_bytes = 0;
1435 while (req->bio) {
1436 struct bio *bio = req->bio;
1437 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1438
1439 if (bio_bytes == bio->bi_iter.bi_size)
1440 req->bio = bio->bi_next;
1441
1442 /* Completion has already been traced */
1443 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1444 req_bio_endio(req, bio, bio_bytes, error);
1445
1446 total_bytes += bio_bytes;
1447 nr_bytes -= bio_bytes;
1448
1449 if (!nr_bytes)
1450 break;
1451 }
1452
1453 /*
1454 * completely done
1455 */
1456 if (!req->bio) {
1457 /*
1458 * Reset counters so that the request stacking driver
1459 * can find how many bytes remain in the request
1460 * later.
1461 */
1462 req->__data_len = 0;
1463 return false;
1464 }
1465
1466 req->__data_len -= total_bytes;
1467
1468 /* update sector only for requests with clear definition of sector */
1469 if (!blk_rq_is_passthrough(req))
1470 req->__sector += total_bytes >> 9;
1471
1472 /* mixed attributes always follow the first bio */
1473 if (req->rq_flags & RQF_MIXED_MERGE) {
1474 req->cmd_flags &= ~REQ_FAILFAST_MASK;
1475 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1476 }
1477
1478 if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
1479 /*
1480 * If total number of sectors is less than the first segment
1481 * size, something has gone terribly wrong.
1482 */
1483 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
1484 blk_dump_rq_flags(req, "request botched");
1485 req->__data_len = blk_rq_cur_bytes(req);
1486 }
1487
1488 /* recalculate the number of segments */
1489 req->nr_phys_segments = blk_recalc_rq_segments(req);
1490 }
1491
1492 return true;
1493 }
1494 EXPORT_SYMBOL_GPL(blk_update_request);
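
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c, loosely
 * modeled on how SCSI consumes blk_update_request() when completing a
 * request piecemeal. The function name is hypothetical.
 */
static inline void example_complete_bytes(struct request *req,
					  unsigned int done)
{
	if (!blk_update_request(req, BLK_STS_OK, done))
		__blk_mq_end_request(req, BLK_STS_OK);	/* fully done */
	/* else: leftover bytes remain and req was set up for them */
}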
1495
1496 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1497 /**
1498 * rq_flush_dcache_pages - Helper function to flush all pages in a request
1499 * @rq: the request to be flushed
1500 *
1501 * Description:
1502 * Flush all pages in @rq.
1503 */
1504 void rq_flush_dcache_pages(struct request *rq)
1505 {
1506 struct req_iterator iter;
1507 struct bio_vec bvec;
1508
1509 rq_for_each_segment(bvec, rq, iter)
1510 flush_dcache_page(bvec.bv_page);
1511 }
1512 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
1513 #endif
1514
1515 /**
1516 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
1517 * @q : the queue of the device being checked
1518 *
1519 * Description:
1520 * Check if underlying low-level drivers of a device are busy.
1521 * If the drivers want to export their busy state, they must set own
1522 * exporting function using blk_queue_lld_busy() first.
1523 *
1524 * Basically, this function is used only by request stacking drivers
1525 * to stop dispatching requests to underlying devices when underlying
1526 * devices are busy. This behavior encourages more I/O merging on the queue
1527 * of the request stacking driver and prevents I/O throughput regression
1528 * on burst I/O load.
1529 *
1530 * Return:
1531 * 0 - Not busy (The request stacking driver should dispatch request)
1532 * 1 - Busy (The request stacking driver should stop dispatching request)
1533 */
1534 int blk_lld_busy(struct request_queue *q)
1535 {
1536 if (queue_is_mq(q) && q->mq_ops->busy)
1537 return q->mq_ops->busy(q);
1538
1539 return 0;
1540 }
1541 EXPORT_SYMBOL_GPL(blk_lld_busy);
1542
1543 /**
1544 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
1545 * @rq: the clone request to be cleaned up
1546 *
1547 * Description:
1548 * Free all bios in @rq for a cloned request.
1549 */
1550 void blk_rq_unprep_clone(struct request *rq)
1551 {
1552 struct bio *bio;
1553
1554 while ((bio = rq->bio) != NULL) {
1555 rq->bio = bio->bi_next;
1556
1557 bio_put(bio);
1558 }
1559 }
1560 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
1561
1562 /**
1563 * blk_rq_prep_clone - Helper function to setup clone request
1564 * @rq: the request to be setup
1565 * @rq_src: original request to be cloned
1566 * @bs: bio_set that bios for clone are allocated from
1567 * @gfp_mask: memory allocation mask for bio
1568 * @bio_ctr: setup function to be called for each clone bio.
1569 * Returns %0 for success, non %0 for failure.
1570 * @data: private data to be passed to @bio_ctr
1571 *
1572 * Description:
1573 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
1574 *     Also, pages which the original bios are pointing to are not copied
1575 *     and the cloned bios just point to the same pages.
1576 *     So the cloned bios must be completed before the original bios, which
1577 *     means the caller must complete @rq before @rq_src.
1578 */
1579 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1580 struct bio_set *bs, gfp_t gfp_mask,
1581 int (*bio_ctr)(struct bio *, struct bio *, void *),
1582 void *data)
1583 {
1584 struct bio *bio, *bio_src;
1585
1586 if (!bs)
1587 bs = &fs_bio_set;
1588
1589 __rq_for_each_bio(bio_src, rq_src) {
1590 bio = bio_clone_fast(bio_src, gfp_mask, bs);
1591 if (!bio)
1592 goto free_and_out;
1593
1594 if (bio_ctr && bio_ctr(bio, bio_src, data))
1595 goto free_and_out;
1596
1597 if (rq->bio) {
1598 rq->biotail->bi_next = bio;
1599 rq->biotail = bio;
1600 } else {
1601 rq->bio = rq->biotail = bio;
1602 }
1603 bio = NULL;
1604 }
1605
1606 /* Copy attributes of the original request to the clone request. */
1607 rq->__sector = blk_rq_pos(rq_src);
1608 rq->__data_len = blk_rq_bytes(rq_src);
1609 if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
1610 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
1611 rq->special_vec = rq_src->special_vec;
1612 }
1613 rq->nr_phys_segments = rq_src->nr_phys_segments;
1614 rq->ioprio = rq_src->ioprio;
1615
1616 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1617 goto free_and_out;
1618
1619 return 0;
1620
1621 free_and_out:
1622 if (bio)
1623 bio_put(bio);
1624 blk_rq_unprep_clone(rq);
1625
1626 return -ENOMEM;
1627 }
1628 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
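
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c, loosely
 * modeled on request-based device mapper: prepare a clone and unwind it
 * with blk_rq_unprep_clone() when dispatch fails. Names are hypothetical.
 */
static inline int example_clone_and_dispatch(struct request *clone,
					     struct request *rq_src)
{
	int ret = blk_rq_prep_clone(clone, rq_src, NULL /* use fs_bio_set */,
				    GFP_ATOMIC, NULL, NULL);
	if (ret)
		return ret;
	/* ... if dispatching the clone fails later: */
	blk_rq_unprep_clone(clone);
	return 0;
}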
1629
1630 int kblockd_schedule_work(struct work_struct *work)
1631 {
1632 return queue_work(kblockd_workqueue, work);
1633 }
1634 EXPORT_SYMBOL(kblockd_schedule_work);
1635
1636 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
1637 unsigned long delay)
1638 {
1639 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
1640 }
1641 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1642
1643 /**
1644 * blk_start_plug - initialize blk_plug and track it inside the task_struct
1645 * @plug: The &struct blk_plug that needs to be initialized
1646 *
1647 * Description:
1648 * blk_start_plug() indicates to the block layer an intent by the caller
1649 * to submit multiple I/O requests in a batch. The block layer may use
1650 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1651 * is called. However, the block layer may choose to submit requests
1652 * before a call to blk_finish_plug() if the number of queued I/Os
1653 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1654 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1655 * the task schedules (see below).
1656 *
1657 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1658 * pending I/O should the task end up blocking between blk_start_plug() and
1659 * blk_finish_plug(). This is important from a performance perspective, but
1660 * also ensures that we don't deadlock. For instance, if the task is blocking
1661 * for a memory allocation, memory reclaim could end up wanting to free a
1662 * page belonging to that request that is currently residing in our private
1663 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1664 * this kind of deadlock.
1665 */
1666 void blk_start_plug(struct blk_plug *plug)
1667 {
1668 struct task_struct *tsk = current;
1669
1670 /*
1671 * If this is a nested plug, don't actually assign it.
1672 */
1673 if (tsk->plug)
1674 return;
1675
1676 INIT_LIST_HEAD(&plug->mq_list);
1677 INIT_LIST_HEAD(&plug->cb_list);
1678 plug->rq_count = 0;
1679 plug->multiple_queues = false;
1680 plug->nowait = false;
1681
1682 /*
1683 * Store ordering should not be needed here, since a potential
1684 * preempt will imply a full memory barrier
1685 */
1686 tsk->plug = plug;
1687 }
1688 EXPORT_SYMBOL(blk_start_plug);
1689
1690 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1691 {
1692 LIST_HEAD(callbacks);
1693
1694 while (!list_empty(&plug->cb_list)) {
1695 list_splice_init(&plug->cb_list, &callbacks);
1696
1697 while (!list_empty(&callbacks)) {
1698 struct blk_plug_cb *cb = list_first_entry(&callbacks,
1699 struct blk_plug_cb,
1700 list);
1701 list_del(&cb->list);
1702 cb->callback(cb, from_schedule);
1703 }
1704 }
1705 }
1706
1707 struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
1708 int size)
1709 {
1710 struct blk_plug *plug = current->plug;
1711 struct blk_plug_cb *cb;
1712
1713 if (!plug)
1714 return NULL;
1715
1716 list_for_each_entry(cb, &plug->cb_list, list)
1717 if (cb->callback == unplug && cb->data == data)
1718 return cb;
1719
1720 /* Not currently on the callback list */
1721 BUG_ON(size < sizeof(*cb));
1722 cb = kzalloc(size, GFP_ATOMIC);
1723 if (cb) {
1724 cb->data = data;
1725 cb->callback = unplug;
1726 list_add(&cb->list, &plug->cb_list);
1727 }
1728 return cb;
1729 }
1730 EXPORT_SYMBOL(blk_check_plugged);
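
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c, in the
 * style of the md/raid callers of blk_check_plugged(). The embedded
 * blk_plug_cb must be the first member, since blk_check_plugged()
 * allocates the whole @size block; the callback owns and frees the
 * allocation. All names are hypothetical.
 */
struct example_plug_cb {
	struct blk_plug_cb cb;	/* must be first: it is the kzalloc'd block */
	int pending;
};

static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct example_plug_cb *ecb =
		container_of(cb, struct example_plug_cb, cb);

	/* ... flush ecb->pending deferred work ... */
	kfree(ecb);
}

static inline void example_defer_work(void *data)
{
	struct blk_plug_cb *cb = blk_check_plugged(example_unplug, data,
					sizeof(struct example_plug_cb));

	if (cb)	/* NULL means no plug is active: do the work directly */
		container_of(cb, struct example_plug_cb, cb)->pending++;
}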
1731
1732 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1733 {
1734 flush_plug_callbacks(plug, from_schedule);
1735
1736 if (!list_empty(&plug->mq_list))
1737 blk_mq_flush_plug_list(plug, from_schedule);
1738 }
1739
1740 /**
1741 * blk_finish_plug - mark the end of a batch of submitted I/O
1742 * @plug: The &struct blk_plug passed to blk_start_plug()
1743 *
1744 * Description:
1745 * Indicate that a batch of I/O submissions is complete. This function
1746 * must be paired with an initial call to blk_start_plug(). The intent
1747 * is to allow the block layer to optimize I/O submission. See the
1748 * documentation for blk_start_plug() for more information.
1749 */
1750 void blk_finish_plug(struct blk_plug *plug)
1751 {
1752 if (plug != current->plug)
1753 return;
1754 blk_flush_plug_list(plug, false);
1755
1756 current->plug = NULL;
1757 }
1758 EXPORT_SYMBOL(blk_finish_plug);
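
/*
 * Editor's note: an illustrative sketch, not part of blk-core.c, showing
 * the canonical plugging pattern described in the blk_start_plug()
 * kernel-doc above. The function name is hypothetical.
 */
static inline void example_plugged_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);	/* flushes anything still held back */
}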
1759
1760 void blk_io_schedule(void)
1761 {
1762 /* Prevent hang_check timer from firing at us during very long I/O */
1763 unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;
1764
1765 if (timeout)
1766 io_schedule_timeout(timeout);
1767 else
1768 io_schedule();
1769 }
1770 EXPORT_SYMBOL_GPL(blk_io_schedule);
1771
1772 int __init blk_dev_init(void)
1773 {
1774 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
1775 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1776 sizeof_field(struct request, cmd_flags));
1777 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1778 sizeof_field(struct bio, bi_opf));
1779
1780 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
1781 kblockd_workqueue = alloc_workqueue("kblockd",
1782 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1783 if (!kblockd_workqueue)
1784 panic("Failed to create kblockd\n");
1785
1786 blk_requestq_cachep = kmem_cache_create("request_queue",
1787 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
1788
1789 blk_debugfs_root = debugfs_create_dir("block", NULL);
1790
1791 return 0;
1792 }