#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_list;
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned int            index_hw;

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];
        struct blk_rq_stat      stat[2];

        struct request_queue    *queue;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
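
/*
 * Illustrative pairing (a sketch, not code from this file): a dispatch path
 * can drain the per-cpu software queues into a local list with
 * blk_mq_flush_busy_ctxs() and then hand that list to
 * blk_mq_dispatch_rq_list(), roughly:
 *
 *      LIST_HEAD(rq_list);
 *
 *      blk_mq_flush_busy_ctxs(hctx, &rq_list);
 *      blk_mq_dispatch_rq_list(hctx, &rq_list);
 */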

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);
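
/*
 * Illustrative lifecycle (a sketch, not code from this file): a tag map is
 * allocated first and then populated with requests; teardown reverses the
 * order. "depth" stands in for whatever queue depth the caller chose:
 *
 *      tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
 *      if (tags)
 *              blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
 *      ...
 *      blk_mq_free_rqs(set, tags, hctx_idx);
 *      blk_mq_free_rq_map(tags);
 */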

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                             bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list);
/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}
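
/*
 * Illustrative use (a sketch, not code from this file): blk_mq_get_ctx()
 * disables preemption via get_cpu(), so callers pair it with blk_mq_put_ctx()
 * on the same path, typically after resolving the hardware queue for that
 * software context:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *      ...
 *      blk_mq_put_ctx(ctx);
 */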

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        unsigned int flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
                struct request_queue *q, unsigned int flags,
                struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
        data->q = q;
        data->flags = flags;
        data->ctx = ctx;
        data->hctx = hctx;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_INTERNAL)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}
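
/*
 * Illustrative use (a sketch, not code from this file): request allocation
 * paths fill a struct blk_mq_alloc_data and then pick the tag set to allocate
 * from; BLK_MQ_REQ_INTERNAL selects the scheduler-owned tags:
 *
 *      struct blk_mq_alloc_data data;
 *      struct blk_mq_tags *tags;
 *
 *      blk_mq_set_alloc_data(&data, q, flags, ctx, hctx);
 *      tags = blk_mq_tags_from_data(&data);
 */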

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                        struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                             struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
                                       unsigned int op);

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}
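
/*
 * Illustrative check (a sketch, not code from this file): run/dispatch paths
 * commonly bail out early when a hardware queue is stopped or has no mapped
 * software contexts:
 *
 *      if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
 *              return;
 */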

#endif