/* block/blk-mq.h */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

/*
 * Per-CPU software queue context: requests are staged here before being
 * dispatched to a hardware queue (blk_mq_hw_ctx).
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time; [0] = async, [1] = sync */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time; [0] = async, [1] = sync */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

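/*
 * Illustrative sketch, not part of the original header: the map built by
 * blk_mq_make_queue_map() is indexed by CPU number and yields the hardware
 * queue index serving that CPU. The helper below is hypothetical.
 */
static inline unsigned int blk_mq_queue_for_cpu(unsigned int *map,
						unsigned int cpu)
{
	return map[cpu];	/* hw queue index this CPU dispatches to */
}
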
/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;

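/*
 * Illustrative sketch, not part of the original header: because each
 * blk_align_bitmap word sits in its own cacheline, addressing bit 'nr'
 * means first picking a word, then a bit within it. The helper name and
 * the bits_per_word parameter are hypothetical (assumed to be a power of
 * two); the real users live in the hctx->ctx_map code in blk-mq.c.
 */
static inline void blk_align_bitmap_set(struct blk_align_bitmap *map,
					unsigned int nr,
					unsigned int bits_per_word)
{
	struct blk_align_bitmap *bm = &map[nr / bits_per_word];

	/* test first to avoid dirtying the cacheline when already set */
	if (!test_bit(nr & (bits_per_word - 1), &bm->word))
		set_bit(nr & (bits_per_word - 1), &bm->word);
}
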
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be
 * per-node, for instance, but for now this is hardcoded. Note that we
 * don't care about preemption, since we know the ctx's are persistent.
 * This does mean that we can't rely on the ctx always matching the
 * currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

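/*
 * Illustrative sketch, not part of the original header: blk_mq_get_ctx()
 * disables preemption via get_cpu(), so every call must be paired with
 * blk_mq_put_ctx(). The function below is hypothetical.
 */
static inline void blk_mq_ctx_usage_example(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	/* ... use ctx here; the task cannot migrate between CPUs ... */

	blk_mq_put_ctx(ctx);
}
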
struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx,
		struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}

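/*
 * Illustrative sketch, not part of the original header: how an allocation
 * path might fill in blk_mq_alloc_data before handing it to the tag
 * allocator. The function is hypothetical; compare the callers of
 * __blk_mq_alloc_request() in blk-mq.c.
 */
static inline void blk_mq_alloc_data_example(struct request_queue *q)
{
	struct blk_mq_alloc_data data;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/* GFP_ATOMIC: may not sleep; false: use normal, not reserved, tags */
	blk_mq_set_alloc_data(&data, q, GFP_ATOMIC, false, ctx, hctx);

	/* ... pass &data to the allocator; ctx/hctx are in/out fields ... */

	blk_mq_put_ctx(ctx);
}
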
#endif