block/blk-mq.h

#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
				   const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

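/*
 * Illustrative usage (a sketch; the surrounding caller is assumed, not taken
 * from this file): dispatch-side code resolves the hardware queue backing a
 * software queue by indexing mq_map with the ctx's CPU, then operates on
 * that hctx:
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *
 *	blk_mq_run_hw_queue(hctx, true);
 */
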
/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;

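/*
 * Sketch of how an array of blk_align_bitmap can be indexed (the map pointer
 * and bits_per_word value here are assumptions for illustration, not
 * declarations from this file): bit i lands in word i / bits_per_word, so a
 * small bits_per_word spreads busy bits across separate cachelines.
 *
 *	struct blk_align_bitmap *bm = &map[i / bits_per_word];
 *
 *	set_bit(i % bits_per_word, &bm->word);
 */
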
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

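/*
 * Typical pairing (an illustrative sketch; the work in the middle is assumed,
 * not taken from this file): blk_mq_get_ctx() pins the caller to its CPU via
 * get_cpu(), so every successful call must be balanced with blk_mq_put_ctx()
 * once the software queue is no longer needed:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	... add or splice requests onto ctx->rq_list ...
 *	spin_unlock(&ctx->lock);
 *
 *	blk_mq_put_ctx(ctx);
 */
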
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}

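/*
 * Illustrative flow for setting up an allocation context (a sketch; the
 * allocator called at the end and the flags/rw values are assumptions, not
 * declarations from this file): ctx and hctx are filled in as inputs here,
 * but the allocator may update them if it retries on another queue, which is
 * why they are documented above as input & output parameters.
 *
 *	struct blk_mq_alloc_data alloc_data;
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, rw);
 *	blk_mq_put_ctx(ctx);
 */
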
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

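/*
 * Example guard (an illustrative sketch; the surrounding caller is assumed):
 * a hardware queue with no mapped software queues or no tag set cannot have
 * pending work, so queue-running paths can bail out early:
 *
 *	if (!blk_mq_hw_queue_mapped(hctx))
 *		return;
 */
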
#endif