#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

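/*
 * Usage sketch (hypothetical helper, illustration only): a request reaches
 * a software context by being linked onto ctx->rq_list under ctx->lock;
 * in the real code the lock is taken by the callers of
 * __blk_mq_insert_request() below.
 */
static inline void example_ctx_add_rq(struct blk_mq_ctx *ctx,
				      struct request *rq)
{
	spin_lock(&ctx->lock);
	list_add_tail(&rq->queuelist, &ctx->rq_list);
	spin_unlock(&ctx->lock);
}
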
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
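/*
 * Descriptive note: blk_mq_get_driver_tag() below tries to assign a driver
 * tag to @rq ahead of dispatch; with @wait set the caller may block until
 * a tag frees up (a summary of behaviour in this tree, not a contract).
 */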
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
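/*
 * Descriptive note: allocation is two-step here: blk_mq_alloc_rq_map()
 * sets up the tag map and blk_mq_alloc_rqs() populates it with requests;
 * teardown mirrors this with blk_mq_free_rqs() followed by
 * blk_mq_free_rq_map().
 */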

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

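/*
 * Usage sketch (hypothetical helper, illustration only): resolve the
 * hardware context for the calling CPU. The CPU is pinned with
 * get_cpu()/put_cpu() so the mapping cannot change mid-lookup;
 * smp_processor_id() alone would not be stable under preemption.
 */
static inline struct blk_mq_hw_ctx *example_map_current_cpu(struct request_queue *q)
{
	int cpu = get_cpu();
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, cpu);

	put_cpu();
	return hctx;
}
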
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be
 * per-node, for instance, but for now the per-cpu mapping is hardcoded.
 * Note that we don't care about preemption, since we know the ctxs are
 * persistent. This does mean that we can't rely on ctx always matching
 * the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

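/*
 * Usage sketch (hypothetical helper, illustration only): blk_mq_get_ctx()
 * pins the CPU via get_cpu(), so every call must be paired with
 * blk_mq_put_ctx() to re-enable preemption.
 */
static inline void example_touch_ctx(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	/* ... inspect or modify ctx state, e.g. under ctx->lock ... */
	blk_mq_put_ctx(ctx);
}
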
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

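/*
 * Descriptive note: requests allocated through an I/O scheduler carry
 * BLK_MQ_REQ_INTERNAL and draw from the per-hctx scheduler tag set
 * (sched_tags); all other requests use the driver tag set (tags).
 */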
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

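/*
 * Usage sketch (hypothetical helper, illustration only): a dispatch path
 * would typically skip a hardware queue that is unmapped or stopped.
 */
static inline bool example_hctx_may_dispatch(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_hw_queue_mapped(hctx) && !blk_mq_hctx_stopped(hctx);
}
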
#endif