struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
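
/*
 * A usage sketch (an assumption, not part of this header): the two-element
 * counters are indexed by the sync/async nature of the request, so the core
 * is expected to bump them with something along the lines of:
 *
 *	ctx->rq_dispatched[op_is_sync(op)]++;
 */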

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
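
/*
 * A minimal sketch of how the two list helpers above compose (illustrative
 * only; hctx is assumed to be in scope): drain the busy sw queues onto a
 * private list, then feed that list to the dispatch loop:
 *
 *	LIST_HEAD(rq_list);
 *
 *	blk_mq_flush_busy_ctxs(hctx, &rq_list);
 *	blk_mq_dispatch_rq_list(hctx, &rq_list);
 */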

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
			unsigned int hctx_idx);
struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
			unsigned int hctx_idx);
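
/*
 * Sketch of the expected init/free pairing (a hypothetical error path, not
 * taken from this header):
 *
 *	struct blk_mq_tags *tags = blk_mq_init_rq_map(set, hctx_idx);
 *
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_rq_map(set, tags, hctx_idx);
 */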

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
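
/*
 * Illustrative call pattern (an assumption based on the sw-queue design,
 * not a guarantee made by this header): insertion runs under the per-ctx
 * lock, e.g.:
 *
 *	spin_lock(&ctx->lock);
 *	__blk_mq_insert_request(hctx, rq, at_head);
 *	spin_unlock(&ctx->lock);
 */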

void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
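
/*
 * Example lookup (a sketch; using smp_processor_id() assumes the caller has
 * already disabled preemption):
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, smp_processor_id());
 */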

extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
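
/*
 * Because blk_mq_get_ctx() uses get_cpu(), it disables preemption; every
 * call must therefore be paired with blk_mq_put_ctx(), which re-enables it
 * via put_cpu(). A minimal sketch:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... use ctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */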

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);
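
/*
 * Allocation sketch combining the helpers above (illustrative only; q,
 * flags, ctx, hctx and op are assumed to be set up by the caller):
 *
 *	struct blk_mq_alloc_data alloc_data;
 *	struct request *rq;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, op);
 */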

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}
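
/*
 * Typical guard built from the two predicates above (a sketch): skip a
 * hardware queue that is stopped or has no mapped sw queues before trying
 * to run it:
 *
 *	if (blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))
 *		return;
 *
 *	blk_mq_run_hw_queue(hctx, true);
 */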