/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H
/*
 * Tag address space map.
 */
10 unsigned int nr_reserved_tags
;
12 atomic_t active_queues
;
14 struct sbitmap_queue
*bitmap_tags
;
15 struct sbitmap_queue
*breserved_tags
;
17 struct sbitmap_queue __bitmap_tags
;
18 struct sbitmap_queue __breserved_tags
;
21 struct request
**static_rqs
;
22 struct list_head page_list
;
25 * used to clear request reference in rqs[] before freeing one
/* Allocate and free a tag map (nr_tags total, reserved_tags held back). */
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					    unsigned int reserved_tags,
					    int node, unsigned int flags);
extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags);

/* Initialize a normal/reserved bitmap pair for queue_depth tags. */
extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			       struct sbitmap_queue *breserved_tags,
			       unsigned int queue_depth,
			       unsigned int reserved,
			       int node, int alloc_policy);
/* Set up / tear down a tag-set-wide shared sbitmap. */
extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set);
extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set);

/* Allocate one tag for the request described by @data. */
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
/*
 * NOTE(review): the final parameter of blk_mq_put_tag was truncated in the
 * source; restored as "unsigned int tag" — confirm against blk-mq-tag.c.
 */
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
46 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx
*hctx
,
47 struct blk_mq_tags
**tags
,
48 unsigned int depth
, bool can_grow
);
49 extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set
*set
,
52 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags
*tags
, bool);
53 void blk_mq_queue_tag_busy_iter(struct request_queue
*q
, busy_iter_fn
*fn
,
55 void blk_mq_all_tag_iter(struct blk_mq_tags
*tags
, busy_tag_iter_fn
*fn
,
58 static inline struct sbq_wait_state
*bt_wait_ptr(struct sbitmap_queue
*bt
,
59 struct blk_mq_hw_ctx
*hctx
)
63 return sbq_wait_ptr(bt
, &hctx
->wait_index
);
69 BLK_MQ_TAG_MAX
= BLK_MQ_NO_TAG
- 1,
72 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx
*);
73 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx
*);
75 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx
*hctx
)
77 if (!(hctx
->flags
& BLK_MQ_F_TAG_QUEUE_SHARED
))
80 return __blk_mq_tag_busy(hctx
);
83 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx
*hctx
)
85 if (!(hctx
->flags
& BLK_MQ_F_TAG_QUEUE_SHARED
))
88 __blk_mq_tag_idle(hctx
);
91 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags
*tags
,
94 return tag
< tags
->nr_reserved_tags
;