1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef INT_BLK_MQ_TAG_H
3 #define INT_BLK_MQ_TAG_H
8 * Tag address space map.
12 unsigned int nr_reserved_tags
;
14 atomic_t active_queues
;
16 struct sbitmap_queue bitmap_tags
;
17 struct sbitmap_queue breserved_tags
;
20 struct request
**static_rqs
;
21 struct list_head page_list
;
/* Allocate and initialize a tag set; returns NULL on failure. */
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					    unsigned int reserved_tags,
					    int node, int alloc_policy);
/* Free a tag set previously returned by blk_mq_init_tags(). */
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
/* Allocate a tag for the request described by @data. */
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
/*
 * Release a tag back to the tag space.
 * NOTE(review): the final parameter was truncated in the extraction; restored
 * as "unsigned int tag" per upstream blk-mq-tag.h — confirm.
 */
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
31 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx
*hctx
,
32 struct blk_mq_tags
**tags
,
33 unsigned int depth
, bool can_grow
);
34 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags
*tags
, bool);
35 void blk_mq_queue_tag_busy_iter(struct request_queue
*q
, busy_iter_fn
*fn
,
37 void blk_mq_all_tag_iter(struct blk_mq_tags
*tags
, busy_tag_iter_fn
*fn
,
40 static inline struct sbq_wait_state
*bt_wait_ptr(struct sbitmap_queue
*bt
,
41 struct blk_mq_hw_ctx
*hctx
)
45 return sbq_wait_ptr(bt
, &hctx
->wait_index
);
/*
 * Sentinel tag values.
 * NOTE(review): only BLK_MQ_TAG_MAX survived the extraction; the enum braces
 * and sibling constants are restored from upstream blk-mq-tag.h — confirm.
 */
enum {
	BLK_MQ_NO_TAG		= -1U,		/* "no tag allocated" sentinel */
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};
54 extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx
*);
55 extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx
*);
57 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx
*hctx
)
59 if (!(hctx
->flags
& BLK_MQ_F_TAG_SHARED
))
62 return __blk_mq_tag_busy(hctx
);
65 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx
*hctx
)
67 if (!(hctx
->flags
& BLK_MQ_F_TAG_SHARED
))
70 __blk_mq_tag_idle(hctx
);
74 * For shared tag users, we track the number of currently active users
75 * and attempt to provide a fair share of the tag depth for each of them.
77 static inline bool hctx_may_queue(struct blk_mq_hw_ctx
*hctx
,
78 struct sbitmap_queue
*bt
)
80 unsigned int depth
, users
;
82 if (!hctx
|| !(hctx
->flags
& BLK_MQ_F_TAG_SHARED
))
84 if (!test_bit(BLK_MQ_S_TAG_ACTIVE
, &hctx
->state
))
88 * Don't try dividing an ant
90 if (bt
->sb
.depth
== 1)
93 users
= atomic_read(&hctx
->tags
->active_queues
);
98 * Allow at least some tags
100 depth
= max((bt
->sb
.depth
+ users
- 1) / users
, 4U);
101 return atomic_read(&hctx
->nr_active
) < depth
;
104 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags
*tags
,
107 return tag
< tags
->nr_reserved_tags
;