/* SPDX-License-Identifier: GPL-2.0 */
#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

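/*
 * Accounting flags for a single IO. They are packed into the reserved
 * top bits of struct blk_issue_stat (see the helpers below), so the
 * completion path can recover how the IO was classified when it was
 * submitted.
 */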
enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

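/*
 * Indexes into rq_wb->rq_wait[]. Regular background writeback and
 * kswapd-issued writeback are throttled on separate wait queues, so
 * reclaim-driven writes are not queued behind ordinary buffered
 * writeback.
 */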
enum {
	WBT_RWQ_BG	= 0,
	WBT_RWQ_KSWAPD,
	WBT_NUM_RWQ,
};

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};

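/*
 * The helpers below pack wbt_flags into, and unpack them from, the
 * BLK_STAT_RES bits of blk_issue_stat::stat. Worked example, assuming
 * the reserved bits start out clear (wbt_clear_state()): after
 * wbt_track(stat, WBT_TRACKED | WBT_KSWAPD), wbt_stat_to_mask(stat)
 * returns 0x5, wbt_is_tracked(stat) is true and wbt_is_read(stat) is
 * false.
 */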
static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->stat &= ~BLK_STAT_RES_MASK;
}

static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
}

static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT;
}

static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
}

static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
}

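/*
 * One throttling domain: waiters sleep on @wait while @inflight is at
 * or above the limit currently in effect for their writeback class.
 */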
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;				/* current scaling step for the limits */
	bool scaled_max;

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	s64 sync_issue;				/* issue time of the tracked read */
	void *sync_cookie;			/* identifies that read */

	unsigned int wc;			/* write cache enabled, see wbt_set_write_cache() */
	unsigned int queue_depth;		/* device queue depth, see wbt_set_queue_depth() */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* latency target, in nsec */
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
};

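/*
 * Total tracked writes currently in flight, summed over all writeback
 * classes.
 */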
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

#ifdef CONFIG_BLK_WBT

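/*
 * Rough call flow, sketched from the callers in block/ (not a contract):
 * wbt_init() attaches throttling to a queue, normally via
 * wbt_enable_default() at queue registration; wbt_wait() throttles
 * writes at bio submission; wbt_issue()/wbt_done() bracket each
 * request's device time; wbt_requeue() backs out an issue when a
 * request is requeued; wbt_update_limits() reacts to changes such as a
 * new sysfs latency target (wbt_lat_usec).
 */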
void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);

#else

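/*
 * Stubs for CONFIG_BLK_WBT=n: throttling compiles away entirely;
 * wbt_init() fails with -EINVAL and wbt_wait() admits every bio with no
 * flags set.
 */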
static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_enable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif