/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};
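
/*
 * Illustrative sketch (not from the original header): wb->state is
 * documented below as "Always use atomic bitops on this", so consumers
 * are expected to manipulate these flags along the lines of:
 *
 *	if (!test_and_set_bit(WB_registered, &wb->state))
 *		... first registration of this wb ...
 *	if (test_bit(WB_writeback_running, &wb->state))
 *		... writeback already in flight ...
 */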

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);

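/*
 * Minimal sketch of how the congested_fn hook is wired up (the callback,
 * type, and helper names below are hypothetical, not dm/md's actual
 * ones): a stacking driver supplies its own congestion check, and
 * callers then ask the bdi instead of testing wb state directly.
 *
 *	static int my_congested(void *data, int cong_bits)
 *	{
 *		struct my_stacked_dev *dev = data;	// hypothetical type
 *
 *		return my_dev_backlogged(dev, cong_bits); // hypothetical helper
 *	}
 *
 *	bdi->congested_fn   = my_congested;
 *	bdi->congested_data = dev;
 */
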
enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
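
/*
 * Sketch, assuming this mirrors the wb stat helpers in
 * <linux/backing-dev.h>: each wb->stat[] entry is a percpu counter and
 * WB_STAT_BATCH is the per-CPU batch size, keeping updates cheap while
 * bounding the cross-CPU error, e.g.:
 *
 *	percpu_counter_add_batch(&wb->stat[WB_DIRTIED], 1, WB_STAT_BATCH);
 */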

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and work is done by an
	 * emergency worker; however, this reason is visible to userland
	 * through tracepoints and we keep exposing exactly the same
	 * information, so the name no longer matches the implementation.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using the following struct which is created on demand, indexed by
 * blkcg ID on its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration. For memcg-wb
					 * internal use only! */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg-blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the blkcg associated with a memcg may
 * change when blkcg is disabled and enabled higher up in the hierarchy, a
 * wb is tested for blkcg after lookup and removed from the index on
 * mismatch so that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.  All
	 * the bdi tasks' dirty rates will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;
	enum wb_reason start_all_reason;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
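
/*
 * Illustrative sketch of the lookup rule described above struct
 * bdi_writeback (simplified from the cgwb creation path; treat the
 * details as an approximation, not the exact kernel code): a wb found
 * by memcg ID is only reused while its blkcg association still matches.
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && wb->blkcg_css != blkcg_css)
 *		wb = NULL;	// stale pair: caller creates a fresh wb
 */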

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;	/* max allowed IO size */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	const char *name;

	struct kref refcnt;	/* Reference counter for the structure */
	unsigned int capabilities; /* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  This is
	 * > 0 whenever there are any dirty wbs, which bdi_has_dirty()
	 * depends upon.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
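
/*
 * Minimal usage sketch (an assumption based on how filesystem and driver
 * callers typically consume these helpers): congestion is flagged per
 * direction using the BLK_RW_* indices above.
 *
 *	set_bdi_congested(bdi, BLK_RW_ASYNC);	// backlog building up
 *	...
 *	clear_bdi_congested(bdi, BLK_RW_ASYNC);	// drained again
 */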

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
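
/*
 * Sketch, assuming the cookie pairs with the unlocked_inode_to_wb_begin()
 * and unlocked_inode_to_wb_end() helpers declared in
 * <linux/backing-dev.h>, which fill in and consume locked/flags:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... access fields needing a stable inode-to-wb association ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */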

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}
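
/*
 * Typical ref pattern for the helpers above (illustrative; note they are
 * deliberately no-ops for the embedded root wb, &bdi->wb):
 *
 *	if (wb_tryget(wb)) {
 *		... safely dereference wb ...
 *		wb_put(wb);
 *	}
 */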

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else /* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif /* CONFIG_CGROUP_WRITEBACK */

#endif /* __LINUX_BACKING_DEV_DEFS_H */