/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);
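
/*
 * Example (illustrative sketch): bdi_get()/bdi_put() pair like any other
 * refcounting API. A caller that looks a bdi up by id owns a reference
 * and must drop it when done:
 *
 *	struct backing_dev_info *bdi = bdi_get_by_id(id);
 *
 *	if (bdi) {
 *		... use bdi ...
 *		bdi_put(bdi);
 *	}
 */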

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);
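
/*
 * Example (illustrative sketch of the usual driver-side lifecycle, not a
 * definitive recipe; "my_bdi" and the "mydev%d" name are hypothetical):
 * a bdi is allocated, registered under a name, and later unregistered
 * and released:
 *
 *	struct backing_dev_info *my_bdi = bdi_alloc(NUMA_NO_NODE);
 *	int ret;
 *
 *	if (!my_bdi)
 *		return -ENOMEM;
 *	ret = bdi_register(my_bdi, "mydev%d", 0);
 *	if (ret) {
 *		bdi_put(my_bdi);
 *		return ret;
 *	}
 *	...
 *	bdi_unregister(my_bdi);
 *	bdi_put(my_bdi);
 */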

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs. See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}
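
/*
 * Example (illustrative sketch): the per-wb counters are batched per-CPU
 * counters. A writeback path might account a page entering and leaving
 * writeback, then read back an approximate or an exact total:
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);
 *	... page is under writeback ...
 *	dec_wb_stat(wb, WB_WRITEBACK);
 *
 *	nr = wb_stat(wb, WB_WRITEBACK);	     // fast, approximate
 *	nr = wb_stat_sum(wb, WB_WRITEBACK);  // slow, exact (sums all CPUs)
 */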

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
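
/*
 * Worked example: because each CPU may hold up to WB_STAT_BATCH
 * unflushed counts, a wb_stat() reader can be off from the true value by
 * up to nr_cpu_ids * WB_STAT_BATCH. Code comparing a stat against a
 * threshold should allow for that slack, along the lines of:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) > limit + wb_stat_error())
 *		... the true value is definitely over the limit ...
 */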

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately. Instead, use the BDI_CAP_NO_ACCT_AND_WRITEBACK
 * convenience macro, which combines all three.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *			   inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020
#define BDI_CAP_SYNCHRONOUS_IO		0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

extern struct backing_dev_info noop_backing_dev_info;
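
/*
 * Example (illustrative sketch): a RAM-backed filesystem whose pages can
 * never be written back opts out of dirty accounting and writeback
 * wholesale, while a bdi that supports cgroup-aware writeback advertises
 * it via its capability bit:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;	// ramfs-like
 *	bdi->capabilities = BDI_CAP_CGROUP_WRITEBACK;		// blkdev-like
 */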

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	return wb->congested & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);
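
/*
 * Example (illustrative sketch): a reclaim-style loop backing off while
 * the device is congested for async (write) IO. The HZ/10 timeout is an
 * arbitrary value chosen for illustration:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */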

static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}
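
/*
 * Example (illustrative sketch): callers gate dirty bookkeeping on these
 * tests so that, e.g., ramfs inodes never enter accounting or writeback:
 *
 *	if (mapping_cap_account_dirty(mapping))
 *		... update dirty page accounting ...
 *	if (mapping_cap_writeback_dirty(mapping))
 *		... schedule writeback for @mapping ...
 */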

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy. Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg. No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}
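
/*
 * Example (illustrative sketch, mirroring wb_get_create_current() below):
 * the lookup is only valid under RCU; take a reference with wb_tryget()
 * before leaving the read-side section if the wb is used afterwards:
 *
 *	struct bdi_writeback *wb;
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;
 *	rcu_read_unlock();
 */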

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg. This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
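
/*
 * Example (minimal sketch, assuming the usual wb refcounting rules from
 * backing-dev-defs.h): a writer resolving the wb for its own cgroup and
 * dropping the obtained reference with wb_put() when done:
 *
 *	struct bdi_writeback *wb = wb_get_create_current(bdi, GFP_NOFS);
 *
 *	if (wb) {
 *		... queue IO against wb ...
 *		wb_put(wb);
 *	}
 */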

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated. May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with. The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock. This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction. IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with the store_release in inode_switch_wbs_work_fn(); this
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock. inode_to_wb()'s lockdep check would trigger here, so
	 * dereference directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
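
/*
 * Example (illustrative sketch): the begin/end transaction pattern around
 * an unlocked wb access. No sleeping between begin and end:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... touch wb, e.g. inc_wb_stat(wb, WB_DIRTIED) ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */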

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */