/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

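/*
 * Illustrative sketch, not part of the original header: a caller that
 * needs the bdi to outlive its current context pins it with bdi_get()
 * and releases it with bdi_put() once finished:
 *
 *	struct backing_dev_info *bdi = bdi_get(inode_to_bdi(inode));
 *	...
 *	bdi_put(bdi);
 */
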
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}

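/*
 * Illustrative sketch, not part of the original header: a typical driver
 * allocates a bdi, registers it under a (hypothetical) name, and tears it
 * down in reverse order.  Error handling is elided:
 *
 *	struct backing_dev_info *bdi = bdi_alloc(GFP_KERNEL);
 *
 *	bdi_register(bdi, NULL, "mydev-%d", 0);
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */
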
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

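/*
 * Illustrative note, not from the original header: wb_stat() reads the
 * percpu counter without folding in uncommitted per-cpu deltas, and each
 * CPU may batch up to WB_STAT_BATCH updates, so a read can deviate from
 * the exact sum by up to wb_stat_error().  E.g. assuming 4 possible CPUs
 * and a batch size of 32, the deviation is bounded by 4 * 32 = 128.
 */
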
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

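/*
 * Illustrative sketch, not part of the original header: a RAM-backed
 * filesystem whose pages should neither be written back nor enter dirty
 * accounting would mark its bdi with the combined flag:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 */
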
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

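/*
 * Illustrative sketch, not part of the original header: reclaim-style
 * callers typically use congestion_wait() to back off briefly when the
 * backing device is congested, e.g.:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */
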
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

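/*
 * Illustrative sketch, not part of the original header: the returned wb
 * carries a reference which the caller is expected to drop with wb_put()
 * (from backing-dev-defs.h, assuming the usual get/put pairing) when done:
 *
 *	struct bdi_writeback *wb = wb_get_create_current(bdi, GFP_KERNEL);
 *
 *	if (wb) {
 *		... account or queue writeback against wb ...
 *		wb_put(wb);
 *	}
 */
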
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQ may or may not
 * be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}

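/*
 * Illustrative sketch, not part of the original header: the begin/end
 * pair brackets a short, non-sleeping access to the inode's wb without
 * taking i_lock, tree_lock or list_lock up front:
 *
 *	bool locked;
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	... read wb state, no sleeping ...
 *	unlocked_inode_to_wb_end(inode, locked);
 */
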
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

#endif	/* _LINUX_BACKING_DEV_H */