/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}
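
/*
 * Illustrative lifecycle sketch, not part of the original header: a
 * driver that owns its backing_dev_info would typically pair the
 * helpers above like this ("mydev" and @id are hypothetical):
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(GFP_KERNEL);	// initial reference held
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev%d", id);
 *	if (err) {
 *		bdi_put(bdi);		// drop the initial reference
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);		// on device removal
 *	bdi_put(bdi);			// frees bdi once refcnt reaches 0
 */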

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
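
/*
 * Usage sketch, not part of the original header: wb_stat() reads are
 * cheap but may be off by up to wb_stat_error() per counter, so
 * threshold checks should leave that much slack ("limit" below is a
 * hypothetical caller-provided value):
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) > limit + wb_stat_error(wb))
 *		...;	// definitely over the limit
 *
 * wb_stat_sum() folds in all percpu deltas for an exact, but much more
 * expensive, result.
 */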

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:	Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:	Don't write pages back
 * BDI_CAP_NO_ACCT_WB:		Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK:	Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
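
/*
 * Illustrative example, not part of the original header: a ramfs-like
 * bdi whose pages can never be written back would set
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * after which the bdi_cap_writeback_dirty(), bdi_cap_account_dirty()
 * and bdi_cap_account_writeback() helpers below all return false.
 */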

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);
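
/*
 * Illustrative sketch, not part of the original header: reclaim-style
 * throttling typically polls the congestion state and backs off, e.g.:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *
 * bdi_write_congested() is the convenience wrapper defined at the end
 * of this header; BLK_RW_ASYNC selects the async congestion bit.
 */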

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
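
/*
 * Illustrative note, not part of the original header: a filesystem opts
 * in by setting SB_I_CGROUPWB at mount time, e.g. in its fill_super:
 *
 *	sb->s_iflags |= SB_I_CGROUPWB;
 *
 * Only when the bdi also advertises BDI_CAP_CGROUP_WRITEBACK (and both
 * memcg and blkcg sit on the default hierarchy) does
 * inode_cgwb_enabled() return true.
 */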

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
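
/*
 * Usage sketch, not part of the original header: callers drop the
 * reference obtained here with wb_put() from backing-dev-defs.h:
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		...	// issue writeback against wb
 *		wb_put(wb);
 *	}
 */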

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQs may or may not
 * be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with the store_release in inode_switch_wbs_work_fn(),
	 * which ensures that we see the new wb if we see cleared
	 * I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() would bark without one of its locks held, so
	 * dereference i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}
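
/*
 * Usage sketch, not part of the original header: the begin/end pair
 * above brackets lockless wb accesses, e.g.:
 *
 *	struct bdi_writeback *wb;
 *	bool locked;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	...	// read wb state; no sleeping in here
 *	unlocked_inode_to_wb_end(inode, locked);
 */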

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

#endif	/* _LINUX_BACKING_DEV_H */