/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

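/*
 * Illustrative sketch (not part of this header): bdi_get()/bdi_put()
 * follow the usual kref pattern, so a caller that needs the bdi to stay
 * alive beyond the current context pins it first and drops the reference
 * when done ("some_bdi" is a placeholder):
 *
 *	struct backing_dev_info *bdi = bdi_get(some_bdi);
 *
 *	... use bdi; it cannot be freed underneath us ...
 *
 *	bdi_put(bdi);
 */
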
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}

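/*
 * Hedged sketch of the typical alloc/register/unregister lifecycle a
 * driver would drive with the functions above (error handling trimmed;
 * "mydev%d" and dev_id are made-up names):
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_alloc(GFP_KERNEL);
 *	if (!bdi)
 *		return -ENOMEM;
 *	if (bdi_register(bdi, "mydev%d", dev_id)) {
 *		bdi_put(bdi);
 *		return -ENODEV;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */
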
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

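/*
 * Hedged example of how the stat helpers above compose: wb_stat() is a
 * cheap but possibly stale read, wb_stat_sum() is an exact but expensive
 * sum over all CPUs, and wb_stat_error() bounds how far wb_stat() can
 * drift.  A caller comparing against a threshold can take the cheap read
 * first and fall back to the exact sum only near the limit (wb and
 * thresh are assumed to be in scope):
 *
 *	s64 wb_dirty = wb_stat(wb, WB_RECLAIMABLE);
 *
 *	if (wb_dirty > thresh - wb_stat_error())
 *		wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE);
 */
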
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *			   inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020
#define BDI_CAP_SYNCHRONOUS_IO		0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

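/*
 * Illustrative example (assumption, not taken from this file): a purely
 * memory-backed filesystem such as ramfs has no device to write back to,
 * so its bdi would use the combined flag:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * after which the bdi_cap_writeback_dirty() and bdi_cap_account_dirty()
 * helpers below both return false for it.
 */
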
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);

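/*
 * Hedged usage sketch: a writer that wants to back off rather than pile
 * more I/O onto a congested device can poll the congestion helpers
 * (defined later in this file) and sleep via congestion_wait():
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */
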
static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);

	rcu_read_unlock();
}

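/*
 * Hedged usage sketch for the transaction pair above; the caller must not
 * sleep between begin and end:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... access wb; the inode<->wb association is stable here ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */
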
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

#endif	/* _LINUX_BACKING_DEV_H */