/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
        kref_get(&bdi->refcnt);
        return bdi;
}

void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
                    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
        return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}

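/*
 * Example: a minimal sketch of the allocate/register/teardown lifecycle
 * implied by the helpers above.  The "foo%d" name, the instance variable
 * and the error handling are illustrative, not taken from this header:
 *
 *      struct backing_dev_info *bdi;
 *
 *      bdi = bdi_alloc(GFP_KERNEL);
 *      if (!bdi)
 *              return -ENOMEM;
 *      if (bdi_register(bdi, "foo%d", instance)) {
 *              bdi_put(bdi);
 *              return -ENODEV;
 *      }
 *      ...
 *      bdi_unregister(bdi);
 *      bdi_put(bdi);           drops the reference from bdi_alloc()
 */
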
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
        return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        /*
         * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
         * any dirty wbs.  See wb_update_write_bandwidth().
         */
        return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item, s64 amount)
{
        percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        __add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        __add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_sum_positive(&wb->stat[item]);
}

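/*
 * Example (illustrative): the writeback code wraps these around per-wb
 * page counts, e.g. as a page enters and leaves writeback:
 *
 *      inc_wb_stat(wb, WB_WRITEBACK);
 *      ...
 *      dec_wb_stat(wb, WB_WRITEBACK);
 *
 * wb_stat() is a cheap but approximate read of the per-cpu counter;
 * wb_stat_sum() is the exact but more expensive sum over all cpus.
 */
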
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * Maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * WB_STAT_BATCH;
#else
        return 1;
#endif
}

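/*
 * Example (illustrative): since wb_stat() may be off by up to
 * wb_stat_error(), a threshold comparison can leave that much slack
 * before paying for the exact sum:
 *
 *      if (wb_stat(wb, WB_RECLAIMABLE) > limit - wb_stat_error())
 *              reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
 */
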
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must be held stable (not modified) while
 *                         under writeback.
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *                         inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY   0x00000001
#define BDI_CAP_NO_WRITEBACK    0x00000002
#define BDI_CAP_NO_ACCT_WB      0x00000004
#define BDI_CAP_STABLE_WRITES   0x00000008
#define BDI_CAP_STRICTLIMIT     0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_SYNCHRONOUS_IO  0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

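/*
 * Example (illustrative): a RAM-backed device whose dirty pages need
 * neither accounting nor writeback could set
 *
 *      bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * while a very fast device may additionally advertise
 * BDI_CAP_SYNCHRONOUS_IO so that, e.g., swap-in can take a synchronous
 * path instead of queueing async IO.
 */
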
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
        return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
        struct super_block *sb;

        if (!inode)
                return &noop_backing_dev_info;

        sb = inode->i_sb;
#ifdef CONFIG_BLOCK
        if (sb_is_blkdev_sb(sb))
                return I_BDEV(inode)->bd_bdi;
#endif
        return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
        struct backing_dev_info *bdi = wb->bdi;

        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, cong_bits);
        return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);

static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

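/*
 * Example (illustrative): dirty accounting in mm bails out early for
 * mappings that opt out via these predicates, roughly:
 *
 *      if (mapping_cap_account_dirty(mapping)) {
 *              ...charge NR_FILE_DIRTY, WB_RECLAIMABLE and friends...
 *      }
 */
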
static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
                cgroup_subsys_on_dfl(io_cgrp_subsys) &&
                bdi_cap_account_dirty(bdi) &&
                (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
                (inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

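/*
 * Example (illustrative): when associating an inode with a wb, callers
 * can use this test to choose between a per-cgroup wb and the root wb,
 * roughly:
 *
 *      if (inode_cgwb_enabled(inode))
 *              wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *      if (!wb)
 *              wb = &bdi->wb;
 */
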
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        struct cgroup_subsys_state *memcg_css;
        struct bdi_writeback *wb;

        memcg_css = task_css(current, memory_cgrp_id);
        if (!memcg_css->parent)
                return &bdi->wb;

        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

        /*
         * %current's blkcg equals the effective blkcg of its memcg.  No
         * need to use the relatively expensive cgroup_get_e_css().
         */
        if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
                return wb;
        return NULL;
}

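/*
 * Example (illustrative): the returned wb is only valid under RCU, so a
 * caller that wants to use it outside the read-side section must take a
 * reference first, as wb_get_create_current() below does:
 *
 *      rcu_read_lock();
 *      wb = wb_find_current(bdi);
 *      if (wb && !wb_tryget(wb))
 *              wb = NULL;
 *      rcu_read_unlock();
 */
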
/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        struct bdi_writeback *wb;

        rcu_read_lock();
        wb = wb_find_current(bdi);
        if (wb && unlikely(!wb_tryget(wb)))
                wb = NULL;
        rcu_read_unlock();

        if (unlikely(!wb)) {
                struct cgroup_subsys_state *memcg_css;

                memcg_css = task_get_css(current, memory_cgrp_id);
                wb = wb_get_create(bdi, memcg_css, gfp);
                css_put(memcg_css);
        }
        return wb;
}

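/*
 * Example (illustrative): the reference returned here must be balanced
 * with wb_put() once the caller is done:
 *
 *      wb = wb_get_create_current(bdi, GFP_KERNEL);
 *      if (wb) {
 *              ...issue writeback against wb...
 *              wb_put(wb);
 *      }
 */
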
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
        return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON_ONCE(debug_locks &&
                     (!lockdep_is_held(&inode->i_lock) &&
                      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
                      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
        return inode->i_wb;
}

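/*
 * Example (illustrative): a caller typically already holds one of the
 * three locks that stabilize the association, e.g.:
 *
 *      spin_lock(&inode->i_lock);
 *      wb = inode_to_wb(inode);
 *      ...use wb; the association cannot switch while i_lock is held...
 *      spin_unlock(&inode->i_lock);
 */
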
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
        rcu_read_lock();

        /*
         * Paired with store_release in inode_switch_wb_work_fn() and
         * ensures that we see the new wb if we see cleared I_WB_SWITCH.
         */
        cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

        if (unlikely(cookie->locked))
                spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);

        /*
         * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
         * inode_to_wb() will bark.  Deref directly.
         */
        return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
                                            struct wb_lock_cookie *cookie)
{
        if (unlikely(cookie->locked))
                spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);

        rcu_read_unlock();
}

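/*
 * Example (illustrative): the begin/end pair brackets a short, non-sleeping
 * access to the inode's wb when none of the stabilizing locks is held:
 *
 *      struct wb_lock_cookie cookie = {};
 *      struct bdi_writeback *wb;
 *
 *      wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *      ...update wb stats; no sleeping, IRQs possibly disabled...
 *      unlocked_inode_to_wb_end(inode, &cookie);
 */
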
#else   /* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
        return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        atomic_inc(&bdi->wb_congested->refcnt);
        return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
        if (atomic_dec_and_test(&congested->refcnt))
                kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
        return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
        return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
        return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
                                            struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
        return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
        return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
        return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
        return inode_congested(inode, (1 << WB_sync_congested) |
                                      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
        return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << WB_sync_congested) |
                                  (1 << WB_async_congested));
}

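/*
 * Example (illustrative): opportunistic writers and readers use these
 * predicates to back off from a busy device instead of piling more IO
 * onto it:
 *
 *      if (inode_write_congested(inode))
 *              ...skip or defer optional writeout...
 */
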
#endif  /* _LINUX_BACKING_DEV_H */