/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
};

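/*
 * Illustrative sketch (not part of this header): state bits are
 * manipulated with atomic bitops, as the bdi_writeback.state comment
 * below requires.  For example, a flusher entry point might do:
 *
 *	if (test_bit(WB_writeback_running, &wb->state))
 *		return;		// a flusher pass is already running
 *	set_bit(WB_writeback_running, &wb->state);
 */
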
typedef int (congested_fn)(void *, int);

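/*
 * Sketch: a stacked driver (md/dm) may supply its own congestion
 * callback matching congested_fn; "struct my_dev" and my_queue_full()
 * are hypothetical.  The callback receives congested_data and the
 * bits being queried, and returns the subset that is congested:
 *
 *	static int my_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;
 *		int ret = 0;
 *
 *		if ((bdi_bits & (1 << WB_async_congested)) &&
 *		    my_queue_full(dev))
 *			ret |= 1 << WB_async_congested;
 *		return ret;
 *	}
 *
 * The driver then sets bdi->congested_fn = my_congested and
 * bdi->congested_data = dev; see bdi_congested() below.
 */
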
enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

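/*
 * Worked example (assuming a 16-CPU system): ilog2(16) == 4, so
 * WB_STAT_BATCH == 8 * (1 + 4) == 40.  Each CPU may hold up to one
 * batch of counts not yet folded into the global sum, so a fast read
 * of a counter can be off by at most nr_cpu_ids * WB_STAT_BATCH
 * (here 16 * 40 == 640); see wb_stat_error() below.
 */
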
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct delayed_work dwork;	/* work item used for writeback */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];
};

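/*
 * Sketch: a caller holding a bdi can check the embedded default wb for
 * pending dirty IO and kick the delayed flusher work, using helpers
 * declared later in this header:
 *
 *	if (wb_has_dirty_io(&bdi->wb))
 *		bdi_wakeup_thread_delayed(bdi);
 */
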
struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All of this bdi's tasks have their dirty rates curbed to stay
	 * under it.  @dirty_ratelimit tracks the estimated
	 * @balanced_dirty_ratelimit in small steps and is much smoother
	 * and more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list & wb.dwork scheduling */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

struct backing_dev_info *inode_to_bdi(struct inode *inode);

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			 enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void bdi_writeback_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);

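/*
 * Sketch of the bdi lifecycle for a simple driver; "my_bdi" and the
 * trimmed error handling are illustrative only.
 * bdi_setup_and_register() roughly combines bdi_init() and
 * bdi_register() under the given name (plus a sequence number):
 *
 *	static struct backing_dev_info my_bdi;
 *
 *	err = bdi_setup_and_register(&my_bdi, "mydrv");
 *	if (err)
 *		return err;
 *	...
 *	bdi_destroy(&my_bdi);
 */
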
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

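/*
 * Example: accounting a page moving through writeback.  The plain
 * helpers disable interrupts themselves; the __-prefixed ones assume
 * the caller already runs with interrupts off (e.g. under an IRQ-safe
 * lock):
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);		// page submitted
 *	...
 *	// completion path, interrupts already disabled:
 *	__dec_wb_stat(wb, WB_WRITEBACK);
 *	__inc_wb_stat(wb, WB_WRITTEN);
 */
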
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

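/*
 * wb_stat() is a cheap, approximate read (off by at most
 * wb_stat_error()); wb_stat_sum() folds in every CPU's pending delta
 * and is exact but O(nr_cpus).  A throttling check might look like
 * the following, where "thresh" is an illustrative per-bdi dirty
 * threshold:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) + wb_stat(wb, WB_WRITEBACK) >
 *	    thresh)
 *		...back off and throttle the dirtier...
 */
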
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines the three
 * into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep the number of dirty pages below the bdi
 *			   threshold.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

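/*
 * Example: a purely memory-backed filesystem, whose pages can never be
 * cleaned by writing them back, sets all three accounting/writeback
 * flags at once:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * noop_backing_dev_info below is set up this way in its definition.
 */
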
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->wb.state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

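/*
 * Sketch: callers submitting optional or asynchronous writes can poll
 * for congestion first and back off instead of blocking:
 *
 *	if (bdi_write_congested(bdi))
 *		return;		// skip opportunistic writeout for now
 */
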
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

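/*
 * Typical use when the async write queues are backed up: sleep for up
 * to 100ms, or until some queue clears its congestion state:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */
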
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#endif	/* _LINUX_BACKING_DEV_H */