#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_shutting_down,	/* wb_shutdown() in progress */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};
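
/*
 * Example (illustrative only, not part of this header): state bits are
 * manipulated with atomic bitops, as the field comment on
 * bdi_writeback.state below requires, e.g.:
 *
 *	set_bit(WB_registered, &wb->state);
 *	if (test_bit(WB_writeback_running, &wb->state))
 *		return;
 *	clear_bit(WB_has_dirty_io, &wb->state);
 */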

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
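
/*
 * Illustrative sketch (not from this header): a stacking driver such as
 * md/dm can install a congested callback so congestion queries fan out
 * to its component devices; my_dev, my_dev_congested() and
 * async_queue_full() are hypothetical:
 *
 *	static int my_dev_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;
 *		int ret = 0;
 *
 *		if ((bdi_bits & (1 << WB_async_congested)) &&
 *		    async_queue_full(dev))
 *			ret |= 1 << WB_async_congested;
 *		return ret;
 *	}
 *
 *	bdi->congested_fn = my_dev_congested;
 *	bdi->congested_data = dev;
 */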

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
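
/*
 * Illustrative arithmetic (not part of this header): WB_STAT_BATCH is
 * used as the percpu_counter batch size for the stats above.  With
 * nr_cpu_ids == 16, ilog2(16) == 4, so the batch comes to
 * 8 * (1 + 4) == 40; roughly, each CPU may accumulate that many counts
 * locally before folding them into the shared counter.
 */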

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using the following struct, which is created on demand, indexed by
 * blkcg ID on its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration. For memcg-wb
					 * internal use only! */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the
 * memcg and the corresponding blkcg.  As the corresponding blkcg for a
 * memcg may change as blkcg is disabled and enabled higher up in the
 * hierarchy, a wb is tested for blkcg after lookup and removed from the
 * index on mismatch so that a new wb for the combination can be created
 * (see the illustrative lookup sketch after the struct below).
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, re-calculated every 200ms.  All
	 * the bdi tasks' dirty rates will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
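
/*
 * Illustrative sketch (not the actual kernel code) of the lookup rule
 * described in the comment above; cgwb_create() and cgwb_kill() stand
 * in for whatever helpers the implementation uses:
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && wb->blkcg_css != blkcg_css) {
 *		cgwb_kill(wb);		(stale memcg-blkcg pairing)
 *		wb = NULL;
 *	}
 *	if (!wb)
 *		wb = cgwb_create(bdi, memcg_css);
 */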

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;	/* max allowed IO size */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	const char *name;

	struct kref refcnt;	/* Reference counter for the structure */
	unsigned int capabilities; /* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
	 * any dirty wbs, which is depended upon by bdi_has_dirty_io().
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
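
/*
 * Illustrative usage (not part of this header): a driver whose queue is
 * filling up can signal back pressure to writeback and clear it once
 * the queue drains; dev and my_queue_full() are hypothetical:
 *
 *	if (my_queue_full(dev))
 *		set_bdi_congested(dev->bdi, BLK_RW_ASYNC);
 *	else
 *		clear_bdi_congested(dev->bdi, BLK_RW_ASYNC);
 */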

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}
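
/*
 * Illustrative usage (not part of this header): a caller that keeps a
 * wb pointer beyond the current context should pin it across the use:
 *
 *	if (wb_tryget(wb)) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 *
 * The root wb is embedded in its bdi and lives as long as the bdi, so
 * the helpers above skip refcounting for it.
 */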

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else /* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif /* CONFIG_CGROUP_WRITEBACK */

#endif /* __LINUX_BACKING_DEV_DEFS_H */