/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * uptodate.c
 *
 * Tracking the up-to-date-ness of a local buffer_head with respect to
 * the cluster.
 *
 * Copyright (C) 2002, 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 * Standard buffer head caching flags (uptodate, etc) are insufficient
 * in a clustered environment - a buffer may be marked up to date on
 * our local node but could have been modified by another cluster
 * member. As a result an additional (and performant) caching scheme
 * is required. A further requirement is that we consume as little
 * memory as possible - we never pin buffer_head structures in order
 * to cache them.
 *
 * We track the existence of up to date buffers on the inodes which
 * are associated with them. Because we don't want to pin
 * buffer_heads, this is only a (strong) hint and several other checks
 * are made in the I/O path to ensure that we don't use a stale or
 * invalid buffer without going to disk:
 *	- buffer_jbd is used liberally - if a bh is in the journal on
 *	  this node then it *must* be up to date.
 *	- the standard buffer_uptodate() macro is used to detect buffers
 *	  which may be invalid (even if we have an up to date tracking
 *	  item for them)
 *
 * For a full understanding of how this code works together, one
 * should read the callers in dlmglue.c, the I/O functions in
 * buffer_head_io.c and ocfs2_journal_access in journal.c
 */
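
/*
 * Illustrative sketch only, not part of this file's logic: the checks
 * listed above compose in a read path roughly as follows (the real
 * sequencing lives in buffer_head_io.c, and error handling is elided):
 *
 *	if (!ocfs2_buffer_uptodate(ci, bh)) {
 *		lock_buffer(bh);
 *		... submit the read and wait for it to complete ...
 *		ocfs2_set_buffer_uptodate(ci, bh);
 *	}
 *
 * buffer_uptodate() and buffer_jbd() are consulted inside
 * ocfs2_buffer_uptodate() itself, so callers see a single predicate.
 */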

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/buffer_head.h>
#include <linux/rbtree.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "inode.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

struct ocfs2_meta_cache_item {
	struct rb_node	c_node;
	sector_t	c_block;
};

static struct kmem_cache *ocfs2_uptodate_cachep;

u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	return ci->ci_ops->co_owner(ci);
}

struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	return ci->ci_ops->co_get_super(ci);
}

static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_cache_lock(ci);
}

static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_cache_unlock(ci);
}

void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_io_lock(ci);
}

void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_io_unlock(ci);
}


static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci,
				       int clear)
{
	ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
	ci->ci_num_cached = 0;

	if (clear) {
		ci->ci_created_trans = 0;
		ci->ci_last_trans = 0;
	}
}

void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
			       const struct ocfs2_caching_operations *ops)
{
	BUG_ON(!ops);

	ci->ci_ops = ops;
	ocfs2_metadata_cache_reset(ci, 1);
}

void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci)
{
	ocfs2_metadata_cache_purge(ci);
	ocfs2_metadata_cache_reset(ci, 1);
}

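/*
 * Illustrative sketch only: this file never defines an operations
 * table itself -- the real tables live with the objects that embed a
 * struct ocfs2_caching_info (the inode code, for instance). A
 * hypothetical backend would wire up the callbacks consumed above
 * roughly like this (all "foo_*" names are invented for the example):
 *
 *	static const struct ocfs2_caching_operations foo_caching_ops = {
 *		.co_owner		= foo_cache_owner,
 *		.co_get_super		= foo_cache_get_super,
 *		.co_cache_lock		= foo_cache_lock,
 *		.co_cache_unlock	= foo_cache_unlock,
 *		.co_io_lock		= foo_cache_io_lock,
 *		.co_io_unlock		= foo_cache_io_unlock,
 *	};
 *
 *	ocfs2_metadata_cache_init(&foo->f_metadata_cache, &foo_caching_ops);
 */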

/* No lock taken here as 'root' is not expected to be visible to other
 * processes. */
static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
{
	unsigned int purged = 0;
	struct rb_node *node;
	struct ocfs2_meta_cache_item *item;

	while ((node = rb_last(root)) != NULL) {
		item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);

		trace_ocfs2_purge_copied_metadata_tree(
					(unsigned long long) item->c_block);

		rb_erase(&item->c_node, root);
		kmem_cache_free(ocfs2_uptodate_cachep, item);

		purged++;
	}
	return purged;
}

/* Called from locking and called from ocfs2_clear_inode. Dump the
 * cache for a given inode.
 *
 * This function is a few more lines longer than necessary due to some
 * accounting done here, but I think it's worth tracking down those
 * bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
{
	unsigned int tree, to_purge, purged;
	struct rb_root root = RB_ROOT;

	BUG_ON(!ci || !ci->ci_ops);

	ocfs2_metadata_cache_lock(ci);
	tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
	to_purge = ci->ci_num_cached;

	trace_ocfs2_metadata_cache_purge(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		to_purge, tree);

	/* If we're a tree, save off the root so that we can safely
	 * initialize the cache. We do the work to free tree members
	 * without the spinlock. */
	if (tree)
		root = ci->ci_cache.ci_tree;

	ocfs2_metadata_cache_reset(ci, 0);
	ocfs2_metadata_cache_unlock(ci);

	purged = ocfs2_purge_copied_metadata_tree(&root);
	/* If possible, track the number wiped so that we can more
	 * easily detect counting errors. Unfortunately, this is only
	 * meaningful for trees. */
	if (tree && purged != to_purge)
		mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
		     (unsigned long long)ocfs2_metadata_cache_owner(ci),
		     to_purge, purged);
}

/* Returns the index in the cache array, -1 if not found.
 * Requires ip_lock. */
static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
				    sector_t item)
{
	int i;

	for (i = 0; i < ci->ci_num_cached; i++) {
		if (item == ci->ci_cache.ci_array[i])
			return i;
	}

	return -1;
}

/* Returns the cache item if found, otherwise NULL.
 * Requires ip_lock. */
static struct ocfs2_meta_cache_item *
ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
			sector_t block)
{
	struct rb_node * n = ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *item = NULL;

	while (n) {
		item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);

		if (block < item->c_block)
			n = n->rb_left;
		else if (block > item->c_block)
			n = n->rb_right;
		else
			return item;
	}

	return NULL;
}

static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
			       struct buffer_head *bh)
{
	int index = -1;
	struct ocfs2_meta_cache_item *item = NULL;

	ocfs2_metadata_cache_lock(ci);

	trace_ocfs2_buffer_cached_begin(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long) bh->b_blocknr,
		!!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
		index = ocfs2_search_cache_array(ci, bh->b_blocknr);
	else
		item = ocfs2_search_cache_tree(ci, bh->b_blocknr);

	ocfs2_metadata_cache_unlock(ci);

	trace_ocfs2_buffer_cached_end(index, item);

	return (index != -1) || (item != NULL);
}

/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache.
 *
 * This can be called under lock_buffer()
 */
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
			  struct buffer_head *bh)
{
	/* Doesn't matter if the bh is in our cache or not -- if it's
	 * not marked uptodate then we know it can't have correct
	 * data. */
	if (!buffer_uptodate(bh))
		return 0;

	/* OCFS2 does not allow multiple nodes to be changing the same
	 * block at the same time. */
	if (buffer_jbd(bh))
		return 1;

	/* Ok, locally the buffer is marked as up to date, now search
	 * our cache to see if we can trust that. */
	return ocfs2_buffer_cached(ci, bh);
}

/*
 * Determine whether a buffer is currently out on a read-ahead request.
 * ci_io_sem should be held to serialize submitters with the logic here.
 */
int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci,
			    struct buffer_head *bh)
{
	return buffer_locked(bh) && ocfs2_buffer_cached(ci, bh);
}

/* Requires ip_lock */
static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
				     sector_t block)
{
	BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);

	trace_ocfs2_append_cache_array(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, ci->ci_num_cached);

	ci->ci_cache.ci_array[ci->ci_num_cached] = block;
	ci->ci_num_cached++;
}

/* By now the caller should have checked that the item does *not*
 * exist in the tree.
 * Requires ip_lock. */
static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
				      struct ocfs2_meta_cache_item *new)
{
	sector_t block = new->c_block;
	struct rb_node *parent = NULL;
	struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *tmp;

	trace_ocfs2_insert_cache_tree(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, ci->ci_num_cached);

	while(*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);

		if (block < tmp->c_block)
			p = &(*p)->rb_left;
		else if (block > tmp->c_block)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate block %llu cached!\n",
			     (unsigned long long) block);
			BUG();
		}
	}

	rb_link_node(&new->c_node, parent, p);
	rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached++;
}

/* co_cache_lock() must be held */
static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci)
{
	return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
		(ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
}

/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
 * pointers in tree after we use them - this allows caller to detect
 * when to free in case of error.
 *
 * The co_cache_lock() must be held. */
static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
			       struct ocfs2_meta_cache_item **tree)
{
	int i;

	mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
			"Owner %llu, num cached = %u, should be %u\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci),
			ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
	mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Owner %llu not marked as inline anymore!\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci));

	/* Be careful to initialize the tree members *first* because
	 * once the ci_tree is used, the array is junk... */
	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
		tree[i]->c_block = ci->ci_cache.ci_array[i];

	ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
	ci->ci_cache.ci_tree = RB_ROOT;
	/* this will be set again by __ocfs2_insert_cache_tree */
	ci->ci_num_cached = 0;

	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
		__ocfs2_insert_cache_tree(ci, tree[i]);
		tree[i] = NULL;
	}

	trace_ocfs2_expand_cache(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		ci->ci_flags, ci->ci_num_cached);
}

/* Slow path function - memory allocation is necessary. See the
 * comment above ocfs2_set_buffer_uptodate for more information. */
static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
					sector_t block,
					int expand_tree)
{
	int i;
	struct ocfs2_meta_cache_item *new = NULL;
	struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
		{ NULL, };

	trace_ocfs2_set_buffer_uptodate(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, expand_tree);

	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
	if (!new) {
		mlog_errno(-ENOMEM);
		return;
	}
	new->c_block = block;

	if (expand_tree) {
		/* Do *not* allocate an array here - the removal code
		 * has no way of tracking that. */
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
						   GFP_NOFS);
			if (!tree[i]) {
				mlog_errno(-ENOMEM);
				goto out_free;
			}

			/* These are initialized in ocfs2_expand_cache! */
		}
	}

	ocfs2_metadata_cache_lock(ci);
	if (ocfs2_insert_can_use_array(ci)) {
		/* Ok, items were removed from the cache in between
		 * locks. Detect this and revert back to the fast path */
		ocfs2_append_cache_array(ci, block);
		ocfs2_metadata_cache_unlock(ci);
		goto out_free;
	}

	if (expand_tree)
		ocfs2_expand_cache(ci, tree);

	__ocfs2_insert_cache_tree(ci, new);
	ocfs2_metadata_cache_unlock(ci);

	new = NULL;
out_free:
	if (new)
		kmem_cache_free(ocfs2_uptodate_cachep, new);

	/* If these were used, then ocfs2_expand_cache re-set them to
	 * NULL for us. */
	if (tree[0]) {
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
			if (tree[i])
				kmem_cache_free(ocfs2_uptodate_cachep,
						tree[i]);
	}
}

/* Item insertion is guarded by co_io_lock(), so the insertion path takes
 * advantage of this by not rechecking for a duplicate insert during
 * the slow case. Additionally, if the cache needs to be bumped up to
 * a tree, the code will not recheck after acquiring the lock --
 * multiple paths cannot be expanding to a tree at the same time.
 *
 * The slow path takes into account that items can be removed
 * (including the whole tree wiped and reset) while this process is out
 * allocating memory. In those cases, it reverts back to the fast
 * path.
 *
 * Note that this function may actually fail to insert the block if
 * memory cannot be allocated. This is not fatal however (but may
 * result in a performance penalty).
 *
 * Readahead buffers can be passed in here before the I/O request is
 * completed.
 */
void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
			       struct buffer_head *bh)
{
	int expand;

	/* The block may very well exist in our cache already, so avoid
	 * doing any more work in that case. */
	if (ocfs2_buffer_cached(ci, bh))
		return;

	trace_ocfs2_set_buffer_uptodate_begin(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)bh->b_blocknr);

	/* No need to recheck under spinlock - insertion is guarded by
	 * co_io_lock() */
	ocfs2_metadata_cache_lock(ci);
	if (ocfs2_insert_can_use_array(ci)) {
		/* Fast case - it's an array and there's a free
		 * spot. */
		ocfs2_append_cache_array(ci, bh->b_blocknr);
		ocfs2_metadata_cache_unlock(ci);
		return;
	}

	expand = 0;
	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		/* We need to bump things up to a tree. */
		expand = 1;
	}
	ocfs2_metadata_cache_unlock(ci);

	__ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand);
}
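
/*
 * Illustrative sketch only: the comment above notes that readahead
 * buffers may be passed in before their I/O has completed. A submitter
 * (the real one lives in buffer_head_io.c) would, roughly, mark the
 * still-locked buffer under co_io_lock() before kicking off the
 * asynchronous read, which is what makes ocfs2_buffer_read_ahead()
 * meaningful:
 *
 *	ocfs2_metadata_cache_io_lock(ci);
 *	lock_buffer(bh);
 *	ocfs2_set_buffer_uptodate(ci, bh);
 *	... submit the asynchronous read ...
 *	ocfs2_metadata_cache_io_unlock(ci);
 */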

/* Called against a newly allocated buffer. Most likely nobody should
 * be able to read this sort of metadata while it's still being
 * allocated, but this is careful to take co_io_lock() anyway. */
void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci,
				   struct buffer_head *bh)
{
	/* This should definitely *not* exist in our cache */
	BUG_ON(ocfs2_buffer_cached(ci, bh));

	set_buffer_uptodate(bh);

	ocfs2_metadata_cache_io_lock(ci);
	ocfs2_set_buffer_uptodate(ci, bh);
	ocfs2_metadata_cache_io_unlock(ci);
}

/* Requires ip_lock. */
static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
					int index)
{
	sector_t *array = ci->ci_cache.ci_array;
	int bytes;

	BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
	BUG_ON(index >= ci->ci_num_cached);
	BUG_ON(!ci->ci_num_cached);

	trace_ocfs2_remove_metadata_array(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		index, ci->ci_num_cached);

	ci->ci_num_cached--;

	/* don't need to copy if the array is now empty, or if we
	 * removed at the tail */
	if (ci->ci_num_cached && index < ci->ci_num_cached) {
		bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
		memmove(&array[index], &array[index + 1], bytes);
	}
}

/* Requires ip_lock. */
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
				       struct ocfs2_meta_cache_item *item)
{
	trace_ocfs2_remove_metadata_tree(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)item->c_block);

	rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached--;
}

static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
					  sector_t block)
{
	int index;
	struct ocfs2_meta_cache_item *item = NULL;

	ocfs2_metadata_cache_lock(ci);
	trace_ocfs2_remove_block_from_cache(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long) block, ci->ci_num_cached,
		ci->ci_flags);

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		index = ocfs2_search_cache_array(ci, block);
		if (index != -1)
			ocfs2_remove_metadata_array(ci, index);
	} else {
		item = ocfs2_search_cache_tree(ci, block);
		if (item)
			ocfs2_remove_metadata_tree(ci, item);
	}
	ocfs2_metadata_cache_unlock(ci);

	if (item)
		kmem_cache_free(ocfs2_uptodate_cachep, item);
}

/*
 * Called when we remove a chunk of metadata from an inode. We don't
 * bother reverting things to an inlined array in the case of a remove
 * which moves us back under the limit.
 */
void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci,
			     struct buffer_head *bh)
{
	sector_t block = bh->b_blocknr;

	ocfs2_remove_block_from_cache(ci, block);
}

/* Called when we remove xattr clusters from an inode. */
void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci,
					    sector_t block,
					    u32 c_len)
{
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len;

	for (i = 0; i < b_len; i++, block++)
		ocfs2_remove_block_from_cache(ci, block);
}
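
/*
 * Worked example for the cluster-to-block conversion above (the
 * numbers are illustrative, not a statement about any particular
 * volume): with 4K clusters and 1K blocks, ocfs2_clusters_to_blocks(sb, 1)
 * is 4, so c_len == 3 removes 12 consecutive blocks starting at 'block'.
 */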

int __init init_ocfs2_uptodate_cache(void)
{
	ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
				  sizeof(struct ocfs2_meta_cache_item),
				  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ocfs2_uptodate_cachep)
		return -ENOMEM;

	return 0;
}

void exit_ocfs2_uptodate_cache(void)
{
	if (ocfs2_uptodate_cachep)
		kmem_cache_destroy(ocfs2_uptodate_cachep);
}