/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/* Upper limit of the number of segments
				   appended in collection retry loop */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file.  This also creates segments without
			   a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
			       int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&  \
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&  \
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
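
/*
 * Illustrative note: like the time_after() helpers for jiffies, these
 * macros compare sequence counters modulo 2^32 by doing the subtraction
 * in signed 32-bit arithmetic.  For example, after a counter wraps from
 * 0xfffffffe to 0x00000001,
 *
 *	nilfs_cnt32_gt(0x00000001, 0xfffffffe)
 *		== ((__s32)0xfffffffe - (__s32)0x00000001 < 0)
 *		== (-2 - 1 < 0) == true
 *
 * so the wrapped value is still treated as the newer one, provided the
 * two counters are less than 2^31 apart.
 */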

/*
 * Transaction
 */
static struct kmem_cache *nilfs_transaction_cachep;

/**
 * nilfs_init_transaction_cache - create a cache for nilfs_transaction_info
 *
 * nilfs_init_transaction_cache() creates a slab cache for the struct
 * nilfs_transaction_info.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_init_transaction_cache(void)
{
	nilfs_transaction_cachep =
		kmem_cache_create("nilfs2_transaction_cache",
				  sizeof(struct nilfs_transaction_info),
				  0, SLAB_RECLAIM_ACCOUNT, NULL);
	return (nilfs_transaction_cachep == NULL) ? -ENOMEM : 0;
}

/**
 * nilfs_destroy_transaction_cache - destroy the cache for transaction info
 *
 * nilfs_destroy_transaction_cache() frees the slab cache for the struct
 * nilfs_transaction_info.
 */
void nilfs_destroy_transaction_cache(void)
{
	kmem_cache_destroy(nilfs_transaction_cachep);
}
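
/*
 * Minimal usage sketch (illustrative; example_init() and example_exit()
 * are hypothetical stand-ins for the filesystem's module init/exit
 * path): the cache must be created before the first transaction begins
 * and destroyed after the last one ends.
 *
 *	static int __init example_init(void)
 *	{
 *		int err = nilfs_init_transaction_cache();
 *
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		...
 *		nilfs_destroy_transaction_cache();
 *	}
 */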

static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If the journal_info field is occupied by another
			 * FS, it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct nilfs_sb_info *sbi;
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	sbi = NILFS_SB(sb);
	nilfs = sbi->s_nilfs;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return ret;
}
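
/*
 * Usage sketch (illustrative; example_op() and do_update() are
 * hypothetical): callers bracket an indivisible update with the
 * begin/commit pair, and call nilfs_transaction_abort() on failure so
 * the semaphore and journal_info state are unwound.
 *
 *	static int example_op(struct super_block *sb)
 *	{
 *		struct nilfs_transaction_info ti;
 *		int err = nilfs_transaction_begin(sb, &ti, 1);
 *
 *		if (err)
 *			return err;
 *		err = do_update(sb);	// hypothetical update step
 *		if (err) {
 *			nilfs_transaction_abort(sb);
 *			return err;
 *		}
 *		return nilfs_transaction_commit(sb);
 *	}
 */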

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_sb_info *sbi;
	struct nilfs_sc_info *sci;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	sbi = NILFS_SB(sb);
	sci = NILFS_SC(sbi);
	if (sci != NULL) {
		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
		    sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&sbi->s_nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));

		up_write(&sbi->s_nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}

static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &sci->sc_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* The sizes of finfo and binfo are small enough relative to
	   the blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
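
/*
 * Worked example (illustrative): with a 4096-byte block, a summary
 * offset of 4090, an 8-byte binfo, and sc_blk_cnt == 0 (so a finfo
 * header is still pending), the check above evaluates
 *
 *	4090 + 8 + sizeof(struct nilfs_finfo) > 4096,
 *
 * which is true, so one more segment summary block is required before
 * the entry can be mapped.
 */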

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);
	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(ii->i_cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

static int nilfs_handle_bmap_error(int err, const char *fname,
				   struct inode *inode, struct super_block *sb)
{
	if (err == -EINVAL) {
		nilfs_error(sb, fname, "broken bmap (inode=%lu)\n",
			    inode->i_ino);
		err = -EIO;
	}
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);
	return 0;
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
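
/*
 * Note on the NULL entries above (descriptive, based on the dsync paths
 * in this file): a dsync log carries data blocks only, so only the
 * collect_data and write_data_binfo callbacks are meaningful here;
 * nilfs_segctor_scan_file_dsync() collects data buffers with
 * nilfs_collect_file_data() directly and never walks node or bmap
 * buffers in that mode.
 */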

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		if (mapping->host) {
			lock_page(page);
			if (!page_has_buffers(page))
				create_empty_buffers(page,
						     1 << inode->i_blkbits, 0);
			unlock_page(page);
		}

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&sbi->s_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &sbi->s_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&sbi->s_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static int nilfs_test_metadata_dirty(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(sbi->s_ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if (ret || nilfs_doing_gc())
		if (nilfs_mdt_fetch_dirty(nilfs_dat_inode(nilfs)))
			ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int ret = 0;

	if (nilfs_test_metadata_dirty(sbi))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&sbi->s_inode_lock);
	if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&sbi->s_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_mdt_clear_dirty(sbi->s_ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs));
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/* This code duplicates logic in cpfile, but it is needed
		   to collect the checkpoint even if it was not newly
		   created */
		nilfs_mdt_mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic_read(&sbi->s_inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic_read(&sbi->s_blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sbi->s_ifile, &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
					    struct inode *ifile)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

/*
 * CRC calculation routines
 */
static void nilfs_fill_in_super_root_crc(struct buffer_head *bh_sr, u32 seed)
{
	struct nilfs_super_root *raw_sr =
		(struct nilfs_super_root *)bh_sr->b_data;
	u32 crc;

	crc = crc32_le(seed,
		       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
		       NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
	raw_sr->sr_sum = cpu_to_le32(crc);
}

static void nilfs_segctor_fill_in_checksums(struct nilfs_sc_info *sci,
					    u32 seed)
{
	struct nilfs_segment_buffer *segbuf;

	if (sci->sc_super_root)
		nilfs_fill_in_super_root_crc(sci->sc_super_root, seed);

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
		nilfs_segbuf_fill_in_data_crc(segbuf, seed);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr = sci->sc_super_root;
	struct nilfs_super_root *raw_sr =
		(struct nilfs_super_root *)bh_sr->b_data;
	unsigned isz = nilfs->ns_inode_size;

	raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_mdt_write_inode_direct(
		nilfs_dat_inode(nilfs), bh_sr, NILFS_SR_DAT_OFFSET(isz));
	nilfs_mdt_write_inode_direct(
		nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(isz));
	nilfs_mdt_write_inode_direct(
		nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(isz));
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_entry(listp->next, struct buffer_head,
				b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sbi->s_ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs),
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
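
/*
 * Note (descriptive): the explicit scnt staging above makes the
 * collection pass resumable.  nilfs_segctor_collect() below snapshots
 * sc_stage before calling this function and restores it when the retry
 * loop extends the segment list after -E2BIG, so a partially completed
 * pass can be replayed from the saved stage, with each case falling
 * through to the next.
 */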

static int nilfs_touch_segusage(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh_su;
	struct nilfs_segment_usage *raw_su;
	int err;

	err = nilfs_sufile_get_segment_usage(sufile, segnum, &raw_su, &bh_su);
	if (unlikely(err))
		return err;
	nilfs_mdt_mark_buffer_dirty(bh_su);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_sufile_put_segment_usage(sufile, segnum, bh_su);
	return 0;
}

static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *n;
	__u64 nextnum;
	int err;

	if (list_empty(&sci->sc_segbufs)) {
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			return -ENOMEM;
		list_add(&segbuf->sb_list, &sci->sc_segbufs);
	} else
		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

	nilfs_segbuf_map(segbuf, nilfs->ns_segnum, nilfs->ns_pseg_offset,
			 nilfs);

	if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
		nilfs_shift_to_next_segment(nilfs);
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
	}
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;

	err = nilfs_touch_segusage(nilfs->ns_sufile, segbuf->sb_segnum);
	if (unlikely(err))
		return err;

	if (nilfs->ns_segnum == nilfs->ns_nextnum) {
		/* Start from the head of a new full segment */
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (unlikely(err))
			return err;
	} else
		nextnum = nilfs->ns_nextnum;

	segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	/* truncating segment buffers */
	list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
					  sb_list) {
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_free(segbuf);
	}
	return 0;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_touch_segusage(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice(&list, sci->sc_segbufs.prev);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry_safe(segbuf, n, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_free(segbuf);
	}
	return err;
}

static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
						   struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf;
	int ret, done = 0;

	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (segbuf->sb_io_error) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/* Case 1a: Partial segment appended into an existing
			   segment */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
		done++;
	}

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
		if (!done && segbuf->sb_io_error) {
			if (segbuf->sb_segnum != nilfs->ns_nextnum)
				/* Case 2: extended segment (!= next) failed */
				nilfs_sufile_set_error(nilfs->ns_sufile,
						       segbuf->sb_segnum);
			done++;
		}
	}
}

static void nilfs_segctor_clear_segment_buffers(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list)
		nilfs_segbuf_clear(segbuf);
	sci->sc_super_root = NULL;
}

static void nilfs_segctor_destroy_segment_buffers(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;

	while (!list_empty(&sci->sc_segbufs)) {
		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_free(segbuf);
	}
	/* sci->sc_curseg = NULL; */
}

static void nilfs_segctor_end_construction(struct nilfs_sc_info *sci,
					   struct the_nilfs *nilfs, int err)
{
	if (unlikely(err)) {
		nilfs_segctor_free_incomplete_segments(sci, nilfs);
		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			int ret;

			ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(ret); /* should not happen */
		}
	}
	nilfs_segctor_clear_segment_buffers(sci);
}
1470 | ||
1471 | static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci, | |
1472 | struct inode *sufile) | |
1473 | { | |
1474 | struct nilfs_segment_buffer *segbuf; | |
1475 | struct buffer_head *bh_su; | |
1476 | struct nilfs_segment_usage *raw_su; | |
1477 | unsigned long live_blocks; | |
1478 | int ret; | |
1479 | ||
1480 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { | |
1481 | ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum, | |
1482 | &raw_su, &bh_su); | |
1f5abe7e | 1483 | WARN_ON(ret); /* always succeed because bh_su is dirty */ |
9ff05123 RK |
1484 | live_blocks = segbuf->sb_sum.nblocks + |
1485 | (segbuf->sb_pseg_start - segbuf->sb_fseg_start); | |
1486 | raw_su->su_lastmod = cpu_to_le64(sci->sc_seg_ctime); | |
1487 | raw_su->su_nblocks = cpu_to_le32(live_blocks); | |
1488 | nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, | |
1489 | bh_su); | |
1490 | } | |
1491 | } | |
1492 | ||
1493 | static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci, | |
1494 | struct inode *sufile) | |
1495 | { | |
1496 | struct nilfs_segment_buffer *segbuf; | |
1497 | struct buffer_head *bh_su; | |
1498 | struct nilfs_segment_usage *raw_su; | |
1499 | int ret; | |
1500 | ||
1501 | segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs); | |
1502 | ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum, | |
1503 | &raw_su, &bh_su); | |
1f5abe7e | 1504 | WARN_ON(ret); /* always succeed because bh_su is dirty */ |
9ff05123 RK |
1505 | raw_su->su_nblocks = cpu_to_le32(segbuf->sb_pseg_start - |
1506 | segbuf->sb_fseg_start); | |
1507 | nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, bh_su); | |
1508 | ||
1509 | list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) { | |
1510 | ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum, | |
1511 | &raw_su, &bh_su); | |
1f5abe7e | 1512 | WARN_ON(ret); /* always succeed */ |
9ff05123 RK |
1513 | raw_su->su_nblocks = 0; |
1514 | nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, | |
1515 | bh_su); | |
1516 | } | |
1517 | } | |
1518 | ||
1519 | static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci, | |
1520 | struct nilfs_segment_buffer *last, | |
1521 | struct inode *sufile) | |
1522 | { | |
1523 | struct nilfs_segment_buffer *segbuf = last, *n; | |
1524 | int ret; | |
1525 | ||
1526 | list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs, | |
1527 | sb_list) { | |
1528 | list_del_init(&segbuf->sb_list); | |
1529 | sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks; | |
1530 | ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum); | |
1531 | WARN_ON(ret); |
1532 | nilfs_segbuf_free(segbuf); |
1533 | } | |
1534 | } | |
1535 | ||
1536 | ||
1537 | static int nilfs_segctor_collect(struct nilfs_sc_info *sci, | |
1538 | struct the_nilfs *nilfs, int mode) | |
1539 | { | |
1540 | struct nilfs_cstage prev_stage = sci->sc_stage; | |
1541 | int err, nadd = 1; | |
1542 | ||
1543 | /* Collection retry loop */ | |
1544 | for (;;) { | |
1545 | sci->sc_super_root = NULL; | |
1546 | sci->sc_nblk_this_inc = 0; | |
1547 | sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs); | |
1548 | ||
1549 | err = nilfs_segctor_reset_segment_buffer(sci); | |
1550 | if (unlikely(err)) | |
1551 | goto failed; | |
1552 | ||
1553 | err = nilfs_segctor_collect_blocks(sci, mode); | |
1554 | sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; | |
1555 | if (!err) | |
1556 | break; | |
1557 | ||
1558 | if (unlikely(err != -E2BIG)) | |
1559 | goto failed; | |
1560 | ||
1561 | /* The current segment is filled up */ | |
1562 | if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE) | |
1563 | break; | |
1564 | ||
1565 | if (sci->sc_stage.flags & NILFS_CF_SUFREED) { |
1566 | err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, | |
1567 | sci->sc_freesegs, | |
1568 | sci->sc_nfreesegs, | |
1569 | NULL); | |
1570 | WARN_ON(err); /* should not happen */ |
1571 | } | |
1572 | nilfs_segctor_clear_segment_buffers(sci); |
1573 | ||
1574 | err = nilfs_segctor_extend_segments(sci, nilfs, nadd); | |
1575 | if (unlikely(err)) | |
1576 | return err; | |
1577 | ||
1578 | nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); | |
1579 | sci->sc_stage = prev_stage; | |
1580 | } | |
1581 | nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile); | |
1582 | return 0; | |
1583 | ||
1584 | failed: | |
1585 | return err; | |
1586 | } | |
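/*
 * The retry loop above grows the number of segments appended per retry
 * geometrically: nadd doubles on every -E2BIG result and is capped at
 * SC_MAX_SEGDELTA.  A minimal sketch of that schedule, using a
 * hypothetical helper name:
 */
static inline int nilfs_sketch_next_segdelta(int nadd)
{
	/* request 1, 2, 4, ... extra segments, capped at SC_MAX_SEGDELTA (64) */
	return min_t(int, nadd << 1, SC_MAX_SEGDELTA);
}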
1587 | ||
1588 | static void nilfs_list_replace_buffer(struct buffer_head *old_bh, | |
1589 | struct buffer_head *new_bh) | |
1590 | { | |
1591 | BUG_ON(!list_empty(&new_bh->b_assoc_buffers)); | |
1592 | ||
1593 | list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers); | |
1594 | /* The caller must release old_bh */ | |
1595 | } | |
1596 | ||
1597 | static int | |
1598 | nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, | |
1599 | struct nilfs_segment_buffer *segbuf, | |
1600 | int mode) | |
1601 | { | |
1602 | struct inode *inode = NULL; | |
1603 | sector_t blocknr; | |
1604 | unsigned long nfinfo = segbuf->sb_sum.nfinfo; | |
1605 | unsigned long nblocks = 0, ndatablk = 0; | |
1606 | struct nilfs_sc_operations *sc_op = NULL; | |
1607 | struct nilfs_segsum_pointer ssp; | |
1608 | struct nilfs_finfo *finfo = NULL; | |
1609 | union nilfs_binfo binfo; | |
1610 | struct buffer_head *bh, *bh_org; | |
1611 | ino_t ino = 0; | |
1612 | int err = 0; | |
1613 | ||
1614 | if (!nfinfo) | |
1615 | goto out; | |
1616 | ||
1617 | blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk; | |
1618 | ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers); | |
1619 | ssp.offset = sizeof(struct nilfs_segment_summary); | |
1620 | ||
1621 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { | |
1622 | if (bh == sci->sc_super_root) | |
1623 | break; | |
1624 | if (!finfo) { | |
1625 | finfo = nilfs_segctor_map_segsum_entry( | |
1626 | sci, &ssp, sizeof(*finfo)); | |
1627 | ino = le64_to_cpu(finfo->fi_ino); | |
1628 | nblocks = le32_to_cpu(finfo->fi_nblocks); | |
1629 | ndatablk = le32_to_cpu(finfo->fi_ndatablk); | |
1630 | ||
1631 | if (buffer_nilfs_node(bh)) | |
1632 | inode = NILFS_BTNC_I(bh->b_page->mapping); | |
1633 | else | |
1634 | inode = NILFS_AS_I(bh->b_page->mapping); | |
1635 | ||
1636 | if (mode == SC_LSEG_DSYNC) | |
1637 | sc_op = &nilfs_sc_dsync_ops; | |
1638 | else if (ino == NILFS_DAT_INO) | |
1639 | sc_op = &nilfs_sc_dat_ops; | |
1640 | else /* file blocks */ | |
1641 | sc_op = &nilfs_sc_file_ops; | |
1642 | } | |
1643 | bh_org = bh; | |
1644 | get_bh(bh_org); | |
1645 | err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr, | |
1646 | &binfo); | |
1647 | if (bh != bh_org) | |
1648 | nilfs_list_replace_buffer(bh_org, bh); | |
1649 | brelse(bh_org); | |
1650 | if (unlikely(err)) | |
1651 | goto failed_bmap; | |
1652 | ||
1653 | if (ndatablk > 0) | |
1654 | sc_op->write_data_binfo(sci, &ssp, &binfo); | |
1655 | else | |
1656 | sc_op->write_node_binfo(sci, &ssp, &binfo); | |
1657 | ||
1658 | blocknr++; | |
1659 | if (--nblocks == 0) { | |
1660 | finfo = NULL; | |
1661 | if (--nfinfo == 0) | |
1662 | break; | |
1663 | } else if (ndatablk > 0) | |
1664 | ndatablk--; | |
1665 | } | |
1666 | out: | |
1667 | return 0; | |
1668 | ||
1669 | failed_bmap: | |
1670 | err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super); | |
1671 | return err; | |
1672 | } | |
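/*
 * Payload buffers are visited in the order recorded by the finfo
 * entries: for each finfo, the first fi_ndatablk buffers are data
 * blocks and the remaining fi_nblocks - fi_ndatablk buffers are
 * b-tree node blocks.  That is why the loop above switches from
 * write_data_binfo() to write_node_binfo() once ndatablk reaches
 * zero, and loads the next finfo when nblocks is exhausted.
 */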
1673 | ||
1674 | static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode) | |
1675 | { | |
1676 | struct nilfs_segment_buffer *segbuf; | |
1677 | int err; | |
1678 | ||
1679 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { | |
1680 | err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode); | |
1681 | if (unlikely(err)) | |
1682 | return err; | |
1683 | nilfs_segbuf_fill_in_segsum(segbuf); | |
1684 | } | |
1685 | return 0; | |
1686 | } | |
1687 | ||
1688 | static int | |
1689 | nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out) | |
1690 | { | |
1691 | struct page *clone_page; | |
1692 | struct buffer_head *bh, *head, *bh2; | |
1693 | void *kaddr; | |
1694 | ||
1695 | bh = head = page_buffers(page); | |
1696 | ||
1697 | clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0); | |
1698 | if (unlikely(!clone_page)) | |
1699 | return -ENOMEM; | |
1700 | ||
1701 | bh2 = page_buffers(clone_page); | |
1702 | kaddr = kmap_atomic(page, KM_USER0); | |
1703 | do { | |
1704 | if (list_empty(&bh->b_assoc_buffers)) | |
1705 | continue; | |
1706 | get_bh(bh2); | |
1707 | page_cache_get(clone_page); /* for each bh */ | |
1708 | memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size); | |
1709 | bh2->b_blocknr = bh->b_blocknr; | |
1710 | list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers); | |
1711 | list_add_tail(&bh->b_assoc_buffers, out); | |
1712 | } while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head); | |
1713 | kunmap_atomic(kaddr, KM_USER0); | |
1714 | ||
1715 | if (!TestSetPageWriteback(clone_page)) | |
1716 | inc_zone_page_state(clone_page, NR_WRITEBACK); | |
1717 | unlock_page(clone_page); | |
1718 | ||
1719 | return 0; | |
1720 | } | |
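/*
 * The copy above implements a freeze by copy-on-write: buffers under
 * collection are duplicated into a private clone page, the clone's
 * buffer heads replace the originals on the b_assoc_buffers lists,
 * and the originals are queued on @out.  A mapped page can therefore
 * keep being dirtied by userspace while its frozen image goes to disk.
 */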
1721 | ||
1722 | static int nilfs_test_page_to_be_frozen(struct page *page) | |
1723 | { | |
1724 | struct address_space *mapping = page->mapping; | |
1725 | ||
1726 | if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode)) | |
1727 | return 0; | |
1728 | ||
1729 | if (page_mapped(page)) { | |
1730 | ClearPageChecked(page); | |
1731 | return 1; | |
1732 | } | |
1733 | return PageChecked(page); | |
1734 | } | |
1735 | ||
1736 | static int nilfs_begin_page_io(struct page *page, struct list_head *out) | |
1737 | { | |
1738 | if (!page || PageWriteback(page)) | |
1739 | /* For split b-tree node pages, this function may be called | |
1740 | twice. We ignore the second and later calls with this check. */ |
1741 | return 0; | |
1742 | ||
1743 | lock_page(page); | |
1744 | clear_page_dirty_for_io(page); | |
1745 | set_page_writeback(page); | |
1746 | unlock_page(page); | |
1747 | ||
1748 | if (nilfs_test_page_to_be_frozen(page)) { | |
1749 | int err = nilfs_copy_replace_page_buffers(page, out); | |
1750 | if (unlikely(err)) | |
1751 | return err; | |
1752 | } | |
1753 | return 0; | |
1754 | } | |
1755 | ||
1756 | static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci, | |
1757 | struct page **failed_page) | |
1758 | { | |
1759 | struct nilfs_segment_buffer *segbuf; | |
1760 | struct page *bd_page = NULL, *fs_page = NULL; | |
1761 | struct list_head *list = &sci->sc_copied_buffers; | |
1762 | int err; | |
1763 | ||
1764 | *failed_page = NULL; | |
1765 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { | |
1766 | struct buffer_head *bh; | |
1767 | ||
1768 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, | |
1769 | b_assoc_buffers) { | |
1770 | if (bh->b_page != bd_page) { | |
1771 | if (bd_page) { | |
1772 | lock_page(bd_page); | |
1773 | clear_page_dirty_for_io(bd_page); | |
1774 | set_page_writeback(bd_page); | |
1775 | unlock_page(bd_page); | |
1776 | } | |
1777 | bd_page = bh->b_page; | |
1778 | } | |
1779 | } | |
1780 | ||
1781 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, | |
1782 | b_assoc_buffers) { | |
1783 | if (bh == sci->sc_super_root) { | |
1784 | if (bh->b_page != bd_page) { | |
1785 | lock_page(bd_page); | |
1786 | clear_page_dirty_for_io(bd_page); | |
1787 | set_page_writeback(bd_page); | |
1788 | unlock_page(bd_page); | |
1789 | bd_page = bh->b_page; | |
1790 | } | |
1791 | break; | |
1792 | } | |
1793 | if (bh->b_page != fs_page) { | |
1794 | err = nilfs_begin_page_io(fs_page, list); | |
1795 | if (unlikely(err)) { | |
1796 | *failed_page = fs_page; | |
1797 | goto out; | |
1798 | } | |
1799 | fs_page = bh->b_page; | |
1800 | } | |
1801 | } | |
1802 | } | |
1803 | if (bd_page) { | |
1804 | lock_page(bd_page); | |
1805 | clear_page_dirty_for_io(bd_page); | |
1806 | set_page_writeback(bd_page); | |
1807 | unlock_page(bd_page); | |
1808 | } | |
1809 | err = nilfs_begin_page_io(fs_page, list); | |
1810 | if (unlikely(err)) | |
1811 | *failed_page = fs_page; | |
1812 | out: | |
1813 | return err; | |
1814 | } | |
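/*
 * Writeback is marked at page granularity although the segment lists
 * are block granular: bd_page tracks segment-summary (block device)
 * pages and fs_page tracks payload (file) pages, and each page is
 * processed exactly once, at the first buffer whose b_page differs
 * from the page seen so far.
 */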
1815 | ||
1816 | static int nilfs_segctor_write(struct nilfs_sc_info *sci, | |
1817 | struct backing_dev_info *bdi) | |
1818 | { | |
1819 | struct nilfs_segment_buffer *segbuf; | |
1820 | struct nilfs_write_info wi; | |
1821 | int err, res; | |
1822 | ||
1823 | wi.sb = sci->sc_super; | |
1824 | wi.bh_sr = sci->sc_super_root; | |
1825 | wi.bdi = bdi; | |
1826 | ||
1827 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { | |
1828 | nilfs_segbuf_prepare_write(segbuf, &wi); | |
1829 | err = nilfs_segbuf_write(segbuf, &wi); | |
1830 | ||
1831 | res = nilfs_segbuf_wait(segbuf, &wi); | |
1832 | err = err ? : res; |
1833 | if (err) | |
1834 | return err; |
1835 | } | |
1836 | return 0; | |
1837 | } | |
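/*
 * Segment buffers are written and waited on strictly one at a time;
 * err = err ? : res keeps the first failure while still waiting for
 * the BIOs already submitted, so no I/O remains in flight on return.
 */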
1838 | ||
1839 | static int nilfs_page_has_uncleared_buffer(struct page *page) | |
1840 | { | |
1841 | struct buffer_head *head, *bh; | |
1842 | ||
1843 | head = bh = page_buffers(page); | |
1844 | do { | |
1845 | if (buffer_dirty(bh) && !list_empty(&bh->b_assoc_buffers)) | |
1846 | return 1; | |
1847 | bh = bh->b_this_page; | |
1848 | } while (bh != head); | |
1849 | return 0; | |
1850 | } | |
1851 | ||
1852 | static void __nilfs_end_page_io(struct page *page, int err) | |
1853 | { | |
1854 | if (!err) { |
1855 | if (!nilfs_page_buffers_clean(page)) | |
1856 | __set_page_dirty_nobuffers(page); | |
1857 | ClearPageError(page); | |
1858 | } else { | |
1859 | __set_page_dirty_nobuffers(page); | |
1860 | SetPageError(page); | |
1861 | } | |
1862 | ||
1863 | if (buffer_nilfs_allocated(page_buffers(page))) { | |
1864 | if (TestClearPageWriteback(page)) | |
1865 | dec_zone_page_state(page, NR_WRITEBACK); | |
1866 | } else | |
1867 | end_page_writeback(page); | |
1868 | } | |
1869 | ||
1870 | static void nilfs_end_page_io(struct page *page, int err) | |
1871 | { | |
1872 | if (!page) | |
1873 | return; | |
1874 | ||
1875 | if (buffer_nilfs_node(page_buffers(page)) && | |
1876 | nilfs_page_has_uncleared_buffer(page)) | |
1877 | /* For b-tree node pages, this function may be called twice | |
1878 | or more because they might be split in a segment. | |
1879 | This check ensures that cleanup has been done for all |
1880 | buffers in a split btnode page. */ | |
1881 | return; | |
1882 | ||
1883 | __nilfs_end_page_io(page, err); | |
1884 | } | |
1885 | ||
1886 | static void nilfs_clear_copied_buffers(struct list_head *list, int err) | |
1887 | { | |
1888 | struct buffer_head *bh, *head; | |
1889 | struct page *page; | |
1890 | ||
1891 | while (!list_empty(list)) { | |
1892 | bh = list_entry(list->next, struct buffer_head, | |
1893 | b_assoc_buffers); | |
1894 | page = bh->b_page; | |
1895 | page_cache_get(page); | |
1896 | head = bh = page_buffers(page); | |
1897 | do { | |
1898 | if (!list_empty(&bh->b_assoc_buffers)) { | |
1899 | list_del_init(&bh->b_assoc_buffers); | |
1900 | if (!err) { | |
1901 | set_buffer_uptodate(bh); | |
1902 | clear_buffer_dirty(bh); | |
1903 | clear_buffer_nilfs_volatile(bh); | |
1904 | } | |
1905 | brelse(bh); /* for b_assoc_buffers */ | |
1906 | } | |
1907 | } while ((bh = bh->b_this_page) != head); | |
1908 | ||
1909 | __nilfs_end_page_io(page, err); | |
1910 | page_cache_release(page); | |
1911 | } | |
1912 | } | |
1913 | ||
1914 | static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci, | |
1915 | struct page *failed_page, int err) | |
1916 | { | |
1917 | struct nilfs_segment_buffer *segbuf; | |
1918 | struct page *bd_page = NULL, *fs_page = NULL; | |
1919 | ||
1920 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { | |
1921 | struct buffer_head *bh; | |
1922 | ||
1923 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, | |
1924 | b_assoc_buffers) { | |
1925 | if (bh->b_page != bd_page) { | |
1926 | if (bd_page) | |
1927 | end_page_writeback(bd_page); | |
1928 | bd_page = bh->b_page; | |
1929 | } | |
1930 | } | |
1931 | ||
1932 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, | |
1933 | b_assoc_buffers) { | |
1934 | if (bh == sci->sc_super_root) { | |
1935 | if (bh->b_page != bd_page) { | |
1936 | end_page_writeback(bd_page); | |
1937 | bd_page = bh->b_page; | |
1938 | } | |
1939 | break; | |
1940 | } | |
1941 | if (bh->b_page != fs_page) { | |
1942 | nilfs_end_page_io(fs_page, err); | |
1943 | if (unlikely(fs_page == failed_page)) | |
1944 | goto done; | |
1945 | fs_page = bh->b_page; | |
1946 | } | |
1947 | } | |
1948 | } | |
1949 | if (bd_page) | |
1950 | end_page_writeback(bd_page); | |
1951 | ||
1952 | nilfs_end_page_io(fs_page, err); | |
1953 | done: | |
1954 | nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err); | |
1955 | } | |
1956 | ||
1957 | static void nilfs_set_next_segment(struct the_nilfs *nilfs, | |
1958 | struct nilfs_segment_buffer *segbuf) | |
1959 | { | |
1960 | nilfs->ns_segnum = segbuf->sb_segnum; | |
1961 | nilfs->ns_nextnum = segbuf->sb_nextnum; | |
1962 | nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start | |
1963 | + segbuf->sb_sum.nblocks; | |
1964 | nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq; | |
1965 | nilfs->ns_ctime = segbuf->sb_sum.ctime; | |
1966 | } | |
1967 | ||
1968 | static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) | |
1969 | { | |
1970 | struct nilfs_segment_buffer *segbuf; | |
1971 | struct page *bd_page = NULL, *fs_page = NULL; | |
1972 | struct nilfs_sb_info *sbi = sci->sc_sbi; | |
1973 | struct the_nilfs *nilfs = sbi->s_nilfs; | |
1974 | int update_sr = (sci->sc_super_root != NULL); | |
1975 | ||
1976 | list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { | |
1977 | struct buffer_head *bh; | |
1978 | ||
1979 | list_for_each_entry(bh, &segbuf->sb_segsum_buffers, | |
1980 | b_assoc_buffers) { | |
1981 | set_buffer_uptodate(bh); | |
1982 | clear_buffer_dirty(bh); | |
1983 | if (bh->b_page != bd_page) { | |
1984 | if (bd_page) | |
1985 | end_page_writeback(bd_page); | |
1986 | bd_page = bh->b_page; | |
1987 | } | |
1988 | } | |
1989 | /* | |
1990 | * We assume that buffers belonging to the same page are |
1991 | * contiguous in the buffer list. |
1992 | * Under this assumption, the last BH of each page is |
1993 | * identifiable by the discontinuity of bh->b_page |
1994 | * (page != fs_page). |
1995 | * | |
1996 | * For B-tree node blocks, however, this assumption is not | |
1997 | * guaranteed. The cleanup code of B-tree node pages needs | |
1998 | * special care. | |
1999 | */ | |
2000 | list_for_each_entry(bh, &segbuf->sb_payload_buffers, | |
2001 | b_assoc_buffers) { | |
2002 | set_buffer_uptodate(bh); | |
2003 | clear_buffer_dirty(bh); | |
2004 | clear_buffer_nilfs_volatile(bh); | |
2005 | if (bh == sci->sc_super_root) { | |
2006 | if (bh->b_page != bd_page) { | |
2007 | end_page_writeback(bd_page); | |
2008 | bd_page = bh->b_page; | |
2009 | } | |
2010 | break; | |
2011 | } | |
2012 | if (bh->b_page != fs_page) { | |
2013 | nilfs_end_page_io(fs_page, 0); | |
2014 | fs_page = bh->b_page; | |
2015 | } | |
2016 | } | |
2017 | ||
2018 | if (!NILFS_SEG_SIMPLEX(&segbuf->sb_sum)) { | |
2019 | if (NILFS_SEG_LOGBGN(&segbuf->sb_sum)) { | |
2020 | set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); | |
2021 | sci->sc_lseg_stime = jiffies; | |
2022 | } | |
2023 | if (NILFS_SEG_LOGEND(&segbuf->sb_sum)) | |
2024 | clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); | |
2025 | } | |
2026 | } | |
2027 | /* | |
2028 | * Since pages may continue over multiple segment buffers, | |
2029 | * end of the last page must be checked outside of the loop. | |
2030 | */ | |
2031 | if (bd_page) | |
2032 | end_page_writeback(bd_page); | |
2033 | ||
2034 | nilfs_end_page_io(fs_page, 0); | |
2035 | ||
2036 | nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0); | |
2037 | ||
2038 | nilfs_drop_collected_inodes(&sci->sc_dirty_files); | |
2039 | ||
2040 | if (nilfs_doing_gc()) { | |
2041 | nilfs_drop_collected_inodes(&sci->sc_gc_inodes); | |
2042 | if (update_sr) | |
2043 | nilfs_commit_gcdat_inode(nilfs); | |
2044 | } else |
2045 | nilfs->ns_nongc_ctime = sci->sc_seg_ctime; |
2046 | |
2047 | sci->sc_nblk_inc += sci->sc_nblk_this_inc; | |
2048 | ||
2049 | segbuf = NILFS_LAST_SEGBUF(&sci->sc_segbufs); | |
2050 | nilfs_set_next_segment(nilfs, segbuf); | |
2051 | ||
2052 | if (update_sr) { | |
2053 | nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, | |
2054 | segbuf->sb_sum.seg_seq, nilfs->ns_cno++); |
2055 | sbi->s_super->s_dirt = 1; |
2056 | ||
2057 | clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); |
2058 | clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); |
2059 | set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); | |
2060 | } else | |
2061 | clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags); | |
2062 | } | |
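/*
 * On successful completion the constructor clears dirty/writeback
 * state, records where the next segment starts, and, when a super
 * root was written, bumps the checkpoint number and dirties the super
 * block.  NILFS_SC_UNCLOSED tracks logical segments that span several
 * partial segments and still await their end mark.
 */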
2063 | ||
2064 | static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci, | |
2065 | struct nilfs_sb_info *sbi) | |
2066 | { | |
2067 | struct nilfs_inode_info *ii, *n; | |
2068 | __u64 cno = sbi->s_nilfs->ns_cno; | |
2069 | ||
2070 | spin_lock(&sbi->s_inode_lock); | |
2071 | retry: | |
2072 | list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) { | |
2073 | if (!ii->i_bh) { | |
2074 | struct buffer_head *ibh; | |
2075 | int err; | |
2076 | ||
2077 | spin_unlock(&sbi->s_inode_lock); | |
2078 | err = nilfs_ifile_get_inode_block( | |
2079 | sbi->s_ifile, ii->vfs_inode.i_ino, &ibh); | |
2080 | if (unlikely(err)) { | |
2081 | nilfs_warning(sbi->s_super, __func__, | |
2082 | "failed to get inode block.\n"); | |
2083 | return err; | |
2084 | } | |
2085 | nilfs_mdt_mark_buffer_dirty(ibh); | |
2086 | nilfs_mdt_mark_dirty(sbi->s_ifile); | |
2087 | spin_lock(&sbi->s_inode_lock); | |
2088 | if (likely(!ii->i_bh)) | |
2089 | ii->i_bh = ibh; | |
2090 | else | |
2091 | brelse(ibh); | |
2092 | goto retry; | |
2093 | } | |
2094 | ii->i_cno = cno; | |
2095 | ||
2096 | clear_bit(NILFS_I_QUEUED, &ii->i_state); | |
2097 | set_bit(NILFS_I_BUSY, &ii->i_state); | |
2098 | list_del(&ii->i_dirty); | |
2099 | list_add_tail(&ii->i_dirty, &sci->sc_dirty_files); | |
2100 | } | |
2101 | spin_unlock(&sbi->s_inode_lock); | |
2102 | ||
2103 | NILFS_I(sbi->s_ifile)->i_cno = cno; | |
2104 | ||
2105 | return 0; | |
2106 | } | |
2107 | ||
2108 | static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci, | |
2109 | struct nilfs_sb_info *sbi) | |
2110 | { | |
2111 | struct nilfs_transaction_info *ti = current->journal_info; | |
2112 | struct nilfs_inode_info *ii, *n; | |
2113 | __u64 cno = sbi->s_nilfs->ns_cno; | |
2114 | ||
2115 | spin_lock(&sbi->s_inode_lock); | |
2116 | list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { | |
2117 | if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) || | |
2118 | test_bit(NILFS_I_DIRTY, &ii->i_state)) { | |
2119 | /* The current checkpoint number (=nilfs->ns_cno) is | |
2120 | changed between check-in and check-out only if the | |
2121 | super root is written out. So, we can update i_cno | |
2122 | for the inodes that remain in the dirty list. */ | |
2123 | ii->i_cno = cno; | |
2124 | continue; | |
2125 | } | |
2126 | clear_bit(NILFS_I_BUSY, &ii->i_state); | |
2127 | brelse(ii->i_bh); | |
2128 | ii->i_bh = NULL; | |
2129 | list_del(&ii->i_dirty); | |
2130 | list_add_tail(&ii->i_dirty, &ti->ti_garbage); | |
2131 | } | |
2132 | spin_unlock(&sbi->s_inode_lock); | |
2133 | } | |
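/*
 * Check-in moves dirty inodes from the super-block-wide s_dirty_files
 * list to the constructor's private sc_dirty_files list
 * (NILFS_I_QUEUED -> NILFS_I_BUSY), pinning each inode's ifile block
 * in ii->i_bh.  Check-out hands fully written inodes
 * (NILFS_I_UPDATED) to the transaction's garbage list and leaves
 * still-dirty ones checked in with an updated i_cno.
 */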
2134 | ||
2135 | /* |
2136 | * Main procedure of segment constructor | |
2137 | */ | |
2138 | static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) | |
2139 | { | |
2140 | struct nilfs_sb_info *sbi = sci->sc_sbi; | |
2141 | struct the_nilfs *nilfs = sbi->s_nilfs; | |
2142 | struct page *failed_page; | |
2143 | int err, has_sr = 0; | |
2144 | ||
2145 | sci->sc_stage.scnt = NILFS_ST_INIT; | |
2146 | ||
2147 | err = nilfs_segctor_check_in_files(sci, sbi); | |
2148 | if (unlikely(err)) | |
2149 | goto out; | |
2150 | ||
2151 | if (nilfs_test_metadata_dirty(sbi)) | |
2152 | set_bit(NILFS_SC_DIRTY, &sci->sc_flags); | |
2153 | ||
2154 | if (nilfs_segctor_clean(sci)) | |
2155 | goto out; | |
2156 | ||
2157 | do { | |
2158 | sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK; | |
2159 | ||
2160 | err = nilfs_segctor_begin_construction(sci, nilfs); | |
2161 | if (unlikely(err)) | |
2162 | goto out; | |
2163 | ||
2164 | /* Update time stamp */ | |
2165 | sci->sc_seg_ctime = get_seconds(); | |
2166 | ||
2167 | err = nilfs_segctor_collect(sci, nilfs, mode); | |
2168 | if (unlikely(err)) | |
2169 | goto failed; | |
2170 | ||
2171 | has_sr = (sci->sc_super_root != NULL); | |
2172 | ||
2173 | /* Avoid empty segment */ | |
2174 | if (sci->sc_stage.scnt == NILFS_ST_DONE && | |
2175 | NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) { | |
2176 | nilfs_segctor_end_construction(sci, nilfs, 1); |
2177 | goto out; | |
2178 | } | |
2179 | ||
2180 | err = nilfs_segctor_assign(sci, mode); | |
2181 | if (unlikely(err)) | |
2182 | goto failed; | |
2183 | ||
2184 | if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) |
2185 | nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile); | |
2186 | ||
2187 | if (has_sr) { | |
2188 | err = nilfs_segctor_fill_in_checkpoint(sci); | |
2189 | if (unlikely(err)) | |
2190 | goto failed_to_make_up; | |
2191 | ||
2192 | nilfs_segctor_fill_in_super_root(sci, nilfs); | |
2193 | } | |
2194 | nilfs_segctor_update_segusage(sci, nilfs->ns_sufile); | |
2195 | ||
2196 | /* Write partial segments */ | |
2197 | err = nilfs_segctor_prepare_write(sci, &failed_page); | |
2198 | if (unlikely(err)) | |
2199 | goto failed_to_write; | |
2200 | ||
2201 | nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed); | |
2202 | ||
2203 | err = nilfs_segctor_write(sci, nilfs->ns_bdi); | |
2204 | if (unlikely(err)) | |
2205 | goto failed_to_write; | |
2206 | ||
2207 | nilfs_segctor_complete_write(sci); | |
2208 | ||
2209 | /* Commit segments */ | |
2210 | if (has_sr) |
2211 | nilfs_segctor_clear_metadata_dirty(sci); |
2212 | |
2213 | nilfs_segctor_end_construction(sci, nilfs, 0); | |
2214 | ||
2215 | } while (sci->sc_stage.scnt != NILFS_ST_DONE); | |
2216 | ||
2217 | out: |
2218 | nilfs_segctor_destroy_segment_buffers(sci); | |
2219 | nilfs_segctor_check_out_files(sci, sbi); | |
2220 | return err; | |
2221 | ||
2222 | failed_to_write: | |
2223 | nilfs_segctor_abort_write(sci, failed_page, err); | |
2224 | nilfs_segctor_cancel_segusage(sci, nilfs->ns_sufile); | |
2225 | ||
2226 | failed_to_make_up: | |
2227 | if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED) | |
2228 | nilfs_redirty_inodes(&sci->sc_dirty_files); | |
2229 | |
2230 | failed: | |
2231 | if (nilfs_doing_gc()) | |
2232 | nilfs_redirty_inodes(&sci->sc_gc_inodes); | |
2233 | nilfs_segctor_end_construction(sci, nilfs, err); | |
2234 | goto out; | |
2235 | } | |
2236 | ||
2237 | /** | |
2238 | * nilfs_segctor_start_timer - set timer of background write |
2239 | * @sci: nilfs_sc_info | |
2240 | * | |
2241 | * If the timer has already been set, it ignores the new request. | |
2242 | * This function MUST be called within a section locking the segment | |
2243 | * semaphore. | |
2244 | */ | |
2245 | static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci) | |
2246 | { | |
2247 | spin_lock(&sci->sc_state_lock); | |
2248 | if (sci->sc_timer && !(sci->sc_state & NILFS_SEGCTOR_COMMIT)) { | |
2249 | sci->sc_timer->expires = jiffies + sci->sc_interval; | |
2250 | add_timer(sci->sc_timer); | |
2251 | sci->sc_state |= NILFS_SEGCTOR_COMMIT; | |
2252 | } | |
2253 | spin_unlock(&sci->sc_state_lock); | |
2254 | } | |
2255 | ||
2256 | static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn) | |
2257 | { | |
2258 | spin_lock(&sci->sc_state_lock); | |
2259 | if (!(sci->sc_flush_request & (1 << bn))) { | |
2260 | unsigned long prev_req = sci->sc_flush_request; | |
2261 | ||
2262 | sci->sc_flush_request |= (1 << bn); | |
2263 | if (!prev_req) | |
2264 | wake_up(&sci->sc_wait_daemon); | |
2265 | } | |
2266 | spin_unlock(&sci->sc_state_lock); | |
2267 | } | |
2268 | ||
2269 | /** | |
2270 | * nilfs_flush_segment - trigger a segment construction for resource control | |
2271 | * @sb: super block | |
2272 | * @ino: inode number of the file to be flushed out. | |
2273 | */ | |
2274 | void nilfs_flush_segment(struct super_block *sb, ino_t ino) | |
2275 | { | |
2276 | struct nilfs_sb_info *sbi = NILFS_SB(sb); | |
2277 | struct nilfs_sc_info *sci = NILFS_SC(sbi); | |
2278 | ||
2279 | if (!sci || nilfs_doing_construction()) | |
2280 | return; | |
2281 | nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0); | |
2282 | /* assign bit 0 to data files */ | |
2283 | } | |
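/*
 * Flush requests form a bitmask in sci->sc_flush_request: bit 0
 * stands for ordinary data files and bit NILFS_DAT_INO for the DAT
 * metadata file, matching FLUSH_FILE_BIT and FLUSH_DAT_BIT defined
 * below.  A sketch of the mapping used above, with a hypothetical
 * helper name:
 */
static inline int nilfs_sketch_flush_bit(struct super_block *sb, ino_t ino)
{
	/* metadata files flush under their own (small) inode-number bit */
	return NILFS_MDT_INODE(sb, ino) ? (int)ino : 0;
}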
2284 | ||
2285 | struct nilfs_segctor_wait_request { |
2286 | wait_queue_t wq; | |
2287 | __u32 seq; | |
2288 | int err; | |
2289 | atomic_t done; | |
2290 | }; | |
2291 | ||
2292 | static int nilfs_segctor_sync(struct nilfs_sc_info *sci) | |
2293 | { | |
2294 | struct nilfs_segctor_wait_request wait_req; | |
2295 | int err = 0; | |
2296 | ||
2297 | spin_lock(&sci->sc_state_lock); | |
2298 | init_wait(&wait_req.wq); | |
2299 | wait_req.err = 0; | |
2300 | atomic_set(&wait_req.done, 0); | |
2301 | wait_req.seq = ++sci->sc_seq_request; | |
2302 | spin_unlock(&sci->sc_state_lock); | |
2303 | ||
2304 | init_waitqueue_entry(&wait_req.wq, current); | |
2305 | add_wait_queue(&sci->sc_wait_request, &wait_req.wq); | |
2306 | set_current_state(TASK_INTERRUPTIBLE); | |
2307 | wake_up(&sci->sc_wait_daemon); | |
2308 | ||
2309 | for (;;) { | |
2310 | if (atomic_read(&wait_req.done)) { | |
2311 | err = wait_req.err; | |
2312 | break; | |
2313 | } | |
2314 | if (!signal_pending(current)) { | |
2315 | schedule(); | |
2316 | continue; | |
2317 | } | |
2318 | err = -ERESTARTSYS; | |
2319 | break; | |
2320 | } | |
2321 | finish_wait(&sci->sc_wait_request, &wait_req.wq); | |
2322 | return err; | |
2323 | } | |
2324 | ||
2325 | static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err) | |
2326 | { | |
2327 | struct nilfs_segctor_wait_request *wrq, *n; | |
2328 | unsigned long flags; | |
2329 | ||
2330 | spin_lock_irqsave(&sci->sc_wait_request.lock, flags); | |
2331 | list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list, | |
2332 | wq.task_list) { | |
2333 | if (!atomic_read(&wrq->done) && | |
2334 | nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) { | |
2335 | wrq->err = err; | |
2336 | atomic_set(&wrq->done, 1); | |
2337 | } | |
2338 | if (atomic_read(&wrq->done)) { | |
2339 | wrq->wq.func(&wrq->wq, | |
2340 | TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, | |
2341 | 0, NULL); | |
2342 | } | |
2343 | } | |
2344 | spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags); | |
2345 | } | |
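/*
 * Synchronous callers and the daemon rendezvous through 32-bit
 * sequence numbers: nilfs_segctor_sync() takes a ticket
 * (++sc_seq_request) and sleeps on sc_wait_request, while
 * nilfs_segctor_wakeup() completes every waiter whose ticket is
 * covered by sc_seq_done.  The wrap-safe comparison nilfs_cnt32_ge()
 * lets the counters overflow harmlessly.
 */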
2346 | ||
2347 | /** | |
2348 | * nilfs_construct_segment - construct a logical segment | |
2349 | * @sb: super block | |
2350 | * | |
2351 | * Return Value: On success, 0 is returned. On errors, one of the following |
2352 | * negative error codes is returned. |
2353 | * | |
2354 | * %-EROFS - Read only filesystem. | |
2355 | * | |
2356 | * %-EIO - I/O error | |
2357 | * | |
2358 | * %-ENOSPC - No space left on device (only in a panic state). | |
2359 | * | |
2360 | * %-ERESTARTSYS - Interrupted. | |
2361 | * | |
2362 | * %-ENOMEM - Insufficient memory available. | |
2363 | */ | |
2364 | int nilfs_construct_segment(struct super_block *sb) | |
2365 | { | |
2366 | struct nilfs_sb_info *sbi = NILFS_SB(sb); | |
2367 | struct nilfs_sc_info *sci = NILFS_SC(sbi); | |
2368 | struct nilfs_transaction_info *ti; | |
2369 | int err; | |
2370 | ||
2371 | if (!sci) | |
2372 | return -EROFS; | |
2373 | ||
2374 | /* A call inside transactions causes a deadlock. */ | |
2375 | BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC); | |
2376 | ||
2377 | err = nilfs_segctor_sync(sci); | |
2378 | return err; | |
2379 | } | |
2380 | ||
2381 | /** | |
2382 | * nilfs_construct_dsync_segment - construct a data-only logical segment | |
2383 | * @sb: super block | |
2384 | * @inode: inode whose data blocks should be written out |
2385 | * @start: start byte offset | |
2386 | * @end: end byte offset (inclusive) | |
2387 | * |
2388 | * Return Value: On success, 0 is returned. On errors, one of the following |
2389 | * negative error codes is returned. |
2390 | * | |
2391 | * %-EROFS - Read only filesystem. | |
2392 | * | |
2393 | * %-EIO - I/O error | |
2394 | * | |
2395 | * %-ENOSPC - No space left on device (only in a panic state). | |
2396 | * | |
2397 | * %-ERESTARTSYS - Interrupted. | |
2398 | * | |
2399 | * %-ENOMEM - Insufficient memory available. | |
2400 | */ | |
2401 | int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, |
2402 | loff_t start, loff_t end) | |
2403 | { |
2404 | struct nilfs_sb_info *sbi = NILFS_SB(sb); | |
2405 | struct nilfs_sc_info *sci = NILFS_SC(sbi); | |
2406 | struct nilfs_inode_info *ii; | |
2407 | struct nilfs_transaction_info ti; | |
2408 | int err = 0; | |
2409 | ||
2410 | if (!sci) | |
2411 | return -EROFS; | |
2412 | ||
2413 | nilfs_transaction_lock(sbi, &ti, 0); | |
2414 | ||
2415 | ii = NILFS_I(inode); | |
2416 | if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) || | |
2417 | nilfs_test_opt(sbi, STRICT_ORDER) || | |
2418 | test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || | |
2419 | nilfs_discontinued(sbi->s_nilfs)) { | |
2420 | nilfs_transaction_unlock(sbi); | |
2421 | err = nilfs_segctor_sync(sci); | |
2422 | return err; | |
2423 | } | |
2424 | ||
2425 | spin_lock(&sbi->s_inode_lock); | |
2426 | if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && | |
2427 | !test_bit(NILFS_I_BUSY, &ii->i_state)) { | |
2428 | spin_unlock(&sbi->s_inode_lock); | |
2429 | nilfs_transaction_unlock(sbi); | |
2430 | return 0; | |
2431 | } | |
2432 | spin_unlock(&sbi->s_inode_lock); | |
2433 | sci->sc_dsync_inode = ii; |
2434 | sci->sc_dsync_start = start; | |
2435 | sci->sc_dsync_end = end; | |
2436 | |
2437 | err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC); | |
2438 | ||
2439 | nilfs_transaction_unlock(sbi); | |
2440 | return err; | |
2441 | } | |
2442 | ||
2443 | struct nilfs_segctor_req { | |
2444 | int mode; | |
2445 | __u32 seq_accepted; | |
2446 | int sc_err; /* construction failure */ | |
2447 | int sb_err; /* super block writeback failure */ | |
2448 | }; | |
2449 | ||
2450 | #define FLUSH_FILE_BIT (0x1) /* data file only */ | |
2451 | #define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */ | |
2452 | ||
2453 | static void nilfs_segctor_accept(struct nilfs_sc_info *sci, | |
2454 | struct nilfs_segctor_req *req) | |
2455 | { | |
2456 | req->sc_err = req->sb_err = 0; |
2457 | spin_lock(&sci->sc_state_lock); | |
2458 | req->seq_accepted = sci->sc_seq_request; | |
2459 | spin_unlock(&sci->sc_state_lock); | |
2460 | ||
2461 | if (sci->sc_timer) | |
2462 | del_timer_sync(sci->sc_timer); | |
2463 | } | |
2464 | ||
2465 | static void nilfs_segctor_notify(struct nilfs_sc_info *sci, | |
2466 | struct nilfs_segctor_req *req) | |
2467 | { | |
2468 | /* Clear requests (even when the construction failed) */ | |
2469 | spin_lock(&sci->sc_state_lock); | |
2470 | ||
2471 | sci->sc_state &= ~NILFS_SEGCTOR_COMMIT; | |
2472 | ||
2473 | if (req->mode == SC_LSEG_SR) { | |
2474 | sci->sc_seq_done = req->seq_accepted; | |
2475 | nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err); | |
2476 | sci->sc_flush_request = 0; | |
2477 | } else if (req->mode == SC_FLUSH_FILE) | |
2478 | sci->sc_flush_request &= ~FLUSH_FILE_BIT; | |
2479 | else if (req->mode == SC_FLUSH_DAT) | |
2480 | sci->sc_flush_request &= ~FLUSH_DAT_BIT; | |
2481 | ||
2482 | spin_unlock(&sci->sc_state_lock); | |
2483 | } | |
2484 | ||
2485 | static int nilfs_segctor_construct(struct nilfs_sc_info *sci, | |
2486 | struct nilfs_segctor_req *req) | |
2487 | { | |
2488 | struct nilfs_sb_info *sbi = sci->sc_sbi; | |
2489 | struct the_nilfs *nilfs = sbi->s_nilfs; | |
2490 | int err = 0; | |
2491 | ||
2492 | if (nilfs_discontinued(nilfs)) | |
2493 | req->mode = SC_LSEG_SR; | |
2494 | if (!nilfs_segctor_confirm(sci)) { | |
2495 | err = nilfs_segctor_do_construct(sci, req->mode); | |
2496 | req->sc_err = err; | |
2497 | } | |
2498 | if (likely(!err)) { | |
2499 | if (req->mode != SC_FLUSH_DAT) | |
2500 | atomic_set(&nilfs->ns_ndirtyblks, 0); | |
2501 | if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && | |
2502 | nilfs_discontinued(nilfs)) { | |
2503 | down_write(&nilfs->ns_sem); | |
2504 | req->sb_err = nilfs_commit_super(sbi, 0); |
2505 | up_write(&nilfs->ns_sem); |
2506 | } | |
2507 | } | |
2508 | return err; | |
2509 | } | |
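/*
 * Here the super block is rewritten only when a super root was
 * written and the log position became discontinuous with the one
 * recorded on disk; plain flushes leave it untouched, keeping
 * frequent partial constructions cheap.
 */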
2510 | ||
2511 | static void nilfs_construction_timeout(unsigned long data) | |
2512 | { | |
2513 | struct task_struct *p = (struct task_struct *)data; | |
2514 | wake_up_process(p); | |
2515 | } | |
2516 | ||
2517 | static void | |
2518 | nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) | |
2519 | { | |
2520 | struct nilfs_inode_info *ii, *n; | |
2521 | ||
2522 | list_for_each_entry_safe(ii, n, head, i_dirty) { | |
2523 | if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) | |
2524 | continue; | |
2525 | hlist_del_init(&ii->vfs_inode.i_hash); | |
2526 | list_del_init(&ii->i_dirty); | |
2527 | nilfs_clear_gcinode(&ii->vfs_inode); | |
2528 | } | |
2529 | } | |
2530 | ||
2531 | int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, |
2532 | void **kbufs) | |
2533 | { |
2534 | struct nilfs_sb_info *sbi = NILFS_SB(sb); | |
2535 | struct nilfs_sc_info *sci = NILFS_SC(sbi); | |
2536 | struct the_nilfs *nilfs = sbi->s_nilfs; | |
2537 | struct nilfs_transaction_info ti; | |
2538 | struct nilfs_segctor_req req = { .mode = SC_LSEG_SR }; | |
2539 | int err; | |
2540 | ||
2541 | if (unlikely(!sci)) | |
2542 | return -EROFS; | |
2543 | ||
2544 | nilfs_transaction_lock(sbi, &ti, 1); | |
2545 | ||
2546 | err = nilfs_init_gcdat_inode(nilfs); | |
2547 | if (unlikely(err)) | |
2548 | goto out_unlock; | |
2549 | ||
2550 | err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs); |
2551 | if (unlikely(err)) |
2552 | goto out_unlock; | |
2553 | ||
2554 | sci->sc_freesegs = kbufs[4]; |
2555 | sci->sc_nfreesegs = argv[4].v_nmembs; | |
2556 | list_splice_init(&nilfs->ns_gc_inodes, sci->sc_gc_inodes.prev); |
2557 | ||
2558 | for (;;) { | |
2559 | nilfs_segctor_accept(sci, &req); | |
2560 | err = nilfs_segctor_construct(sci, &req); | |
2561 | nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes); | |
2562 | nilfs_segctor_notify(sci, &req); | |
2563 | ||
2564 | if (likely(!err)) | |
2565 | break; | |
2566 | ||
2567 | nilfs_warning(sb, __func__, | |
2568 | "segment construction failed. (err=%d)", err); | |
2569 | set_current_state(TASK_INTERRUPTIBLE); | |
2570 | schedule_timeout(sci->sc_interval); | |
2571 | } | |
2572 | ||
2573 | out_unlock: | |
2574 | sci->sc_freesegs = NULL; |
2575 | sci->sc_nfreesegs = 0; | |
2576 | nilfs_clear_gcdat_inode(nilfs); |
2577 | nilfs_transaction_unlock(sbi); | |
2578 | return err; | |
2579 | } | |
2580 | ||
2581 | static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode) | |
2582 | { | |
2583 | struct nilfs_sb_info *sbi = sci->sc_sbi; | |
2584 | struct nilfs_transaction_info ti; | |
2585 | struct nilfs_segctor_req req = { .mode = mode }; | |
2586 | ||
2587 | nilfs_transaction_lock(sbi, &ti, 0); | |
2588 | ||
2589 | nilfs_segctor_accept(sci, &req); | |
2590 | nilfs_segctor_construct(sci, &req); | |
2591 | nilfs_segctor_notify(sci, &req); | |
2592 | ||
2593 | /* | |
2594 | * An unclosed segment should be retried. We do this using sc_timer. |
2595 | * A timeout of sc_timer invokes a complete construction, which |
2596 | * closes the current logical segment. |
2597 | */ | |
2598 | if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) | |
2599 | nilfs_segctor_start_timer(sci); | |
2600 | ||
2601 | nilfs_transaction_unlock(sbi); | |
2602 | } | |
2603 | ||
2604 | static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci) | |
2605 | { | |
2606 | int mode = 0; | |
2607 | int err; | |
2608 | ||
2609 | spin_lock(&sci->sc_state_lock); | |
2610 | mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ? | |
2611 | SC_FLUSH_DAT : SC_FLUSH_FILE; | |
2612 | spin_unlock(&sci->sc_state_lock); | |
2613 | ||
2614 | if (mode) { | |
2615 | err = nilfs_segctor_do_construct(sci, mode); | |
2616 | ||
2617 | spin_lock(&sci->sc_state_lock); | |
2618 | sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ? | |
2619 | ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT; | |
2620 | spin_unlock(&sci->sc_state_lock); | |
2621 | } | |
2622 | clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags); | |
2623 | } | |
2624 | ||
2625 | static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci) | |
2626 | { | |
2627 | if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || | |
2628 | time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) { | |
2629 | if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT)) | |
2630 | return SC_FLUSH_FILE; | |
2631 | else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT)) | |
2632 | return SC_FLUSH_DAT; | |
2633 | } | |
2634 | return SC_LSEG_SR; | |
2635 | } | |
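/*
 * A flush stays a partial construction only while no logical segment
 * is left unclosed, or the unclosed one is still young (within
 * sc_mjcp_freq of sc_lseg_stime), and the pending requests are
 * homogeneous; any other state escalates to a full construction with
 * a super root (SC_LSEG_SR).
 */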
2636 | ||
2637 | /** | |
2638 | * nilfs_segctor_thread - main loop of the segment constructor thread. | |
2639 | * @arg: pointer to a struct nilfs_sc_info. | |
2640 | * | |
2641 | * nilfs_segctor_thread() initializes a timer and serves as a daemon | |
2642 | * to execute segment constructions. | |
2643 | */ | |
2644 | static int nilfs_segctor_thread(void *arg) | |
2645 | { | |
2646 | struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; | |
2647 | struct timer_list timer; | |
2648 | int timeout = 0; | |
2649 | ||
2650 | init_timer(&timer); | |
2651 | timer.data = (unsigned long)current; | |
2652 | timer.function = nilfs_construction_timeout; | |
2653 | sci->sc_timer = &timer; | |
2654 | ||
2655 | /* start sync. */ | |
2656 | sci->sc_task = current; | |
2657 | wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */ | |
2658 | printk(KERN_INFO | |
2659 | "segctord starting. Construction interval = %lu seconds, " | |
2660 | "CP frequency < %lu seconds\n", | |
2661 | sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); | |
2662 | ||
2663 | spin_lock(&sci->sc_state_lock); | |
2664 | loop: | |
2665 | for (;;) { | |
2666 | int mode; | |
2667 | ||
2668 | if (sci->sc_state & NILFS_SEGCTOR_QUIT) | |
2669 | goto end_thread; | |
2670 | ||
2671 | if (timeout || sci->sc_seq_request != sci->sc_seq_done) | |
2672 | mode = SC_LSEG_SR; | |
2673 | else if (!sci->sc_flush_request) | |
2674 | break; | |
2675 | else | |
2676 | mode = nilfs_segctor_flush_mode(sci); | |
2677 | ||
2678 | spin_unlock(&sci->sc_state_lock); | |
2679 | nilfs_segctor_thread_construct(sci, mode); | |
2680 | spin_lock(&sci->sc_state_lock); | |
2681 | timeout = 0; | |
2682 | } | |
2683 | ||
2684 | ||
2685 | if (freezing(current)) { | |
2686 | spin_unlock(&sci->sc_state_lock); | |
2687 | refrigerator(); | |
2688 | spin_lock(&sci->sc_state_lock); | |
2689 | } else { | |
2690 | DEFINE_WAIT(wait); | |
2691 | int should_sleep = 1; | |
2692 | ||
2693 | prepare_to_wait(&sci->sc_wait_daemon, &wait, | |
2694 | TASK_INTERRUPTIBLE); | |
2695 | ||
2696 | if (sci->sc_seq_request != sci->sc_seq_done) | |
2697 | should_sleep = 0; | |
2698 | else if (sci->sc_flush_request) | |
2699 | should_sleep = 0; | |
2700 | else if (sci->sc_state & NILFS_SEGCTOR_COMMIT) | |
2701 | should_sleep = time_before(jiffies, | |
2702 | sci->sc_timer->expires); | |
2703 | ||
2704 | if (should_sleep) { | |
2705 | spin_unlock(&sci->sc_state_lock); | |
2706 | schedule(); | |
2707 | spin_lock(&sci->sc_state_lock); | |
2708 | } | |
2709 | finish_wait(&sci->sc_wait_daemon, &wait); | |
2710 | timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && | |
2711 | time_after_eq(jiffies, sci->sc_timer->expires)); | |
2712 | } | |
2713 | goto loop; | |
2714 | ||
2715 | end_thread: | |
2716 | spin_unlock(&sci->sc_state_lock); | |
2717 | del_timer_sync(sci->sc_timer); | |
2718 | sci->sc_timer = NULL; | |
2719 | ||
2720 | /* end sync. */ | |
2721 | sci->sc_task = NULL; | |
2722 | wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */ | |
2723 | return 0; | |
2724 | } | |
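/*
 * The daemon loop services three wake sources: synchronous requests
 * (sc_seq_request != sc_seq_done), flush requests (sc_flush_request),
 * and the construction timer.  Between them it sleeps interruptibly
 * and cooperates with the freezer so that suspend is never blocked by
 * segctord.
 */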
2725 | ||
2726 | static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci) | |
2727 | { | |
2728 | struct task_struct *t; | |
2729 | ||
2730 | t = kthread_run(nilfs_segctor_thread, sci, "segctord"); | |
2731 | if (IS_ERR(t)) { | |
2732 | int err = PTR_ERR(t); | |
2733 | ||
2734 | printk(KERN_ERR "NILFS: error %d creating segctord thread\n", | |
2735 | err); | |
2736 | return err; | |
2737 | } | |
2738 | wait_event(sci->sc_wait_task, sci->sc_task != NULL); | |
2739 | return 0; | |
2740 | } | |
2741 | ||
2742 | static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci) | |
2743 | { | |
2744 | sci->sc_state |= NILFS_SEGCTOR_QUIT; | |
2745 | ||
2746 | while (sci->sc_task) { | |
2747 | wake_up(&sci->sc_wait_daemon); | |
2748 | spin_unlock(&sci->sc_state_lock); | |
2749 | wait_event(sci->sc_wait_task, sci->sc_task == NULL); | |
2750 | spin_lock(&sci->sc_state_lock); | |
2751 | } | |
2752 | } | |
2753 | ||
2754 | static int nilfs_segctor_init(struct nilfs_sc_info *sci) |
2755 | { |
2756 | sci->sc_seq_done = sci->sc_seq_request; |
2757 | ||
2758 | return nilfs_segctor_start_thread(sci); |
2759 | } |
2760 | ||
2761 | /* | |
2762 | * Setup & clean-up functions | |
2763 | */ | |
2764 | static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi) | |
2765 | { | |
2766 | struct nilfs_sc_info *sci; | |
2767 | ||
2768 | sci = kzalloc(sizeof(*sci), GFP_KERNEL); | |
2769 | if (!sci) | |
2770 | return NULL; | |
2771 | ||
2772 | sci->sc_sbi = sbi; | |
2773 | sci->sc_super = sbi->s_super; | |
2774 | ||
2775 | init_waitqueue_head(&sci->sc_wait_request); | |
2776 | init_waitqueue_head(&sci->sc_wait_daemon); | |
2777 | init_waitqueue_head(&sci->sc_wait_task); | |
2778 | spin_lock_init(&sci->sc_state_lock); | |
2779 | INIT_LIST_HEAD(&sci->sc_dirty_files); | |
2780 | INIT_LIST_HEAD(&sci->sc_segbufs); | |
2781 | INIT_LIST_HEAD(&sci->sc_gc_inodes); | |
2782 | INIT_LIST_HEAD(&sci->sc_copied_buffers); |
2783 | ||
2784 | sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT; | |
2785 | sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ; | |
2786 | sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK; | |
2787 | ||
2788 | if (sbi->s_interval) | |
2789 | sci->sc_interval = sbi->s_interval; | |
2790 | if (sbi->s_watermark) | |
2791 | sci->sc_watermark = sbi->s_watermark; | |
2792 | return sci; | |
2793 | } | |
2794 | ||
2795 | static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) | |
2796 | { | |
2797 | int ret, retrycount = NILFS_SC_CLEANUP_RETRY; | |
2798 | ||
2799 | /* The segctord thread was stopped and its timer was removed, |
2800 | but some tasks may remain. */ |
2801 | do { | |
2802 | struct nilfs_sb_info *sbi = sci->sc_sbi; | |
2803 | struct nilfs_transaction_info ti; | |
2804 | struct nilfs_segctor_req req = { .mode = SC_LSEG_SR }; | |
2805 | ||
2806 | nilfs_transaction_lock(sbi, &ti, 0); | |
2807 | nilfs_segctor_accept(sci, &req); | |
2808 | ret = nilfs_segctor_construct(sci, &req); | |
2809 | nilfs_segctor_notify(sci, &req); | |
2810 | nilfs_transaction_unlock(sbi); | |
2811 | ||
2812 | } while (ret && retrycount-- > 0); | |
2813 | } | |
2814 | ||
2815 | /** | |
2816 | * nilfs_segctor_destroy - destroy the segment constructor. | |
2817 | * @sci: nilfs_sc_info | |
2818 | * | |
2819 | * nilfs_segctor_destroy() kills the segctord thread and frees | |
2820 | * the nilfs_sc_info struct. | |
2821 | * Caller must hold the segment semaphore. | |
2822 | */ | |
2823 | static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) | |
2824 | { | |
2825 | struct nilfs_sb_info *sbi = sci->sc_sbi; | |
2826 | int flag; | |
2827 | ||
2828 | up_write(&sbi->s_nilfs->ns_segctor_sem); | |
2829 | ||
2830 | spin_lock(&sci->sc_state_lock); | |
2831 | nilfs_segctor_kill_thread(sci); | |
2832 | flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request | |
2833 | || sci->sc_seq_request != sci->sc_seq_done); | |
2834 | spin_unlock(&sci->sc_state_lock); | |
2835 | ||
2836 | if (flag || nilfs_segctor_confirm(sci)) | |
2837 | nilfs_segctor_write_out(sci); | |
2838 | ||
2839 | WARN_ON(!list_empty(&sci->sc_copied_buffers)); |
2840 | |
2841 | if (!list_empty(&sci->sc_dirty_files)) { | |
2842 | nilfs_warning(sbi->s_super, __func__, | |
2843 | "dirty file(s) after the final construction\n"); | |
2844 | nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1); | |
2845 | } | |
2846 | ||
2847 | WARN_ON(!list_empty(&sci->sc_segbufs)); |
2848 | ||
2849 | down_write(&sbi->s_nilfs->ns_segctor_sem); |
2850 | ||
2851 | kfree(sci); | |
2852 | } | |
2853 | ||
2854 | /** | |
2855 | * nilfs_attach_segment_constructor - attach a segment constructor | |
2856 | * @sbi: nilfs_sb_info | |
2857 | * |
2858 | * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info, | |
2859 | * initializes it, and starts the segment constructor. |
2860 | * | |
2861 | * Return Value: On success, 0 is returned. On error, one of the following | |
2862 | * negative error codes is returned. |
2863 | * | |
2864 | * %-ENOMEM - Insufficient memory available. | |
2865 | */ | |
2866 | int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi) |
2867 | { |
2868 | struct the_nilfs *nilfs = sbi->s_nilfs; | |
2869 | int err; | |
2870 | ||
2871 | /* Each field of nilfs_segctor is cleared through the initialization | |
2872 | of super-block info */ | |
2873 | sbi->s_sc_info = nilfs_segctor_new(sbi); | |
2874 | if (!sbi->s_sc_info) | |
2875 | return -ENOMEM; | |
2876 | ||
2877 | nilfs_attach_writer(nilfs, sbi); | |
2877 | err = nilfs_segctor_init(NILFS_SC(sbi)); |
2879 | if (err) { |
2880 | nilfs_detach_writer(nilfs, sbi); | |
2881 | kfree(sbi->s_sc_info); | |
2882 | sbi->s_sc_info = NULL; | |
2883 | } | |
2884 | return err; | |
2885 | } | |
2886 | ||
2887 | /** | |
2888 | * nilfs_detach_segment_constructor - destroy the segment constructor | |
2889 | * @sbi: nilfs_sb_info | |
2890 | * | |
2891 | * nilfs_detach_segment_constructor() kills the segment constructor daemon, | |
2892 | * frees the struct nilfs_sc_info, and destroys the dirty file list. |
2893 | */ | |
2894 | void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi) | |
2895 | { | |
2896 | struct the_nilfs *nilfs = sbi->s_nilfs; | |
2897 | LIST_HEAD(garbage_list); | |
2898 | ||
2899 | down_write(&nilfs->ns_segctor_sem); | |
2900 | if (NILFS_SC(sbi)) { | |
2901 | nilfs_segctor_destroy(NILFS_SC(sbi)); | |
2902 | sbi->s_sc_info = NULL; | |
2903 | } | |
2904 | ||
2905 | /* Force to free the list of dirty files */ | |
2906 | spin_lock(&sbi->s_inode_lock); | |
2907 | if (!list_empty(&sbi->s_dirty_files)) { | |
2908 | list_splice_init(&sbi->s_dirty_files, &garbage_list); | |
2909 | nilfs_warning(sbi->s_super, __func__, | |
2910 | "Non-empty dirty list after the last " |
2911 | "segment construction\n"); | |
2912 | } | |
2913 | spin_unlock(&sbi->s_inode_lock); | |
2914 | up_write(&nilfs->ns_segctor_sem); | |
2915 | ||
2916 | nilfs_dispose_list(sbi, &garbage_list, 1); | |
2917 | nilfs_detach_writer(nilfs, sbi); | |
2918 | } |