1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * segment.c - NILFS segment constructor.
4 *
5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6 *
7 * Written by Ryusuke Konishi.
8 *
9 */
10
11 #include <linux/pagemap.h>
12 #include <linux/buffer_head.h>
13 #include <linux/writeback.h>
14 #include <linux/bitops.h>
15 #include <linux/bio.h>
16 #include <linux/completion.h>
17 #include <linux/blkdev.h>
18 #include <linux/backing-dev.h>
19 #include <linux/freezer.h>
20 #include <linux/kthread.h>
21 #include <linux/crc32.h>
22 #include <linux/pagevec.h>
23 #include <linux/slab.h>
24 #include <linux/sched/signal.h>
25
26 #include "nilfs.h"
27 #include "btnode.h"
28 #include "page.h"
29 #include "segment.h"
30 #include "sufile.h"
31 #include "cpfile.h"
32 #include "ifile.h"
33 #include "segbuf.h"
34
35
36 /*
37 * Segment constructor
38 */
39 #define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */
40
41 #define SC_MAX_SEGDELTA 64 /*
42 * Upper limit of the number of segments
43 * appended in collection retry loop
44 */
45
46 /* Construction mode */
47 enum {
48 SC_LSEG_SR = 1, /* Make a logical segment having a super root */
49 SC_LSEG_DSYNC, /*
50 * Flush data blocks of a given file and make
51 * a logical segment without a super root.
52 */
53 SC_FLUSH_FILE, /*
54 * Flush data files, leads to segment writes without
55 * creating a checkpoint.
56 */
57 SC_FLUSH_DAT, /*
58 * Flush DAT file. This also creates segments
59 * without a checkpoint.
60 */
61 };
62
63 /* Stage numbers of dirty block collection */
64 enum {
65 NILFS_ST_INIT = 0,
66 NILFS_ST_GC, /* Collecting dirty blocks for GC */
67 NILFS_ST_FILE,
68 NILFS_ST_IFILE,
69 NILFS_ST_CPFILE,
70 NILFS_ST_SUFILE,
71 NILFS_ST_DAT,
72 NILFS_ST_SR, /* Super root */
73 NILFS_ST_DSYNC, /* Data sync blocks */
74 NILFS_ST_DONE,
75 };
76
77 #define CREATE_TRACE_POINTS
78 #include <trace/events/nilfs2.h>
79
80 /*
81 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
82 * wrapper functions around the stage counter (nilfs_sc_info->sc_stage.scnt).
83 * Users of the variable must go through them because every transition of the
84 * stage counter must emit a trace event (trace_nilfs2_collection_stage_transition).
85 *
86 * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't
87 * produce tracepoint events. It is provided just for making the intention
88 * clear.
89 */
90 static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
91 {
92 sci->sc_stage.scnt++;
93 trace_nilfs2_collection_stage_transition(sci);
94 }
95
96 static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
97 {
98 sci->sc_stage.scnt = next_scnt;
99 trace_nilfs2_collection_stage_transition(sci);
100 }
101
102 static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
103 {
104 return sci->sc_stage.scnt;
105 }
106
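/*
 * Illustrative sketch (not part of the original source): advancing the
 * collection stage goes through the wrappers above so that each
 * transition is traced, e.g.:
 *
 *	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
 *	...
 *	nilfs_sc_cstage_inc(sci);	(NILFS_ST_INIT -> NILFS_ST_GC)
 *
 * Assigning sci->sc_stage.scnt directly would bypass
 * trace_nilfs2_collection_stage_transition() and is therefore avoided.
 */
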
107 /* State flags of collection */
108 #define NILFS_CF_NODE 0x0001 /* Collecting node blocks */
109 #define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */
110 #define NILFS_CF_SUFREED 0x0004 /* segment usages have been freed */
111 #define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
112
113 /* Operations depending on the construction mode and file type */
114 struct nilfs_sc_operations {
115 int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
116 struct inode *);
117 int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
118 struct inode *);
119 int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
120 struct inode *);
121 void (*write_data_binfo)(struct nilfs_sc_info *,
122 struct nilfs_segsum_pointer *,
123 union nilfs_binfo *);
124 void (*write_node_binfo)(struct nilfs_sc_info *,
125 struct nilfs_segsum_pointer *,
126 union nilfs_binfo *);
127 };
128
129 /*
130 * Other definitions
131 */
132 static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
133 static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
134 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
135 static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
136
137 #define nilfs_cnt32_gt(a, b) \
138 (typecheck(__u32, a) && typecheck(__u32, b) && \
139 ((__s32)(b) - (__s32)(a) < 0))
140 #define nilfs_cnt32_ge(a, b) \
141 (typecheck(__u32, a) && typecheck(__u32, b) && \
142 ((__s32)(a) - (__s32)(b) >= 0))
143 #define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
144 #define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
145
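/*
 * Worked example (added for clarity): these comparisons are
 * wraparound-safe in the same spirit as the kernel's time_after()
 * helpers.  For instance, nilfs_cnt32_gt(1, 0xffffffff) is true because
 * (__s32)0xffffffff - (__s32)1 == -2 < 0, so a counter that has wrapped
 * around to 1 still compares as "greater than" 0xffffffff.
 */
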
146 static int nilfs_prepare_segment_lock(struct super_block *sb,
147 struct nilfs_transaction_info *ti)
148 {
149 struct nilfs_transaction_info *cur_ti = current->journal_info;
150 void *save = NULL;
151
152 if (cur_ti) {
153 if (cur_ti->ti_magic == NILFS_TI_MAGIC)
154 return ++cur_ti->ti_count;
155
156 /*
157 * If the journal_info field is occupied by another FS,
158 * it is saved and will be restored on
159 * nilfs_transaction_commit().
160 */
161 nilfs_warn(sb, "journal info from a different FS");
162 save = current->journal_info;
163 }
164 if (!ti) {
165 ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
166 if (!ti)
167 return -ENOMEM;
168 ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
169 } else {
170 ti->ti_flags = 0;
171 }
172 ti->ti_count = 0;
173 ti->ti_save = save;
174 ti->ti_magic = NILFS_TI_MAGIC;
175 current->journal_info = ti;
176 return 0;
177 }
178
179 /**
180 * nilfs_transaction_begin - start indivisible file operations.
181 * @sb: super block
182 * @ti: nilfs_transaction_info
183 * @vacancy_check: flags for vacancy rate checks
184 *
185 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
186 * the segment semaphore, to make segment construction and write tasks
187 * mutually exclusive. It is used in pairs with nilfs_transaction_commit().
188 * The region enclosed by these two functions can be nested. To avoid a
189 * deadlock, the semaphore is only acquired or released in the outermost call.
190 *
191 * This function allocates a nilfs_transaction_info struct to keep context
192 * information in it. The struct is initialized and hooked onto the current
193 * task in the outermost call. If a pre-allocated struct is given in @ti, it
194 * is used instead; otherwise a new struct is allocated from a slab.
195 *
196 * When the @vacancy_check flag is set, this function checks the amount of
197 * free space and returns %-ENOSPC if the disk is nearly full.
198 *
199 * Return Value: On success, 0 is returned. On error, one of the following
200 * negative error codes is returned.
201 *
202 * %-ENOMEM - Insufficient memory available.
203 *
204 * %-ENOSPC - No space left on device
205 */
206 int nilfs_transaction_begin(struct super_block *sb,
207 struct nilfs_transaction_info *ti,
208 int vacancy_check)
209 {
210 struct the_nilfs *nilfs;
211 int ret = nilfs_prepare_segment_lock(sb, ti);
212 struct nilfs_transaction_info *trace_ti;
213
214 if (unlikely(ret < 0))
215 return ret;
216 if (ret > 0) {
217 trace_ti = current->journal_info;
218
219 trace_nilfs2_transaction_transition(sb, trace_ti,
220 trace_ti->ti_count, trace_ti->ti_flags,
221 TRACE_NILFS2_TRANSACTION_BEGIN);
222 return 0;
223 }
224
225 sb_start_intwrite(sb);
226
227 nilfs = sb->s_fs_info;
228 down_read(&nilfs->ns_segctor_sem);
229 if (vacancy_check && nilfs_near_disk_full(nilfs)) {
230 up_read(&nilfs->ns_segctor_sem);
231 ret = -ENOSPC;
232 goto failed;
233 }
234
235 trace_ti = current->journal_info;
236 trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
237 trace_ti->ti_flags,
238 TRACE_NILFS2_TRANSACTION_BEGIN);
239 return 0;
240
241 failed:
242 ti = current->journal_info;
243 current->journal_info = ti->ti_save;
244 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
245 kmem_cache_free(nilfs_transaction_cachep, ti);
246 sb_end_intwrite(sb);
247 return ret;
248 }
249
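/*
 * Typical call pattern (an illustrative sketch, not lifted from a real
 * caller):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	... perform file operations that dirty blocks ...
 *	err = nilfs_transaction_commit(sb);
 *
 * Inner begin/commit pairs only adjust ti_count; the segment semaphore
 * is taken and released in the outermost pair.  nilfs_transaction_abort(sb)
 * is the error-path counterpart and releases the semaphore without
 * requesting segment construction.
 */
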
250 /**
251 * nilfs_transaction_commit - commit indivisible file operations.
252 * @sb: super block
253 *
254 * nilfs_transaction_commit() releases the read semaphore which is
255 * acquired by nilfs_transaction_begin(). This is only performed
256 * in the outermost call of this function. If a commit flag is set,
257 * nilfs_transaction_commit() sets a timer to start the segment
258 * constructor. If a sync flag is set, it starts construction
259 * directly.
260 */
261 int nilfs_transaction_commit(struct super_block *sb)
262 {
263 struct nilfs_transaction_info *ti = current->journal_info;
264 struct the_nilfs *nilfs = sb->s_fs_info;
265 int err = 0;
266
267 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
268 ti->ti_flags |= NILFS_TI_COMMIT;
269 if (ti->ti_count > 0) {
270 ti->ti_count--;
271 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
272 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
273 return 0;
274 }
275 if (nilfs->ns_writer) {
276 struct nilfs_sc_info *sci = nilfs->ns_writer;
277
278 if (ti->ti_flags & NILFS_TI_COMMIT)
279 nilfs_segctor_start_timer(sci);
280 if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
281 nilfs_segctor_do_flush(sci, 0);
282 }
283 up_read(&nilfs->ns_segctor_sem);
284 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
285 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
286
287 current->journal_info = ti->ti_save;
288
289 if (ti->ti_flags & NILFS_TI_SYNC)
290 err = nilfs_construct_segment(sb);
291 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
292 kmem_cache_free(nilfs_transaction_cachep, ti);
293 sb_end_intwrite(sb);
294 return err;
295 }
296
297 void nilfs_transaction_abort(struct super_block *sb)
298 {
299 struct nilfs_transaction_info *ti = current->journal_info;
300 struct the_nilfs *nilfs = sb->s_fs_info;
301
302 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
303 if (ti->ti_count > 0) {
304 ti->ti_count--;
305 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
306 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
307 return;
308 }
309 up_read(&nilfs->ns_segctor_sem);
310
311 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
312 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
313
314 current->journal_info = ti->ti_save;
315 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
316 kmem_cache_free(nilfs_transaction_cachep, ti);
317 sb_end_intwrite(sb);
318 }
319
320 void nilfs_relax_pressure_in_lock(struct super_block *sb)
321 {
322 struct the_nilfs *nilfs = sb->s_fs_info;
323 struct nilfs_sc_info *sci = nilfs->ns_writer;
324
325 if (!sci || !sci->sc_flush_request)
326 return;
327
328 set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
329 up_read(&nilfs->ns_segctor_sem);
330
331 down_write(&nilfs->ns_segctor_sem);
332 if (sci->sc_flush_request &&
333 test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
334 struct nilfs_transaction_info *ti = current->journal_info;
335
336 ti->ti_flags |= NILFS_TI_WRITER;
337 nilfs_segctor_do_immediate_flush(sci);
338 ti->ti_flags &= ~NILFS_TI_WRITER;
339 }
340 downgrade_write(&nilfs->ns_segctor_sem);
341 }
342
343 static void nilfs_transaction_lock(struct super_block *sb,
344 struct nilfs_transaction_info *ti,
345 int gcflag)
346 {
347 struct nilfs_transaction_info *cur_ti = current->journal_info;
348 struct the_nilfs *nilfs = sb->s_fs_info;
349 struct nilfs_sc_info *sci = nilfs->ns_writer;
350
351 WARN_ON(cur_ti);
352 ti->ti_flags = NILFS_TI_WRITER;
353 ti->ti_count = 0;
354 ti->ti_save = cur_ti;
355 ti->ti_magic = NILFS_TI_MAGIC;
356 current->journal_info = ti;
357
358 for (;;) {
359 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
360 ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
361
362 down_write(&nilfs->ns_segctor_sem);
363 if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
364 break;
365
366 nilfs_segctor_do_immediate_flush(sci);
367
368 up_write(&nilfs->ns_segctor_sem);
369 cond_resched();
370 }
371 if (gcflag)
372 ti->ti_flags |= NILFS_TI_GC;
373
374 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
375 ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
376 }
377
378 static void nilfs_transaction_unlock(struct super_block *sb)
379 {
380 struct nilfs_transaction_info *ti = current->journal_info;
381 struct the_nilfs *nilfs = sb->s_fs_info;
382
383 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
384 BUG_ON(ti->ti_count > 0);
385
386 up_write(&nilfs->ns_segctor_sem);
387 current->journal_info = ti->ti_save;
388
389 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
390 ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
391 }
392
393 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
394 struct nilfs_segsum_pointer *ssp,
395 unsigned int bytes)
396 {
397 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
398 unsigned int blocksize = sci->sc_super->s_blocksize;
399 void *p;
400
401 if (unlikely(ssp->offset + bytes > blocksize)) {
402 ssp->offset = 0;
403 BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
404 &segbuf->sb_segsum_buffers));
405 ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
406 }
407 p = ssp->bh->b_data + ssp->offset;
408 ssp->offset += bytes;
409 return p;
410 }
411
412 /**
413 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
414 * @sci: nilfs_sc_info
415 */
416 static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
417 {
418 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
419 struct buffer_head *sumbh;
420 unsigned int sumbytes;
421 unsigned int flags = 0;
422 int err;
423
424 if (nilfs_doing_gc())
425 flags = NILFS_SS_GC;
426 err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
427 if (unlikely(err))
428 return err;
429
430 sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
431 sumbytes = segbuf->sb_sum.sumbytes;
432 sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes;
433 sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes;
434 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
435 return 0;
436 }
437
438 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
439 {
440 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
441 if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
442 return -E2BIG; /*
443 * The current segment is filled up
444 * (internal code)
445 */
446 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
447 return nilfs_segctor_reset_segment_buffer(sci);
448 }
449
450 static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
451 {
452 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
453 int err;
454
455 if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
456 err = nilfs_segctor_feed_segment(sci);
457 if (err)
458 return err;
459 segbuf = sci->sc_curseg;
460 }
461 err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
462 if (likely(!err))
463 segbuf->sb_sum.flags |= NILFS_SS_SR;
464 return err;
465 }
466
467 /*
468 * Functions for making segment summary and payloads
469 */
470 static int nilfs_segctor_segsum_block_required(
471 struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
472 unsigned int binfo_size)
473 {
474 unsigned int blocksize = sci->sc_super->s_blocksize;
475 /* The sizes of finfo and binfo are small enough relative to blocksize */
476
477 return ssp->offset + binfo_size +
478 (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
479 blocksize;
480 }
481
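/*
 * Worked example for nilfs_segctor_segsum_block_required() (assuming a
 * 4096-byte block size): with ssp->offset at 4080 and a 24-byte binfo,
 * 4080 + 24 > 4096, so a new summary block is required.  When sc_blk_cnt
 * is zero, sizeof(struct nilfs_finfo) is added as well, since a fresh
 * finfo entry precedes the first binfo.
 */
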
482 static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
483 struct inode *inode)
484 {
485 sci->sc_curseg->sb_sum.nfinfo++;
486 sci->sc_binfo_ptr = sci->sc_finfo_ptr;
487 nilfs_segctor_map_segsum_entry(
488 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
489
490 if (NILFS_I(inode)->i_root &&
491 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
492 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
493 /* skip finfo */
494 }
495
496 static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
497 struct inode *inode)
498 {
499 struct nilfs_finfo *finfo;
500 struct nilfs_inode_info *ii;
501 struct nilfs_segment_buffer *segbuf;
502 __u64 cno;
503
504 if (sci->sc_blk_cnt == 0)
505 return;
506
507 ii = NILFS_I(inode);
508
509 if (test_bit(NILFS_I_GCINODE, &ii->i_state))
510 cno = ii->i_cno;
511 else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
512 cno = 0;
513 else
514 cno = sci->sc_cno;
515
516 finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
517 sizeof(*finfo));
518 finfo->fi_ino = cpu_to_le64(inode->i_ino);
519 finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
520 finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
521 finfo->fi_cno = cpu_to_le64(cno);
522
523 segbuf = sci->sc_curseg;
524 segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
525 sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
526 sci->sc_finfo_ptr = sci->sc_binfo_ptr;
527 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
528 }
529
530 static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
531 struct buffer_head *bh,
532 struct inode *inode,
533 unsigned int binfo_size)
534 {
535 struct nilfs_segment_buffer *segbuf;
536 int required, err = 0;
537
538 retry:
539 segbuf = sci->sc_curseg;
540 required = nilfs_segctor_segsum_block_required(
541 sci, &sci->sc_binfo_ptr, binfo_size);
542 if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
543 nilfs_segctor_end_finfo(sci, inode);
544 err = nilfs_segctor_feed_segment(sci);
545 if (err)
546 return err;
547 goto retry;
548 }
549 if (unlikely(required)) {
550 err = nilfs_segbuf_extend_segsum(segbuf);
551 if (unlikely(err))
552 goto failed;
553 }
554 if (sci->sc_blk_cnt == 0)
555 nilfs_segctor_begin_finfo(sci, inode);
556
557 nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
558 /* Substitution to vblocknr is delayed until update_blocknr() */
559 nilfs_segbuf_add_file_buffer(segbuf, bh);
560 sci->sc_blk_cnt++;
561 failed:
562 return err;
563 }
564
565 /*
566 * Callback functions that enumerate, mark, and collect dirty blocks
567 */
568 static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
569 struct buffer_head *bh, struct inode *inode)
570 {
571 int err;
572
573 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
574 if (err < 0)
575 return err;
576
577 err = nilfs_segctor_add_file_block(sci, bh, inode,
578 sizeof(struct nilfs_binfo_v));
579 if (!err)
580 sci->sc_datablk_cnt++;
581 return err;
582 }
583
584 static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
585 struct buffer_head *bh,
586 struct inode *inode)
587 {
588 return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
589 }
590
591 static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
592 struct buffer_head *bh,
593 struct inode *inode)
594 {
595 WARN_ON(!buffer_dirty(bh));
596 return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
597 }
598
599 static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
600 struct nilfs_segsum_pointer *ssp,
601 union nilfs_binfo *binfo)
602 {
603 struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
604 sci, ssp, sizeof(*binfo_v));
605 *binfo_v = binfo->bi_v;
606 }
607
608 static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
609 struct nilfs_segsum_pointer *ssp,
610 union nilfs_binfo *binfo)
611 {
612 __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
613 sci, ssp, sizeof(*vblocknr));
614 *vblocknr = binfo->bi_v.bi_vblocknr;
615 }
616
617 static const struct nilfs_sc_operations nilfs_sc_file_ops = {
618 .collect_data = nilfs_collect_file_data,
619 .collect_node = nilfs_collect_file_node,
620 .collect_bmap = nilfs_collect_file_bmap,
621 .write_data_binfo = nilfs_write_file_data_binfo,
622 .write_node_binfo = nilfs_write_file_node_binfo,
623 };
624
625 static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
626 struct buffer_head *bh, struct inode *inode)
627 {
628 int err;
629
630 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
631 if (err < 0)
632 return err;
633
634 err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
635 if (!err)
636 sci->sc_datablk_cnt++;
637 return err;
638 }
639
640 static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
641 struct buffer_head *bh, struct inode *inode)
642 {
643 WARN_ON(!buffer_dirty(bh));
644 return nilfs_segctor_add_file_block(sci, bh, inode,
645 sizeof(struct nilfs_binfo_dat));
646 }
647
648 static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
649 struct nilfs_segsum_pointer *ssp,
650 union nilfs_binfo *binfo)
651 {
652 __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
653 sizeof(*blkoff));
654 *blkoff = binfo->bi_dat.bi_blkoff;
655 }
656
657 static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
658 struct nilfs_segsum_pointer *ssp,
659 union nilfs_binfo *binfo)
660 {
661 struct nilfs_binfo_dat *binfo_dat =
662 nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
663 *binfo_dat = binfo->bi_dat;
664 }
665
666 static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
667 .collect_data = nilfs_collect_dat_data,
668 .collect_node = nilfs_collect_file_node,
669 .collect_bmap = nilfs_collect_dat_bmap,
670 .write_data_binfo = nilfs_write_dat_data_binfo,
671 .write_node_binfo = nilfs_write_dat_node_binfo,
672 };
673
674 static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
675 .collect_data = nilfs_collect_file_data,
676 .collect_node = NULL,
677 .collect_bmap = NULL,
678 .write_data_binfo = nilfs_write_file_data_binfo,
679 .write_node_binfo = NULL,
680 };
681
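/*
 * Note (added for clarity): in dsync mode only data blocks of the given
 * file are written (see SC_LSEG_DSYNC), so the node and bmap callbacks
 * are intentionally NULL; nilfs_segctor_scan_file_dsync() collects data
 * buffers only and never invokes them.
 */
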
682 static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
683 struct list_head *listp,
684 size_t nlimit,
685 loff_t start, loff_t end)
686 {
687 struct address_space *mapping = inode->i_mapping;
688 struct pagevec pvec;
689 pgoff_t index = 0, last = ULONG_MAX;
690 size_t ndirties = 0;
691 int i;
692
693 if (unlikely(start != 0 || end != LLONG_MAX)) {
694 /*
695 * A valid range is given for sync-ing data pages. The
696 * range is rounded to page boundaries; extra dirty
697 * buffers may be included if blocksize < pagesize.
698 */
699 index = start >> PAGE_SHIFT;
700 last = end >> PAGE_SHIFT;
701 }
702 pagevec_init(&pvec);
703 repeat:
704 if (unlikely(index > last) ||
705 !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
706 PAGECACHE_TAG_DIRTY))
707 return ndirties;
708
709 for (i = 0; i < pagevec_count(&pvec); i++) {
710 struct buffer_head *bh, *head;
711 struct page *page = pvec.pages[i];
712
713 lock_page(page);
714 if (!page_has_buffers(page))
715 create_empty_buffers(page, i_blocksize(inode), 0);
716 unlock_page(page);
717
718 bh = head = page_buffers(page);
719 do {
720 if (!buffer_dirty(bh) || buffer_async_write(bh))
721 continue;
722 get_bh(bh);
723 list_add_tail(&bh->b_assoc_buffers, listp);
724 ndirties++;
725 if (unlikely(ndirties >= nlimit)) {
726 pagevec_release(&pvec);
727 cond_resched();
728 return ndirties;
729 }
730 } while (bh = bh->b_this_page, bh != head);
731 }
732 pagevec_release(&pvec);
733 cond_resched();
734 goto repeat;
735 }
736
737 static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
738 struct list_head *listp)
739 {
740 struct nilfs_inode_info *ii = NILFS_I(inode);
741 struct address_space *mapping = &ii->i_btnode_cache;
742 struct pagevec pvec;
743 struct buffer_head *bh, *head;
744 unsigned int i;
745 pgoff_t index = 0;
746
747 pagevec_init(&pvec);
748
749 while (pagevec_lookup_tag(&pvec, mapping, &index,
750 PAGECACHE_TAG_DIRTY)) {
751 for (i = 0; i < pagevec_count(&pvec); i++) {
752 bh = head = page_buffers(pvec.pages[i]);
753 do {
754 if (buffer_dirty(bh) &&
755 !buffer_async_write(bh)) {
756 get_bh(bh);
757 list_add_tail(&bh->b_assoc_buffers,
758 listp);
759 }
760 bh = bh->b_this_page;
761 } while (bh != head);
762 }
763 pagevec_release(&pvec);
764 cond_resched();
765 }
766 }
767
768 static void nilfs_dispose_list(struct the_nilfs *nilfs,
769 struct list_head *head, int force)
770 {
771 struct nilfs_inode_info *ii, *n;
772 struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
773 unsigned int nv = 0;
774
775 while (!list_empty(head)) {
776 spin_lock(&nilfs->ns_inode_lock);
777 list_for_each_entry_safe(ii, n, head, i_dirty) {
778 list_del_init(&ii->i_dirty);
779 if (force) {
780 if (unlikely(ii->i_bh)) {
781 brelse(ii->i_bh);
782 ii->i_bh = NULL;
783 }
784 } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
785 set_bit(NILFS_I_QUEUED, &ii->i_state);
786 list_add_tail(&ii->i_dirty,
787 &nilfs->ns_dirty_files);
788 continue;
789 }
790 ivec[nv++] = ii;
791 if (nv == SC_N_INODEVEC)
792 break;
793 }
794 spin_unlock(&nilfs->ns_inode_lock);
795
796 for (pii = ivec; nv > 0; pii++, nv--)
797 iput(&(*pii)->vfs_inode);
798 }
799 }
800
801 static void nilfs_iput_work_func(struct work_struct *work)
802 {
803 struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
804 sc_iput_work);
805 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
806
807 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
808 }
809
810 static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
811 struct nilfs_root *root)
812 {
813 int ret = 0;
814
815 if (nilfs_mdt_fetch_dirty(root->ifile))
816 ret++;
817 if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
818 ret++;
819 if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
820 ret++;
821 if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
822 ret++;
823 return ret;
824 }
825
826 static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
827 {
828 return list_empty(&sci->sc_dirty_files) &&
829 !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
830 sci->sc_nfreesegs == 0 &&
831 (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
832 }
833
834 static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
835 {
836 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
837 int ret = 0;
838
839 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
840 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
841
842 spin_lock(&nilfs->ns_inode_lock);
843 if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
844 ret++;
845
846 spin_unlock(&nilfs->ns_inode_lock);
847 return ret;
848 }
849
850 static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
851 {
852 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
853
854 nilfs_mdt_clear_dirty(sci->sc_root->ifile);
855 nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
856 nilfs_mdt_clear_dirty(nilfs->ns_sufile);
857 nilfs_mdt_clear_dirty(nilfs->ns_dat);
858 }
859
860 static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
861 {
862 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
863 struct buffer_head *bh_cp;
864 struct nilfs_checkpoint *raw_cp;
865 int err;
866
867 /* XXX: this interface will be changed */
868 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
869 &raw_cp, &bh_cp);
870 if (likely(!err)) {
871 /*
872 * The following code duplicates part of cpfile, but it is
873 * needed so that the checkpoint is collected even if it was
874 * not newly created.
875 */
876 mark_buffer_dirty(bh_cp);
877 nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
878 nilfs_cpfile_put_checkpoint(
879 nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
880 } else
881 WARN_ON(err == -EINVAL || err == -ENOENT);
882
883 return err;
884 }
885
886 static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
887 {
888 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
889 struct buffer_head *bh_cp;
890 struct nilfs_checkpoint *raw_cp;
891 int err;
892
893 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
894 &raw_cp, &bh_cp);
895 if (unlikely(err)) {
896 WARN_ON(err == -EINVAL || err == -ENOENT);
897 goto failed_ibh;
898 }
899 raw_cp->cp_snapshot_list.ssl_next = 0;
900 raw_cp->cp_snapshot_list.ssl_prev = 0;
901 raw_cp->cp_inodes_count =
902 cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
903 raw_cp->cp_blocks_count =
904 cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
905 raw_cp->cp_nblk_inc =
906 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
907 raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
908 raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
909
910 if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
911 nilfs_checkpoint_clear_minor(raw_cp);
912 else
913 nilfs_checkpoint_set_minor(raw_cp);
914
915 nilfs_write_inode_common(sci->sc_root->ifile,
916 &raw_cp->cp_ifile_inode, 1);
917 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
918 return 0;
919
920 failed_ibh:
921 return err;
922 }
923
924 static void nilfs_fill_in_file_bmap(struct inode *ifile,
925 struct nilfs_inode_info *ii)
927 {
928 struct buffer_head *ibh;
929 struct nilfs_inode *raw_inode;
930
931 if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
932 ibh = ii->i_bh;
933 BUG_ON(!ibh);
934 raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
935 ibh);
936 nilfs_bmap_write(ii->i_bmap, raw_inode);
937 nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
938 }
939 }
940
941 static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
942 {
943 struct nilfs_inode_info *ii;
944
945 list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
946 nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
947 set_bit(NILFS_I_COLLECTED, &ii->i_state);
948 }
949 }
950
951 static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
952 struct the_nilfs *nilfs)
953 {
954 struct buffer_head *bh_sr;
955 struct nilfs_super_root *raw_sr;
956 unsigned int isz, srsz;
957
958 bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
959 raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
960 isz = nilfs->ns_inode_size;
961 srsz = NILFS_SR_BYTES(isz);
962
963 raw_sr->sr_bytes = cpu_to_le16(srsz);
964 raw_sr->sr_nongc_ctime
965 = cpu_to_le64(nilfs_doing_gc() ?
966 nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
967 raw_sr->sr_flags = 0;
968
969 nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
970 NILFS_SR_DAT_OFFSET(isz), 1);
971 nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
972 NILFS_SR_CPFILE_OFFSET(isz), 1);
973 nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
974 NILFS_SR_SUFILE_OFFSET(isz), 1);
975 memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
976 }
977
978 static void nilfs_redirty_inodes(struct list_head *head)
979 {
980 struct nilfs_inode_info *ii;
981
982 list_for_each_entry(ii, head, i_dirty) {
983 if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
984 clear_bit(NILFS_I_COLLECTED, &ii->i_state);
985 }
986 }
987
988 static void nilfs_drop_collected_inodes(struct list_head *head)
989 {
990 struct nilfs_inode_info *ii;
991
992 list_for_each_entry(ii, head, i_dirty) {
993 if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
994 continue;
995
996 clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
997 set_bit(NILFS_I_UPDATED, &ii->i_state);
998 }
999 }
1000
1001 static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1002 struct inode *inode,
1003 struct list_head *listp,
1004 int (*collect)(struct nilfs_sc_info *,
1005 struct buffer_head *,
1006 struct inode *))
1007 {
1008 struct buffer_head *bh, *n;
1009 int err = 0;
1010
1011 if (collect) {
1012 list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1013 list_del_init(&bh->b_assoc_buffers);
1014 err = collect(sci, bh, inode);
1015 brelse(bh);
1016 if (unlikely(err))
1017 goto dispose_buffers;
1018 }
1019 return 0;
1020 }
1021
1022 dispose_buffers:
1023 while (!list_empty(listp)) {
1024 bh = list_first_entry(listp, struct buffer_head,
1025 b_assoc_buffers);
1026 list_del_init(&bh->b_assoc_buffers);
1027 brelse(bh);
1028 }
1029 return err;
1030 }
1031
1032 static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1033 {
1034 /* Remaining number of blocks within segment buffer */
1035 return sci->sc_segbuf_nblocks -
1036 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1037 }
1038
1039 static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1040 struct inode *inode,
1041 const struct nilfs_sc_operations *sc_ops)
1042 {
1043 LIST_HEAD(data_buffers);
1044 LIST_HEAD(node_buffers);
1045 int err;
1046
1047 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1048 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1049
1050 n = nilfs_lookup_dirty_data_buffers(
1051 inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1052 if (n > rest) {
1053 err = nilfs_segctor_apply_buffers(
1054 sci, inode, &data_buffers,
1055 sc_ops->collect_data);
1056 BUG_ON(!err); /* always receive -E2BIG or true error */
1057 goto break_or_fail;
1058 }
1059 }
1060 nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1061
1062 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1063 err = nilfs_segctor_apply_buffers(
1064 sci, inode, &data_buffers, sc_ops->collect_data);
1065 if (unlikely(err)) {
1066 /* dispose node list */
1067 nilfs_segctor_apply_buffers(
1068 sci, inode, &node_buffers, NULL);
1069 goto break_or_fail;
1070 }
1071 sci->sc_stage.flags |= NILFS_CF_NODE;
1072 }
1073 /* Collect node */
1074 err = nilfs_segctor_apply_buffers(
1075 sci, inode, &node_buffers, sc_ops->collect_node);
1076 if (unlikely(err))
1077 goto break_or_fail;
1078
1079 nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1080 err = nilfs_segctor_apply_buffers(
1081 sci, inode, &node_buffers, sc_ops->collect_bmap);
1082 if (unlikely(err))
1083 goto break_or_fail;
1084
1085 nilfs_segctor_end_finfo(sci, inode);
1086 sci->sc_stage.flags &= ~NILFS_CF_NODE;
1087
1088 break_or_fail:
1089 return err;
1090 }
1091
1092 static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1093 struct inode *inode)
1094 {
1095 LIST_HEAD(data_buffers);
1096 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1097 int err;
1098
1099 n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1100 sci->sc_dsync_start,
1101 sci->sc_dsync_end);
1102
1103 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1104 nilfs_collect_file_data);
1105 if (!err) {
1106 nilfs_segctor_end_finfo(sci, inode);
1107 BUG_ON(n > rest);
1108 /* always receive -E2BIG or true error if n > rest */
1109 }
1110 return err;
1111 }
1112
1113 static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1114 {
1115 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1116 struct list_head *head;
1117 struct nilfs_inode_info *ii;
1118 size_t ndone;
1119 int err = 0;
1120
1121 switch (nilfs_sc_cstage_get(sci)) {
1122 case NILFS_ST_INIT:
1123 /* Pre-processes */
1124 sci->sc_stage.flags = 0;
1125
1126 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1127 sci->sc_nblk_inc = 0;
1128 sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1129 if (mode == SC_LSEG_DSYNC) {
1130 nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
1131 goto dsync_mode;
1132 }
1133 }
1134
1135 sci->sc_stage.dirty_file_ptr = NULL;
1136 sci->sc_stage.gc_inode_ptr = NULL;
1137 if (mode == SC_FLUSH_DAT) {
1138 nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
1139 goto dat_stage;
1140 }
1141 nilfs_sc_cstage_inc(sci);
1142 fallthrough;
1143 case NILFS_ST_GC:
1144 if (nilfs_doing_gc()) {
1145 head = &sci->sc_gc_inodes;
1146 ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1147 head, i_dirty);
1148 list_for_each_entry_continue(ii, head, i_dirty) {
1149 err = nilfs_segctor_scan_file(
1150 sci, &ii->vfs_inode,
1151 &nilfs_sc_file_ops);
1152 if (unlikely(err)) {
1153 sci->sc_stage.gc_inode_ptr = list_entry(
1154 ii->i_dirty.prev,
1155 struct nilfs_inode_info,
1156 i_dirty);
1157 goto break_or_fail;
1158 }
1159 set_bit(NILFS_I_COLLECTED, &ii->i_state);
1160 }
1161 sci->sc_stage.gc_inode_ptr = NULL;
1162 }
1163 nilfs_sc_cstage_inc(sci);
1164 fallthrough;
1165 case NILFS_ST_FILE:
1166 head = &sci->sc_dirty_files;
1167 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1168 i_dirty);
1169 list_for_each_entry_continue(ii, head, i_dirty) {
1170 clear_bit(NILFS_I_DIRTY, &ii->i_state);
1171
1172 err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1173 &nilfs_sc_file_ops);
1174 if (unlikely(err)) {
1175 sci->sc_stage.dirty_file_ptr =
1176 list_entry(ii->i_dirty.prev,
1177 struct nilfs_inode_info,
1178 i_dirty);
1179 goto break_or_fail;
1180 }
1181 /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1182 /* XXX: required ? */
1183 }
1184 sci->sc_stage.dirty_file_ptr = NULL;
1185 if (mode == SC_FLUSH_FILE) {
1186 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1187 return 0;
1188 }
1189 nilfs_sc_cstage_inc(sci);
1190 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1191 fallthrough;
1192 case NILFS_ST_IFILE:
1193 err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
1194 &nilfs_sc_file_ops);
1195 if (unlikely(err))
1196 break;
1197 nilfs_sc_cstage_inc(sci);
1198 /* Creating a checkpoint */
1199 err = nilfs_segctor_create_checkpoint(sci);
1200 if (unlikely(err))
1201 break;
1202 fallthrough;
1203 case NILFS_ST_CPFILE:
1204 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1205 &nilfs_sc_file_ops);
1206 if (unlikely(err))
1207 break;
1208 nilfs_sc_cstage_inc(sci);
1209 fallthrough;
1210 case NILFS_ST_SUFILE:
1211 err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1212 sci->sc_nfreesegs, &ndone);
1213 if (unlikely(err)) {
1214 nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1215 sci->sc_freesegs, ndone,
1216 NULL);
1217 break;
1218 }
1219 sci->sc_stage.flags |= NILFS_CF_SUFREED;
1220
1221 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1222 &nilfs_sc_file_ops);
1223 if (unlikely(err))
1224 break;
1225 nilfs_sc_cstage_inc(sci);
1226 fallthrough;
1227 case NILFS_ST_DAT:
1228 dat_stage:
1229 err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
1230 &nilfs_sc_dat_ops);
1231 if (unlikely(err))
1232 break;
1233 if (mode == SC_FLUSH_DAT) {
1234 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1235 return 0;
1236 }
1237 nilfs_sc_cstage_inc(sci);
1238 fallthrough;
1239 case NILFS_ST_SR:
1240 if (mode == SC_LSEG_SR) {
1241 /* Appending a super root */
1242 err = nilfs_segctor_add_super_root(sci);
1243 if (unlikely(err))
1244 break;
1245 }
1246 /* End of a logical segment */
1247 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1248 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1249 return 0;
1250 case NILFS_ST_DSYNC:
1251 dsync_mode:
1252 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1253 ii = sci->sc_dsync_inode;
1254 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1255 break;
1256
1257 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1258 if (unlikely(err))
1259 break;
1260 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1261 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1262 return 0;
1263 case NILFS_ST_DONE:
1264 return 0;
1265 default:
1266 BUG();
1267 }
1268
1269 break_or_fail:
1270 return err;
1271 }
1272
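/*
 * Mode-to-stage summary (added for reference): SC_LSEG_DSYNC jumps
 * straight to NILFS_ST_DSYNC, SC_FLUSH_DAT jumps to NILFS_ST_DAT, and
 * SC_FLUSH_FILE finishes after NILFS_ST_FILE; only SC_LSEG_SR walks
 * every stage through the super root.
 */
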
1273 /**
1274 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1275 * @sci: nilfs_sc_info
1276 * @nilfs: nilfs object
1277 */
1278 static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1279 struct the_nilfs *nilfs)
1280 {
1281 struct nilfs_segment_buffer *segbuf, *prev;
1282 __u64 nextnum;
1283 int err, alloc = 0;
1284
1285 segbuf = nilfs_segbuf_new(sci->sc_super);
1286 if (unlikely(!segbuf))
1287 return -ENOMEM;
1288
1289 if (list_empty(&sci->sc_write_logs)) {
1290 nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1291 nilfs->ns_pseg_offset, nilfs);
1292 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1293 nilfs_shift_to_next_segment(nilfs);
1294 nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1295 }
1296
1297 segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1298 nextnum = nilfs->ns_nextnum;
1299
1300 if (nilfs->ns_segnum == nilfs->ns_nextnum)
1301 /* Start from the head of a new full segment */
1302 alloc++;
1303 } else {
1304 /* Continue logs */
1305 prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1306 nilfs_segbuf_map_cont(segbuf, prev);
1307 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1308 nextnum = prev->sb_nextnum;
1309
1310 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1311 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1312 segbuf->sb_sum.seg_seq++;
1313 alloc++;
1314 }
1315 }
1316
1317 err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1318 if (err)
1319 goto failed;
1320
1321 if (alloc) {
1322 err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
1323 if (err)
1324 goto failed;
1325 }
1326 nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1327
1328 BUG_ON(!list_empty(&sci->sc_segbufs));
1329 list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1330 sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1331 return 0;
1332
1333 failed:
1334 nilfs_segbuf_free(segbuf);
1335 return err;
1336 }
1337
1338 static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1339 struct the_nilfs *nilfs, int nadd)
1340 {
1341 struct nilfs_segment_buffer *segbuf, *prev;
1342 struct inode *sufile = nilfs->ns_sufile;
1343 __u64 nextnextnum;
1344 LIST_HEAD(list);
1345 int err, ret, i;
1346
1347 prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1348 /*
1349 * Since the segment specified with nextnum might be allocated during
1350 * the previous construction, the buffer including its segusage may
1351 * not be dirty. The following call ensures that the buffer is dirty
1352 * and will pin the buffer in memory until the sufile is written.
1353 */
1354 err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1355 if (unlikely(err))
1356 return err;
1357
1358 for (i = 0; i < nadd; i++) {
1359 /* extend segment info */
1360 err = -ENOMEM;
1361 segbuf = nilfs_segbuf_new(sci->sc_super);
1362 if (unlikely(!segbuf))
1363 goto failed;
1364
1365 /* map this buffer to region of segment on-disk */
1366 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1367 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1368
1369 /* allocate the next next full segment */
1370 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1371 if (unlikely(err))
1372 goto failed_segbuf;
1373
1374 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1375 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1376
1377 list_add_tail(&segbuf->sb_list, &list);
1378 prev = segbuf;
1379 }
1380 list_splice_tail(&list, &sci->sc_segbufs);
1381 return 0;
1382
1383 failed_segbuf:
1384 nilfs_segbuf_free(segbuf);
1385 failed:
1386 list_for_each_entry(segbuf, &list, sb_list) {
1387 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1388 WARN_ON(ret); /* never fails */
1389 }
1390 nilfs_destroy_logs(&list);
1391 return err;
1392 }
1393
1394 static void nilfs_free_incomplete_logs(struct list_head *logs,
1395 struct the_nilfs *nilfs)
1396 {
1397 struct nilfs_segment_buffer *segbuf, *prev;
1398 struct inode *sufile = nilfs->ns_sufile;
1399 int ret;
1400
1401 segbuf = NILFS_FIRST_SEGBUF(logs);
1402 if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1403 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1404 WARN_ON(ret); /* never fails */
1405 }
1406 if (atomic_read(&segbuf->sb_err)) {
1407 /* Case 1: The first segment failed */
1408 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1409 /*
1410 * Case 1a: Partial segment appended into an existing
1411 * segment
1412 */
1413 nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1414 segbuf->sb_fseg_end);
1415 else /* Case 1b: New full segment */
1416 set_nilfs_discontinued(nilfs);
1417 }
1418
1419 prev = segbuf;
1420 list_for_each_entry_continue(segbuf, logs, sb_list) {
1421 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1422 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1423 WARN_ON(ret); /* never fails */
1424 }
1425 if (atomic_read(&segbuf->sb_err) &&
1426 segbuf->sb_segnum != nilfs->ns_nextnum)
1427 /* Case 2: extended segment (!= next) failed */
1428 nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1429 prev = segbuf;
1430 }
1431 }
1432
1433 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1434 struct inode *sufile)
1435 {
1436 struct nilfs_segment_buffer *segbuf;
1437 unsigned long live_blocks;
1438 int ret;
1439
1440 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1441 live_blocks = segbuf->sb_sum.nblocks +
1442 (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1443 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1444 live_blocks,
1445 sci->sc_seg_ctime);
1446 WARN_ON(ret); /* always succeeds because the segusage is dirty */
1447 }
1448 }
1449
1450 static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1451 {
1452 struct nilfs_segment_buffer *segbuf;
1453 int ret;
1454
1455 segbuf = NILFS_FIRST_SEGBUF(logs);
1456 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1457 segbuf->sb_pseg_start -
1458 segbuf->sb_fseg_start, 0);
1459 WARN_ON(ret); /* always succeeds because the segusage is dirty */
1460
1461 list_for_each_entry_continue(segbuf, logs, sb_list) {
1462 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1463 0, 0);
1464 WARN_ON(ret); /* always succeeds */
1465 }
1466 }
1467
1468 static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1469 struct nilfs_segment_buffer *last,
1470 struct inode *sufile)
1471 {
1472 struct nilfs_segment_buffer *segbuf = last;
1473 int ret;
1474
1475 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1476 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1477 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1478 WARN_ON(ret);
1479 }
1480 nilfs_truncate_logs(&sci->sc_segbufs, last);
1481 }
1482
1483
1484 static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1485 struct the_nilfs *nilfs, int mode)
1486 {
1487 struct nilfs_cstage prev_stage = sci->sc_stage;
1488 int err, nadd = 1;
1489
1490 /* Collection retry loop */
1491 for (;;) {
1492 sci->sc_nblk_this_inc = 0;
1493 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1494
1495 err = nilfs_segctor_reset_segment_buffer(sci);
1496 if (unlikely(err))
1497 goto failed;
1498
1499 err = nilfs_segctor_collect_blocks(sci, mode);
1500 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1501 if (!err)
1502 break;
1503
1504 if (unlikely(err != -E2BIG))
1505 goto failed;
1506
1507 /* The current segment is filled up */
1508 if (mode != SC_LSEG_SR ||
1509 nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1510 break;
1511
1512 nilfs_clear_logs(&sci->sc_segbufs);
1513
1514 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1515 err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1516 sci->sc_freesegs,
1517 sci->sc_nfreesegs,
1518 NULL);
1519 WARN_ON(err); /* should not happen */
1520 sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1521 }
1522
1523 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1524 if (unlikely(err))
1525 return err;
1526
1527 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1528 sci->sc_stage = prev_stage;
1529 }
1530 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1531 return 0;
1532
1533 failed:
1534 return err;
1535 }
1536
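/*
 * Note on the retry loop above (added for clarity): when collection
 * overflows the current set of segment buffers (-E2BIG), the constructor
 * extends it by nadd segments, doubling nadd on each retry up to
 * SC_MAX_SEGDELTA (64), and restarts collection from the saved stage.
 */
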
1537 static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1538 struct buffer_head *new_bh)
1539 {
1540 BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1541
1542 list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1543 /* The caller must release old_bh */
1544 }
1545
1546 static int
1547 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1548 struct nilfs_segment_buffer *segbuf,
1549 int mode)
1550 {
1551 struct inode *inode = NULL;
1552 sector_t blocknr;
1553 unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1554 unsigned long nblocks = 0, ndatablk = 0;
1555 const struct nilfs_sc_operations *sc_op = NULL;
1556 struct nilfs_segsum_pointer ssp;
1557 struct nilfs_finfo *finfo = NULL;
1558 union nilfs_binfo binfo;
1559 struct buffer_head *bh, *bh_org;
1560 ino_t ino = 0;
1561 int err = 0;
1562
1563 if (!nfinfo)
1564 goto out;
1565
1566 blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1567 ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1568 ssp.offset = sizeof(struct nilfs_segment_summary);
1569
1570 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1571 if (bh == segbuf->sb_super_root)
1572 break;
1573 if (!finfo) {
1574 finfo = nilfs_segctor_map_segsum_entry(
1575 sci, &ssp, sizeof(*finfo));
1576 ino = le64_to_cpu(finfo->fi_ino);
1577 nblocks = le32_to_cpu(finfo->fi_nblocks);
1578 ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1579
1580 inode = bh->b_page->mapping->host;
1581
1582 if (mode == SC_LSEG_DSYNC)
1583 sc_op = &nilfs_sc_dsync_ops;
1584 else if (ino == NILFS_DAT_INO)
1585 sc_op = &nilfs_sc_dat_ops;
1586 else /* file blocks */
1587 sc_op = &nilfs_sc_file_ops;
1588 }
1589 bh_org = bh;
1590 get_bh(bh_org);
1591 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1592 &binfo);
1593 if (bh != bh_org)
1594 nilfs_list_replace_buffer(bh_org, bh);
1595 brelse(bh_org);
1596 if (unlikely(err))
1597 goto failed_bmap;
1598
1599 if (ndatablk > 0)
1600 sc_op->write_data_binfo(sci, &ssp, &binfo);
1601 else
1602 sc_op->write_node_binfo(sci, &ssp, &binfo);
1603
1604 blocknr++;
1605 if (--nblocks == 0) {
1606 finfo = NULL;
1607 if (--nfinfo == 0)
1608 break;
1609 } else if (ndatablk > 0)
1610 ndatablk--;
1611 }
1612 out:
1613 return 0;
1614
1615 failed_bmap:
1616 return err;
1617 }
1618
1619 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1620 {
1621 struct nilfs_segment_buffer *segbuf;
1622 int err;
1623
1624 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1625 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1626 if (unlikely(err))
1627 return err;
1628 nilfs_segbuf_fill_in_segsum(segbuf);
1629 }
1630 return 0;
1631 }
1632
1633 static void nilfs_begin_page_io(struct page *page)
1634 {
1635 if (!page || PageWriteback(page))
1636 /*
1637 * For split b-tree node pages, this function may be called
1638 * twice; the check above ignores the second and later calls.
1639 */
1640 return;
1641
1642 lock_page(page);
1643 clear_page_dirty_for_io(page);
1644 set_page_writeback(page);
1645 unlock_page(page);
1646 }
1647
1648 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1649 {
1650 struct nilfs_segment_buffer *segbuf;
1651 struct page *bd_page = NULL, *fs_page = NULL;
1652
1653 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1654 struct buffer_head *bh;
1655
1656 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1657 b_assoc_buffers) {
1658 if (bh->b_page != bd_page) {
1659 if (bd_page) {
1660 lock_page(bd_page);
1661 clear_page_dirty_for_io(bd_page);
1662 set_page_writeback(bd_page);
1663 unlock_page(bd_page);
1664 }
1665 bd_page = bh->b_page;
1666 }
1667 }
1668
1669 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1670 b_assoc_buffers) {
1671 set_buffer_async_write(bh);
1672 if (bh == segbuf->sb_super_root) {
1673 if (bh->b_page != bd_page) {
1674 lock_page(bd_page);
1675 clear_page_dirty_for_io(bd_page);
1676 set_page_writeback(bd_page);
1677 unlock_page(bd_page);
1678 bd_page = bh->b_page;
1679 }
1680 break;
1681 }
1682 if (bh->b_page != fs_page) {
1683 nilfs_begin_page_io(fs_page);
1684 fs_page = bh->b_page;
1685 }
1686 }
1687 }
1688 if (bd_page) {
1689 lock_page(bd_page);
1690 clear_page_dirty_for_io(bd_page);
1691 set_page_writeback(bd_page);
1692 unlock_page(bd_page);
1693 }
1694 nilfs_begin_page_io(fs_page);
1695 }
1696
1697 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1698 struct the_nilfs *nilfs)
1699 {
1700 int ret;
1701
1702 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1703 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1704 return ret;
1705 }
1706
1707 static void nilfs_end_page_io(struct page *page, int err)
1708 {
1709 if (!page)
1710 return;
1711
1712 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1713 /*
1714 * For b-tree node pages, this function may be called twice
1715 * or more because they might be split in a segment.
1716 */
1717 if (PageDirty(page)) {
1718 /*
1719 * For pages holding split b-tree node buffers, dirty
1720 * flag on the buffers may be cleared discretely.
1721 * In that case, the page is once redirtied for
1722 * remaining buffers, and it must be cancelled if
1723 * all the buffers get cleaned later.
1724 */
1725 lock_page(page);
1726 if (nilfs_page_buffers_clean(page))
1727 __nilfs_clear_page_dirty(page);
1728 unlock_page(page);
1729 }
1730 return;
1731 }
1732
1733 if (!err) {
1734 if (!nilfs_page_buffers_clean(page))
1735 __set_page_dirty_nobuffers(page);
1736 ClearPageError(page);
1737 } else {
1738 __set_page_dirty_nobuffers(page);
1739 SetPageError(page);
1740 }
1741
1742 end_page_writeback(page);
1743 }
1744
1745 static void nilfs_abort_logs(struct list_head *logs, int err)
1746 {
1747 struct nilfs_segment_buffer *segbuf;
1748 struct page *bd_page = NULL, *fs_page = NULL;
1749 struct buffer_head *bh;
1750
1751 if (list_empty(logs))
1752 return;
1753
1754 list_for_each_entry(segbuf, logs, sb_list) {
1755 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1756 b_assoc_buffers) {
1757 if (bh->b_page != bd_page) {
1758 if (bd_page)
1759 end_page_writeback(bd_page);
1760 bd_page = bh->b_page;
1761 }
1762 }
1763
1764 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1765 b_assoc_buffers) {
1766 clear_buffer_async_write(bh);
1767 if (bh == segbuf->sb_super_root) {
1768 if (bh->b_page != bd_page) {
1769 end_page_writeback(bd_page);
1770 bd_page = bh->b_page;
1771 }
1772 break;
1773 }
1774 if (bh->b_page != fs_page) {
1775 nilfs_end_page_io(fs_page, err);
1776 fs_page = bh->b_page;
1777 }
1778 }
1779 }
1780 if (bd_page)
1781 end_page_writeback(bd_page);
1782
1783 nilfs_end_page_io(fs_page, err);
1784 }
1785
1786 static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1787 struct the_nilfs *nilfs, int err)
1788 {
1789 LIST_HEAD(logs);
1790 int ret;
1791
1792 list_splice_tail_init(&sci->sc_write_logs, &logs);
1793 ret = nilfs_wait_on_logs(&logs);
1794 nilfs_abort_logs(&logs, ret ? : err);
1795
1796 list_splice_tail_init(&sci->sc_segbufs, &logs);
1797 nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1798 nilfs_free_incomplete_logs(&logs, nilfs);
1799
1800 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1801 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1802 sci->sc_freesegs,
1803 sci->sc_nfreesegs,
1804 NULL);
1805 WARN_ON(ret); /* should not happen */
1806 }
1807
1808 nilfs_destroy_logs(&logs);
1809 }
1810
1811 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1812 struct nilfs_segment_buffer *segbuf)
1813 {
1814 nilfs->ns_segnum = segbuf->sb_segnum;
1815 nilfs->ns_nextnum = segbuf->sb_nextnum;
1816 nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1817 + segbuf->sb_sum.nblocks;
1818 nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1819 nilfs->ns_ctime = segbuf->sb_sum.ctime;
1820 }
1821
1822 static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1823 {
1824 struct nilfs_segment_buffer *segbuf;
1825 struct page *bd_page = NULL, *fs_page = NULL;
1826 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1827 int update_sr = false;
1828
1829 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1830 struct buffer_head *bh;
1831
1832 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1833 b_assoc_buffers) {
1834 set_buffer_uptodate(bh);
1835 clear_buffer_dirty(bh);
1836 if (bh->b_page != bd_page) {
1837 if (bd_page)
1838 end_page_writeback(bd_page);
1839 bd_page = bh->b_page;
1840 }
1841 }
1842 /*
1843 * We assume that buffers which belong to the same page
1844 * are contiguous in the buffer list.
1845 * Under this assumption, the last BH of each page is
1846 * identifiable by the discontinuity of bh->b_page
1847 * (page != fs_page).
1848 *
1849 * For B-tree node blocks, however, this assumption is not
1850 * guaranteed. The cleanup code of B-tree node pages needs
1851 * special care.
1852 */
1853 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1854 b_assoc_buffers) {
1855 const unsigned long set_bits = BIT(BH_Uptodate);
1856 const unsigned long clear_bits =
1857 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1858 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1859 BIT(BH_NILFS_Redirected));
1860
1861 set_mask_bits(&bh->b_state, clear_bits, set_bits);
1862 if (bh == segbuf->sb_super_root) {
1863 if (bh->b_page != bd_page) {
1864 end_page_writeback(bd_page);
1865 bd_page = bh->b_page;
1866 }
1867 update_sr = true;
1868 break;
1869 }
1870 if (bh->b_page != fs_page) {
1871 nilfs_end_page_io(fs_page, 0);
1872 fs_page = bh->b_page;
1873 }
1874 }
1875
1876 if (!nilfs_segbuf_simplex(segbuf)) {
1877 if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1878 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1879 sci->sc_lseg_stime = jiffies;
1880 }
1881 if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1882 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1883 }
1884 }
1885 /*
1886 * Since pages may continue over multiple segment buffers,
1887 * end of the last page must be checked outside of the loop.
1888 */
1889 if (bd_page)
1890 end_page_writeback(bd_page);
1891
1892 nilfs_end_page_io(fs_page, 0);
1893
1894 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1895
1896 if (nilfs_doing_gc())
1897 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1898 else
1899 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1900
1901 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1902
1903 segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1904 nilfs_set_next_segment(nilfs, segbuf);
1905
1906 if (update_sr) {
1907 nilfs->ns_flushed_device = 0;
1908 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1909 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1910
1911 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1912 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1913 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1914 nilfs_segctor_clear_metadata_dirty(sci);
1915 } else
1916 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1917 }
1918
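/**
 * nilfs_segctor_wait - wait for the completion of requested log writes
 * @sci: segment constructor object
 *
 * Waits for I/O completion of the logs under write and, on success,
 * finishes their post-write processing.
 */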
1919 static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1920 {
1921 int ret;
1922
1923 ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1924 if (!ret) {
1925 nilfs_segctor_complete_write(sci);
1926 nilfs_destroy_logs(&sci->sc_write_logs);
1927 }
1928 return ret;
1929 }
1930
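/**
 * nilfs_segctor_collect_dirty_files - pick up dirty inodes for construction
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Attaches an on-disk inode block buffer to each queued dirty inode
 * that lacks one, then moves the inodes from the filesystem-wide dirty
 * list to the constructor's sc_dirty_files list, marking them busy.
 */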
1931 static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1932 struct the_nilfs *nilfs)
1933 {
1934 struct nilfs_inode_info *ii, *n;
1935 struct inode *ifile = sci->sc_root->ifile;
1936
1937 spin_lock(&nilfs->ns_inode_lock);
1938 retry:
1939 list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1940 if (!ii->i_bh) {
1941 struct buffer_head *ibh;
1942 int err;
1943
1944 spin_unlock(&nilfs->ns_inode_lock);
1945 err = nilfs_ifile_get_inode_block(
1946 ifile, ii->vfs_inode.i_ino, &ibh);
1947 if (unlikely(err)) {
1948 nilfs_warn(sci->sc_super,
1949 "log writer: error %d getting inode block (ino=%lu)",
1950 err, ii->vfs_inode.i_ino);
1951 return err;
1952 }
1953 spin_lock(&nilfs->ns_inode_lock);
1954 if (likely(!ii->i_bh))
1955 ii->i_bh = ibh;
1956 else
1957 brelse(ibh);
1958 goto retry;
1959 }
1960
1961 /* Always redirty the buffer to avoid race condition */
1962 mark_buffer_dirty(ii->i_bh);
1963 nilfs_mdt_mark_dirty(ifile);
1964
1965 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1966 set_bit(NILFS_I_BUSY, &ii->i_state);
1967 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1968 }
1969 spin_unlock(&nilfs->ns_inode_lock);
1970
1971 return 0;
1972 }
1973
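/**
 * nilfs_segctor_drop_written_files - release inodes whose changes were written
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Detaches inodes that were written out and not redirtied from the
 * sc_dirty_files list.  iput() calls that might deadlock (for unlinked
 * inodes, or while the mount is not yet finished) are deferred to the
 * iput work queue.
 */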
1974 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1975 struct the_nilfs *nilfs)
1976 {
1977 struct nilfs_inode_info *ii, *n;
1978 int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
1979 int defer_iput = false;
1980
1981 spin_lock(&nilfs->ns_inode_lock);
1982 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1983 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1984 test_bit(NILFS_I_DIRTY, &ii->i_state))
1985 continue;
1986
1987 clear_bit(NILFS_I_BUSY, &ii->i_state);
1988 brelse(ii->i_bh);
1989 ii->i_bh = NULL;
1990 list_del_init(&ii->i_dirty);
1991 if (!ii->vfs_inode.i_nlink || during_mount) {
1992 /*
1993 * Defer calling iput() to avoid deadlocks if
1994 * i_nlink == 0 or mount is not yet finished.
1995 */
1996 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1997 defer_iput = true;
1998 } else {
1999 spin_unlock(&nilfs->ns_inode_lock);
2000 iput(&ii->vfs_inode);
2001 spin_lock(&nilfs->ns_inode_lock);
2002 }
2003 }
2004 spin_unlock(&nilfs->ns_inode_lock);
2005
2006 if (defer_iput)
2007 schedule_work(&sci->sc_iput_work);
2008 }
2009
2010 /*
2011 * Main procedure of the segment constructor
2012 */
2013 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2014 {
2015 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2016 int err;
2017
2018 nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2019 sci->sc_cno = nilfs->ns_cno;
2020
2021 err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2022 if (unlikely(err))
2023 goto out;
2024
2025 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2026 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2027
2028 if (nilfs_segctor_clean(sci))
2029 goto out;
2030
2031 do {
2032 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2033
2034 err = nilfs_segctor_begin_construction(sci, nilfs);
2035 if (unlikely(err))
2036 goto out;
2037
2038 /* Update time stamp */
2039 sci->sc_seg_ctime = ktime_get_real_seconds();
2040
2041 err = nilfs_segctor_collect(sci, nilfs, mode);
2042 if (unlikely(err))
2043 goto failed;
2044
2045 /* Avoid empty segment */
2046 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2047 nilfs_segbuf_empty(sci->sc_curseg)) {
2048 nilfs_segctor_abort_construction(sci, nilfs, 1);
2049 goto out;
2050 }
2051
2052 err = nilfs_segctor_assign(sci, mode);
2053 if (unlikely(err))
2054 goto failed;
2055
2056 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2057 nilfs_segctor_fill_in_file_bmap(sci);
2058
2059 if (mode == SC_LSEG_SR &&
2060 nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2061 err = nilfs_segctor_fill_in_checkpoint(sci);
2062 if (unlikely(err))
2063 goto failed_to_write;
2064
2065 nilfs_segctor_fill_in_super_root(sci, nilfs);
2066 }
2067 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2068
2069 /* Write partial segments */
2070 nilfs_segctor_prepare_write(sci);
2071
2072 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2073 nilfs->ns_crc_seed);
2074
2075 err = nilfs_segctor_write(sci, nilfs);
2076 if (unlikely(err))
2077 goto failed_to_write;
2078
2079 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2080 nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2081 /*
2082 * At this point, we avoid double buffering
2083 * for blocksize < pagesize because the page
2084 * dirty flag is turned off during write and
2085 * dirty buffers are not properly collected
2086 * for pages crossing over segments.
2087 */
2088 err = nilfs_segctor_wait(sci);
2089 if (err)
2090 goto failed_to_write;
2091 }
2092 } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2093
2094 out:
2095 nilfs_segctor_drop_written_files(sci, nilfs);
2096 return err;
2097
2098 failed_to_write:
2099 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2100 nilfs_redirty_inodes(&sci->sc_dirty_files);
2101
2102 failed:
2103 if (nilfs_doing_gc())
2104 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2105 nilfs_segctor_abort_construction(sci, nilfs, err);
2106 goto out;
2107 }
2108
2109 /**
2110 * nilfs_segctor_start_timer - set timer of background write
2111 * @sci: nilfs_sc_info
2112 *
2113 * If the timer has already been set, it ignores the new request.
2114 * This function MUST be called within a section locking the segment
2115 * semaphore.
2116 */
2117 static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2118 {
2119 spin_lock(&sci->sc_state_lock);
2120 if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2121 sci->sc_timer.expires = jiffies + sci->sc_interval;
2122 add_timer(&sci->sc_timer);
2123 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2124 }
2125 spin_unlock(&sci->sc_state_lock);
2126 }
2127
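/**
 * nilfs_segctor_do_flush - set a flush request for a file or the DAT
 * @sci: segment constructor object
 * @bn: bit number of the request (0 for data files, or the inode
 *	number of a metadata file)
 *
 * Sets the corresponding bit in sc_flush_request and wakes up the
 * segctord thread if no other flush request was pending.
 */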
2128 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2129 {
2130 spin_lock(&sci->sc_state_lock);
2131 if (!(sci->sc_flush_request & BIT(bn))) {
2132 unsigned long prev_req = sci->sc_flush_request;
2133
2134 sci->sc_flush_request |= BIT(bn);
2135 if (!prev_req)
2136 wake_up(&sci->sc_wait_daemon);
2137 }
2138 spin_unlock(&sci->sc_state_lock);
2139 }
2140
2141 /**
2142 * nilfs_flush_segment - trigger a segment construction for resource control
2143 * @sb: super block
2144 * @ino: inode number of the file to be flushed out.
2145 */
2146 void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2147 {
2148 struct the_nilfs *nilfs = sb->s_fs_info;
2149 struct nilfs_sc_info *sci = nilfs->ns_writer;
2150
2151 if (!sci || nilfs_doing_construction())
2152 return;
2153 /* bit 0 is assigned to data files */
2154 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2155 }
2156
2157 struct nilfs_segctor_wait_request {
2158 wait_queue_entry_t wq;
2159 __u32 seq;
2160 int err;
2161 atomic_t done;
2162 };
2163
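/**
 * nilfs_segctor_sync - wait for a segment construction to complete
 * @sci: segment constructor object
 *
 * Issues a construction request to the segctord thread and sleeps
 * until a request with the assigned sequence number is serviced.
 * The wait is interruptible; -ERESTARTSYS is returned if a signal
 * arrives first.
 */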
2164 static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2165 {
2166 struct nilfs_segctor_wait_request wait_req;
2167 int err = 0;
2168
2169 spin_lock(&sci->sc_state_lock);
2170 init_wait(&wait_req.wq);
2171 wait_req.err = 0;
2172 atomic_set(&wait_req.done, 0);
2173 wait_req.seq = ++sci->sc_seq_request;
2174 spin_unlock(&sci->sc_state_lock);
2175
2176 init_waitqueue_entry(&wait_req.wq, current);
2177 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2178 set_current_state(TASK_INTERRUPTIBLE);
2179 wake_up(&sci->sc_wait_daemon);
2180
2181 for (;;) {
2182 if (atomic_read(&wait_req.done)) {
2183 err = wait_req.err;
2184 break;
2185 }
2186 if (!signal_pending(current)) {
2187 schedule();
2188 continue;
2189 }
2190 err = -ERESTARTSYS;
2191 break;
2192 }
2193 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2194 return err;
2195 }
2196
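/**
 * nilfs_segctor_wakeup - wake up threads waiting on construction requests
 * @sci: segment constructor object
 * @err: error code carried over to the woken-up waiters
 *
 * Completes every wait request whose sequence number has been covered
 * by sc_seq_done, passing @err on to the waiting thread.
 */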
2197 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2198 {
2199 struct nilfs_segctor_wait_request *wrq, *n;
2200 unsigned long flags;
2201
2202 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2203 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
2204 if (!atomic_read(&wrq->done) &&
2205 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2206 wrq->err = err;
2207 atomic_set(&wrq->done, 1);
2208 }
2209 if (atomic_read(&wrq->done)) {
2210 wrq->wq.func(&wrq->wq,
2211 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2212 0, NULL);
2213 }
2214 }
2215 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2216 }
2217
2218 /**
2219 * nilfs_construct_segment - construct a logical segment
2220 * @sb: super block
2221 *
2222 * Return Value: On success, 0 is returned. On error, one of the following
2223 * negative error codes is returned.
2224 *
2225 * %-EROFS - Read only filesystem.
2226 *
2227 * %-EIO - I/O error
2228 *
2229 * %-ENOSPC - No space left on device (only in a panic state).
2230 *
2231 * %-ERESTARTSYS - Interrupted.
2232 *
2233 * %-ENOMEM - Insufficient memory available.
2234 */
2235 int nilfs_construct_segment(struct super_block *sb)
2236 {
2237 struct the_nilfs *nilfs = sb->s_fs_info;
2238 struct nilfs_sc_info *sci = nilfs->ns_writer;
2239 struct nilfs_transaction_info *ti;
2240 int err;
2241
2242 if (!sci)
2243 return -EROFS;
2244
2245 /* A call inside transactions causes a deadlock. */
2246 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2247
2248 err = nilfs_segctor_sync(sci);
2249 return err;
2250 }
2251
2252 /**
2253 * nilfs_construct_dsync_segment - construct a data-only logical segment
2254 * @sb: super block
2255 * @inode: inode whose data blocks should be written out
2256 * @start: start byte offset
2257 * @end: end byte offset (inclusive)
2258 *
2259 * Return Value: On success, 0 is returned. On error, one of the following
2260 * negative error codes is returned.
2261 *
2262 * %-EROFS - Read only filesystem.
2263 *
2264 * %-EIO - I/O error
2265 *
2266 * %-ENOSPC - No space left on device (only in a panic state).
2267 *
2268 * %-ERESTARTSYS - Interrupted.
2269 *
2270 * %-ENOMEM - Insufficient memory available.
2271 */
2272 int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2273 loff_t start, loff_t end)
2274 {
2275 struct the_nilfs *nilfs = sb->s_fs_info;
2276 struct nilfs_sc_info *sci = nilfs->ns_writer;
2277 struct nilfs_inode_info *ii;
2278 struct nilfs_transaction_info ti;
2279 int err = 0;
2280
2281 if (!sci)
2282 return -EROFS;
2283
2284 nilfs_transaction_lock(sb, &ti, 0);
2285
2286 ii = NILFS_I(inode);
2287 if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2288 nilfs_test_opt(nilfs, STRICT_ORDER) ||
2289 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2290 nilfs_discontinued(nilfs)) {
2291 nilfs_transaction_unlock(sb);
2292 err = nilfs_segctor_sync(sci);
2293 return err;
2294 }
2295
2296 spin_lock(&nilfs->ns_inode_lock);
2297 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2298 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2299 spin_unlock(&nilfs->ns_inode_lock);
2300 nilfs_transaction_unlock(sb);
2301 return 0;
2302 }
2303 spin_unlock(&nilfs->ns_inode_lock);
2304 sci->sc_dsync_inode = ii;
2305 sci->sc_dsync_start = start;
2306 sci->sc_dsync_end = end;
2307
2308 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2309 if (!err)
2310 nilfs->ns_flushed_device = 0;
2311
2312 nilfs_transaction_unlock(sb);
2313 return err;
2314 }
2315
2316 #define FLUSH_FILE_BIT (0x1) /* data file only */
2317 #define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */
2318
2319 /**
2320 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2321 * @sci: segment constructor object
2322 */
2323 static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2324 {
2325 spin_lock(&sci->sc_state_lock);
2326 sci->sc_seq_accepted = sci->sc_seq_request;
2327 spin_unlock(&sci->sc_state_lock);
2328 del_timer_sync(&sci->sc_timer);
2329 }
2330
2331 /**
2332 * nilfs_segctor_notify - notify the result of request to caller threads
2333 * @sci: segment constructor object
2334 * @mode: mode of log forming
2335 * @err: error code to be notified
2336 */
2337 static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2338 {
2339 /* Clear requests (even when the construction failed) */
2340 spin_lock(&sci->sc_state_lock);
2341
2342 if (mode == SC_LSEG_SR) {
2343 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2344 sci->sc_seq_done = sci->sc_seq_accepted;
2345 nilfs_segctor_wakeup(sci, err);
2346 sci->sc_flush_request = 0;
2347 } else {
2348 if (mode == SC_FLUSH_FILE)
2349 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2350 else if (mode == SC_FLUSH_DAT)
2351 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2352
2353 /* re-enable timer if checkpoint creation was not done */
2354 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2355 time_before(jiffies, sci->sc_timer.expires))
2356 add_timer(&sci->sc_timer);
2357 }
2358 spin_unlock(&sci->sc_state_lock);
2359 }
2360
2361 /**
2362 * nilfs_segctor_construct - form logs and write them to disk
2363 * @sci: segment constructor object
2364 * @mode: mode of log forming
2365 */
2366 static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2367 {
2368 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2369 struct nilfs_super_block **sbp;
2370 int err = 0;
2371
2372 nilfs_segctor_accept(sci);
2373
2374 if (nilfs_discontinued(nilfs))
2375 mode = SC_LSEG_SR;
2376 if (!nilfs_segctor_confirm(sci))
2377 err = nilfs_segctor_do_construct(sci, mode);
2378
2379 if (likely(!err)) {
2380 if (mode != SC_FLUSH_DAT)
2381 atomic_set(&nilfs->ns_ndirtyblks, 0);
2382 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2383 nilfs_discontinued(nilfs)) {
2384 down_write(&nilfs->ns_sem);
2385 err = -EIO;
2386 sbp = nilfs_prepare_super(sci->sc_super,
2387 nilfs_sb_will_flip(nilfs));
2388 if (likely(sbp)) {
2389 nilfs_set_log_cursor(sbp[0], nilfs);
2390 err = nilfs_commit_super(sci->sc_super,
2391 NILFS_SB_COMMIT);
2392 }
2393 up_write(&nilfs->ns_sem);
2394 }
2395 }
2396
2397 nilfs_segctor_notify(sci, mode, err);
2398 return err;
2399 }
2400
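/*
 * Timer callback that wakes up the segctord thread when the
 * construction interval has elapsed.
 */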
2401 static void nilfs_construction_timeout(struct timer_list *t)
2402 {
2403 struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
2404
2405 wake_up_process(sci->sc_timer_task);
2406 }
2407
2408 static void
2409 nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2410 {
2411 struct nilfs_inode_info *ii, *n;
2412
2413 list_for_each_entry_safe(ii, n, head, i_dirty) {
2414 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2415 continue;
2416 list_del_init(&ii->i_dirty);
2417 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2418 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2419 iput(&ii->vfs_inode);
2420 }
2421 }
2422
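/**
 * nilfs_clean_segments - write out logs for GC and free selected segments
 * @sb: super block instance
 * @argv: vector of arguments from the NILFS_IOCTL_CLEAN_SEGMENTS ioctl
 * @kbufs: array of buffers holding the payload of @argv
 *
 * Called from the ioctl handler of the garbage collector: writes the
 * blocks relocated by userspace, retries the construction until it
 * succeeds, and optionally discards the freed segments.
 */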
2423 int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2424 void **kbufs)
2425 {
2426 struct the_nilfs *nilfs = sb->s_fs_info;
2427 struct nilfs_sc_info *sci = nilfs->ns_writer;
2428 struct nilfs_transaction_info ti;
2429 int err;
2430
2431 if (unlikely(!sci))
2432 return -EROFS;
2433
2434 nilfs_transaction_lock(sb, &ti, 1);
2435
2436 err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2437 if (unlikely(err))
2438 goto out_unlock;
2439
2440 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2441 if (unlikely(err)) {
2442 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2443 goto out_unlock;
2444 }
2445
2446 sci->sc_freesegs = kbufs[4];
2447 sci->sc_nfreesegs = argv[4].v_nmembs;
2448 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2449
2450 for (;;) {
2451 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2452 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2453
2454 if (likely(!err))
2455 break;
2456
2457 nilfs_warn(sb, "error %d cleaning segments", err);
2458 set_current_state(TASK_INTERRUPTIBLE);
2459 schedule_timeout(sci->sc_interval);
2460 }
2461 if (nilfs_test_opt(nilfs, DISCARD)) {
2462 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2463 sci->sc_nfreesegs);
2464 if (ret) {
2465 nilfs_warn(sb,
2466 "error %d on discard request, turning discards off for the device",
2467 ret);
2468 nilfs_clear_opt(nilfs, DISCARD);
2469 }
2470 }
2471
2472 out_unlock:
2473 sci->sc_freesegs = NULL;
2474 sci->sc_nfreesegs = 0;
2475 nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2476 nilfs_transaction_unlock(sb);
2477 return err;
2478 }
2479
2480 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2481 {
2482 struct nilfs_transaction_info ti;
2483
2484 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2485 nilfs_segctor_construct(sci, mode);
2486
2487 /*
2488 * An unclosed segment should be retried. We do this using sc_timer.
2489 * A timeout of sc_timer invokes a complete construction which
2490 * closes the current logical segment.
2491 */
2492 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2493 nilfs_segctor_start_timer(sci);
2494
2495 nilfs_transaction_unlock(sci->sc_super);
2496 }
2497
2498 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2499 {
2500 int mode = 0;
2501
2502 spin_lock(&sci->sc_state_lock);
2503 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2504 SC_FLUSH_DAT : SC_FLUSH_FILE;
2505 spin_unlock(&sci->sc_state_lock);
2506
2507 if (mode) {
2508 nilfs_segctor_do_construct(sci, mode);
2509
2510 spin_lock(&sci->sc_state_lock);
2511 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2512 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2513 spin_unlock(&sci->sc_state_lock);
2514 }
2515 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2516 }
2517
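/*
 * Choose the construction mode for pending flush requests: flush only
 * data files or only the DAT when possible; otherwise, or when the
 * current logical segment has stayed unclosed beyond the checkpoint
 * frequency, make a full logical segment with a super root.
 */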
2518 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2519 {
2520 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2521 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2522 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2523 return SC_FLUSH_FILE;
2524 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2525 return SC_FLUSH_DAT;
2526 }
2527 return SC_LSEG_SR;
2528 }
2529
2530 /**
2531 * nilfs_segctor_thread - main loop of the segment constructor thread.
2532 * @arg: pointer to a struct nilfs_sc_info.
2533 *
2534 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2535 * to execute segment constructions.
2536 */
2537 static int nilfs_segctor_thread(void *arg)
2538 {
2539 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2540 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2541 int timeout = 0;
2542
2543 sci->sc_timer_task = current;
2544
2545 /* start sync. */
2546 sci->sc_task = current;
2547 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2548 nilfs_info(sci->sc_super,
2549 "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2550 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2551
2552 spin_lock(&sci->sc_state_lock);
2553 loop:
2554 for (;;) {
2555 int mode;
2556
2557 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2558 goto end_thread;
2559
2560 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2561 mode = SC_LSEG_SR;
2562 else if (sci->sc_flush_request)
2563 mode = nilfs_segctor_flush_mode(sci);
2564 else
2565 break;
2566
2567 spin_unlock(&sci->sc_state_lock);
2568 nilfs_segctor_thread_construct(sci, mode);
2569 spin_lock(&sci->sc_state_lock);
2570 timeout = 0;
2571 }
2572
2574 if (freezing(current)) {
2575 spin_unlock(&sci->sc_state_lock);
2576 try_to_freeze();
2577 spin_lock(&sci->sc_state_lock);
2578 } else {
2579 DEFINE_WAIT(wait);
2580 int should_sleep = 1;
2581
2582 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2583 TASK_INTERRUPTIBLE);
2584
2585 if (sci->sc_seq_request != sci->sc_seq_done)
2586 should_sleep = 0;
2587 else if (sci->sc_flush_request)
2588 should_sleep = 0;
2589 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2590 should_sleep = time_before(jiffies,
2591 sci->sc_timer.expires);
2592
2593 if (should_sleep) {
2594 spin_unlock(&sci->sc_state_lock);
2595 schedule();
2596 spin_lock(&sci->sc_state_lock);
2597 }
2598 finish_wait(&sci->sc_wait_daemon, &wait);
2599 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2600 time_after_eq(jiffies, sci->sc_timer.expires));
2601
2602 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2603 set_nilfs_discontinued(nilfs);
2604 }
2605 goto loop;
2606
2607 end_thread:
2608 spin_unlock(&sci->sc_state_lock);
2609
2610 /* end sync. */
2611 sci->sc_task = NULL;
2612 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2613 return 0;
2614 }
2615
2616 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2617 {
2618 struct task_struct *t;
2619
2620 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2621 if (IS_ERR(t)) {
2622 int err = PTR_ERR(t);
2623
2624 nilfs_err(sci->sc_super, "error %d creating segctord thread",
2625 err);
2626 return err;
2627 }
2628 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2629 return 0;
2630 }
2631
2632 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2633 __acquires(&sci->sc_state_lock)
2634 __releases(&sci->sc_state_lock)
2635 {
2636 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2637
2638 while (sci->sc_task) {
2639 wake_up(&sci->sc_wait_daemon);
2640 spin_unlock(&sci->sc_state_lock);
2641 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2642 spin_lock(&sci->sc_state_lock);
2643 }
2644 }
2645
2646 /*
2647 * Setup & clean-up functions
2648 */
2649 static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2650 struct nilfs_root *root)
2651 {
2652 struct the_nilfs *nilfs = sb->s_fs_info;
2653 struct nilfs_sc_info *sci;
2654
2655 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2656 if (!sci)
2657 return NULL;
2658
2659 sci->sc_super = sb;
2660
2661 nilfs_get_root(root);
2662 sci->sc_root = root;
2663
2664 init_waitqueue_head(&sci->sc_wait_request);
2665 init_waitqueue_head(&sci->sc_wait_daemon);
2666 init_waitqueue_head(&sci->sc_wait_task);
2667 spin_lock_init(&sci->sc_state_lock);
2668 INIT_LIST_HEAD(&sci->sc_dirty_files);
2669 INIT_LIST_HEAD(&sci->sc_segbufs);
2670 INIT_LIST_HEAD(&sci->sc_write_logs);
2671 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2672 INIT_LIST_HEAD(&sci->sc_iput_queue);
2673 INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2674 timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
2675
2676 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2677 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2678 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2679
2680 if (nilfs->ns_interval)
2681 sci->sc_interval = HZ * nilfs->ns_interval;
2682 if (nilfs->ns_watermark)
2683 sci->sc_watermark = nilfs->ns_watermark;
2684 return sci;
2685 }
2686
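/*
 * Flush out any remaining construction requests, retrying a limited
 * number of times, before the segment constructor is destroyed.
 */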
2687 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2688 {
2689 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2690
2691 /*
2692 * The segctord thread was stopped and its timer was removed.
2693 * But some tasks remain.
2694 */
2695 do {
2696 struct nilfs_transaction_info ti;
2697
2698 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2699 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2700 nilfs_transaction_unlock(sci->sc_super);
2701
2702 flush_work(&sci->sc_iput_work);
2703
2704 } while (ret && retrycount-- > 0);
2705 }
2706
2707 /**
2708 * nilfs_segctor_destroy - destroy the segment constructor.
2709 * @sci: nilfs_sc_info
2710 *
2711 * nilfs_segctor_destroy() kills the segctord thread and frees
2712 * the nilfs_sc_info struct.
2713 * Caller must hold the segment semaphore.
2714 */
2715 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2716 {
2717 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2718 int flag;
2719
2720 up_write(&nilfs->ns_segctor_sem);
2721
2722 spin_lock(&sci->sc_state_lock);
2723 nilfs_segctor_kill_thread(sci);
2724 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2725 || sci->sc_seq_request != sci->sc_seq_done);
2726 spin_unlock(&sci->sc_state_lock);
2727
2728 if (flush_work(&sci->sc_iput_work))
2729 flag = true;
2730
2731 if (flag || !nilfs_segctor_confirm(sci))
2732 nilfs_segctor_write_out(sci);
2733
2734 if (!list_empty(&sci->sc_dirty_files)) {
2735 nilfs_warn(sci->sc_super,
2736 "disposed unprocessed dirty file(s) when stopping log writer");
2737 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2738 }
2739
2740 if (!list_empty(&sci->sc_iput_queue)) {
2741 nilfs_warn(sci->sc_super,
2742 "disposed unprocessed inode(s) in iput queue when stopping log writer");
2743 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2744 }
2745
2746 WARN_ON(!list_empty(&sci->sc_segbufs));
2747 WARN_ON(!list_empty(&sci->sc_write_logs));
2748
2749 nilfs_put_root(sci->sc_root);
2750
2751 down_write(&nilfs->ns_segctor_sem);
2752
2753 del_timer_sync(&sci->sc_timer);
2754 kfree(sci);
2755 }
2756
2757 /**
2758 * nilfs_attach_log_writer - attach log writer
2759 * @sb: super block instance
2760 * @root: root object of the current filesystem tree
2761 *
2762 * This allocates a log writer object, initializes it, and starts the
2763 * log writer.
2764 *
2765 * Return Value: On success, 0 is returned. On error, one of the following
2766 * negative error codes is returned.
2767 *
2768 * %-ENOMEM - Insufficient memory available.
2769 */
2770 int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2771 {
2772 struct the_nilfs *nilfs = sb->s_fs_info;
2773 int err;
2774
2775 if (nilfs->ns_writer) {
2776 /*
2777 * This happens if the filesystem was remounted
2778 * read/write after nilfs_error degenerated it into a
2779 * read-only mount.
2780 */
2781 nilfs_detach_log_writer(sb);
2782 }
2783
2784 nilfs->ns_writer = nilfs_segctor_new(sb, root);
2785 if (!nilfs->ns_writer)
2786 return -ENOMEM;
2787
2788 inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
2789
2790 err = nilfs_segctor_start_thread(nilfs->ns_writer);
2791 if (err) {
2792 kfree(nilfs->ns_writer);
2793 nilfs->ns_writer = NULL;
2794 }
2795 return err;
2796 }
2797
2798 /**
2799 * nilfs_detach_log_writer - destroy log writer
2800 * @sb: super block instance
2801 *
2802 * This kills the log writer daemon, frees the log writer object, and
2803 * destroys the list of dirty files.
2804 */
2805 void nilfs_detach_log_writer(struct super_block *sb)
2806 {
2807 struct the_nilfs *nilfs = sb->s_fs_info;
2808 LIST_HEAD(garbage_list);
2809
2810 down_write(&nilfs->ns_segctor_sem);
2811 if (nilfs->ns_writer) {
2812 nilfs_segctor_destroy(nilfs->ns_writer);
2813 nilfs->ns_writer = NULL;
2814 }
2815
2816 /* Forcibly free the list of dirty files */
2817 spin_lock(&nilfs->ns_inode_lock);
2818 if (!list_empty(&nilfs->ns_dirty_files)) {
2819 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2820 nilfs_warn(sb,
2821 "disposed unprocessed dirty file(s) when detaching log writer");
2822 }
2823 spin_unlock(&nilfs->ns_inode_lock);
2824 up_write(&nilfs->ns_segctor_sem);
2825
2826 nilfs_dispose_list(nilfs, &garbage_list, 1);
2827 }