1/*
2 * segment.c - NILFS segment constructor.
3 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi.
17 *
18 */
19
20#include <linux/pagemap.h>
21#include <linux/buffer_head.h>
22#include <linux/writeback.h>
23#include <linux/bitops.h>
24#include <linux/bio.h>
25#include <linux/completion.h>
26#include <linux/blkdev.h>
27#include <linux/backing-dev.h>
28#include <linux/freezer.h>
29#include <linux/kthread.h>
30#include <linux/crc32.h>
31#include <linux/pagevec.h>
32#include <linux/slab.h>
33#include <linux/sched/signal.h>
34
35#include "nilfs.h"
36#include "btnode.h"
37#include "page.h"
38#include "segment.h"
39#include "sufile.h"
40#include "cpfile.h"
41#include "ifile.h"
42#include "segbuf.h"
43
44
45/*
46 * Segment constructor
47 */
48#define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */
49
50#define SC_MAX_SEGDELTA 64 /*
51 * Upper limit of the number of segments
52 * appended in collection retry loop
53 */
54
55/* Construction mode */
56enum {
57 SC_LSEG_SR = 1, /* Make a logical segment having a super root */
58 SC_LSEG_DSYNC, /*
59 * Flush data blocks of a given file and make
60 * a logical segment without a super root.
61 */
62 SC_FLUSH_FILE, /*
63 * Flush data files, leads to segment writes without
64 * creating a checkpoint.
65 */
66 SC_FLUSH_DAT, /*
67 * Flush DAT file. This also creates segments
68 * without a checkpoint.
69 */
70};
71
72/* Stage numbers of dirty block collection */
73enum {
74 NILFS_ST_INIT = 0,
75 NILFS_ST_GC, /* Collecting dirty blocks for GC */
76 NILFS_ST_FILE,
77 NILFS_ST_IFILE,
78 NILFS_ST_CPFILE,
79 NILFS_ST_SUFILE,
80 NILFS_ST_DAT,
81 NILFS_ST_SR, /* Super root */
82 NILFS_ST_DSYNC, /* Data sync blocks */
83 NILFS_ST_DONE,
84};
85
86#define CREATE_TRACE_POINTS
87#include <trace/events/nilfs2.h>
88
89/*
90 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
91 * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of
92 * the variable must use them because transition of stage count must involve
93 * trace events (trace_nilfs2_collection_stage_transition).
94 *
95 * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't
96 * produce tracepoint events. It is provided just for making the intention
97 * clear.
98 */
99static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
100{
101 sci->sc_stage.scnt++;
102 trace_nilfs2_collection_stage_transition(sci);
103}
104
105static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
106{
107 sci->sc_stage.scnt = next_scnt;
108 trace_nilfs2_collection_stage_transition(sci);
109}
110
111static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
112{
113 return sci->sc_stage.scnt;
114}
115
116/* State flags of collection */
117#define NILFS_CF_NODE 0x0001 /* Collecting node blocks */
118#define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */
119#define NILFS_CF_SUFREED 0x0004 /* segment usages have been freed */
120#define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
121
122/* Operations depending on the construction mode and file type */
123struct nilfs_sc_operations {
124 int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
125 struct inode *);
126 int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
127 struct inode *);
128 int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
129 struct inode *);
130 void (*write_data_binfo)(struct nilfs_sc_info *,
131 struct nilfs_segsum_pointer *,
132 union nilfs_binfo *);
133 void (*write_node_binfo)(struct nilfs_sc_info *,
134 struct nilfs_segsum_pointer *,
135 union nilfs_binfo *);
136};
137
138/*
139 * Other definitions
140 */
141static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
142static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
143static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
144static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
145
146#define nilfs_cnt32_gt(a, b) \
147 (typecheck(__u32, a) && typecheck(__u32, b) && \
148 ((__s32)(b) - (__s32)(a) < 0))
149#define nilfs_cnt32_ge(a, b) \
150 (typecheck(__u32, a) && typecheck(__u32, b) && \
151 ((__s32)(a) - (__s32)(b) >= 0))
152#define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
153#define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
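/*
 * The nilfs_cnt32_* comparisons above use the signed 32-bit difference
 * trick (as in the kernel's time_after()/time_before() helpers), so they
 * remain correct when the compared sequence counters wrap around.
 */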
154
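/*
 * nilfs_prepare_segment_lock() installs @ti (or a transaction_info freshly
 * allocated from the slab cache) on current->journal_info.  It returns the
 * incremented nesting count when a nilfs transaction is already open on
 * this task, 0 when a new outermost transaction context was set up, or a
 * negative error code on allocation failure.
 */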
155static int nilfs_prepare_segment_lock(struct super_block *sb,
156 struct nilfs_transaction_info *ti)
157{
158 struct nilfs_transaction_info *cur_ti = current->journal_info;
159 void *save = NULL;
160
161 if (cur_ti) {
162 if (cur_ti->ti_magic == NILFS_TI_MAGIC)
163 return ++cur_ti->ti_count;
164
165 /*
166 * If the journal_info field is occupied by another FS,
167 * it is saved and will be restored on
168 * nilfs_transaction_commit().
169 */
170 nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
171 save = current->journal_info;
172 }
173 if (!ti) {
174 ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
175 if (!ti)
176 return -ENOMEM;
177 ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
178 } else {
179 ti->ti_flags = 0;
180 }
181 ti->ti_count = 0;
182 ti->ti_save = save;
183 ti->ti_magic = NILFS_TI_MAGIC;
184 current->journal_info = ti;
185 return 0;
186}
187
188/**
189 * nilfs_transaction_begin - start indivisible file operations.
190 * @sb: super block
191 * @ti: nilfs_transaction_info
192 * @vacancy_check: flags for vacancy rate checks
193 *
194 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
195 * the segment semaphore, to make a segment construction and write tasks
196 * exclusive. The function is used in pairs with nilfs_transaction_commit().
197 * The region enclosed by these two functions can be nested. To avoid a
198 * deadlock, the semaphore is only acquired or released in the outermost call.
199 *
200 * This function allocates a nilfs_transaction_info struct to keep context
201 * information on it. It is initialized and hooked onto the current task in
202 * the outermost call. If a pre-allocated struct is given to @ti, it is used
203 * instead; otherwise a new struct is assigned from a slab.
204 *
205 * When @vacancy_check flag is set, this function will check the amount of
206 * free space, and will wait for the GC to reclaim disk space if low capacity.
207 *
208 * Return Value: On success, 0 is returned. On error, one of the following
209 * negative error codes is returned.
210 *
211 * %-ENOMEM - Insufficient memory available.
212 *
213 * %-ENOSPC - No space left on device
214 */
215int nilfs_transaction_begin(struct super_block *sb,
216 struct nilfs_transaction_info *ti,
217 int vacancy_check)
218{
219 struct the_nilfs *nilfs;
220 int ret = nilfs_prepare_segment_lock(sb, ti);
221 struct nilfs_transaction_info *trace_ti;
222
223 if (unlikely(ret < 0))
224 return ret;
225 if (ret > 0) {
226 trace_ti = current->journal_info;
227
228 trace_nilfs2_transaction_transition(sb, trace_ti,
229 trace_ti->ti_count, trace_ti->ti_flags,
230 TRACE_NILFS2_TRANSACTION_BEGIN);
231 return 0;
232 }
233
234 sb_start_intwrite(sb);
235
236 nilfs = sb->s_fs_info;
237 down_read(&nilfs->ns_segctor_sem);
238 if (vacancy_check && nilfs_near_disk_full(nilfs)) {
239 up_read(&nilfs->ns_segctor_sem);
240 ret = -ENOSPC;
241 goto failed;
242 }
243
244 trace_ti = current->journal_info;
245 trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
246 trace_ti->ti_flags,
247 TRACE_NILFS2_TRANSACTION_BEGIN);
248 return 0;
249
250 failed:
251 ti = current->journal_info;
252 current->journal_info = ti->ti_save;
253 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
254 kmem_cache_free(nilfs_transaction_cachep, ti);
255 sb_end_intwrite(sb);
256 return ret;
257}
258
259/**
260 * nilfs_transaction_commit - commit indivisible file operations.
261 * @sb: super block
262 *
263 * nilfs_transaction_commit() releases the read semaphore which is
264 * acquired by nilfs_transaction_begin(). This is only performed
265 * in outermost call of this function. If a commit flag is set,
266 * nilfs_transaction_commit() sets a timer to start the segment
267 * constructor. If a sync flag is set, it starts construction
268 * directly.
269 */
270int nilfs_transaction_commit(struct super_block *sb)
271{
272 struct nilfs_transaction_info *ti = current->journal_info;
273 struct the_nilfs *nilfs = sb->s_fs_info;
274 int err = 0;
275
276 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
277 ti->ti_flags |= NILFS_TI_COMMIT;
278 if (ti->ti_count > 0) {
279 ti->ti_count--;
280 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
281 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
282 return 0;
283 }
284 if (nilfs->ns_writer) {
285 struct nilfs_sc_info *sci = nilfs->ns_writer;
286
287 if (ti->ti_flags & NILFS_TI_COMMIT)
288 nilfs_segctor_start_timer(sci);
289 if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
290 nilfs_segctor_do_flush(sci, 0);
291 }
292 up_read(&nilfs->ns_segctor_sem);
293 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
294 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
295
296 current->journal_info = ti->ti_save;
297
298 if (ti->ti_flags & NILFS_TI_SYNC)
299 err = nilfs_construct_segment(sb);
300 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
301 kmem_cache_free(nilfs_transaction_cachep, ti);
302 sb_end_intwrite(sb);
303 return err;
304}
305
306void nilfs_transaction_abort(struct super_block *sb)
307{
308 struct nilfs_transaction_info *ti = current->journal_info;
309 struct the_nilfs *nilfs = sb->s_fs_info;
310
311 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
312 if (ti->ti_count > 0) {
313 ti->ti_count--;
314 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
315 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
316 return;
317 }
318 up_read(&nilfs->ns_segctor_sem);
319
320 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
321 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
322
323 current->journal_info = ti->ti_save;
324 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
325 kmem_cache_free(nilfs_transaction_cachep, ti);
326 sb_end_intwrite(sb);
327}
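/*
 * Illustrative pairing of the helpers above (a sketch for readers, not
 * code taken from elsewhere in the tree; do_file_operation() is a
 * hypothetical placeholder):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = do_file_operation(inode);
 *	if (unlikely(err))
 *		nilfs_transaction_abort(sb);
 *	else
 *		err = nilfs_transaction_commit(sb);
 *	return err;
 */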
328
329void nilfs_relax_pressure_in_lock(struct super_block *sb)
330{
331 struct the_nilfs *nilfs = sb->s_fs_info;
332 struct nilfs_sc_info *sci = nilfs->ns_writer;
333
334 if (!sci || !sci->sc_flush_request)
335 return;
336
337 set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
338 up_read(&nilfs->ns_segctor_sem);
339
340 down_write(&nilfs->ns_segctor_sem);
341 if (sci->sc_flush_request &&
342 test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
343 struct nilfs_transaction_info *ti = current->journal_info;
344
345 ti->ti_flags |= NILFS_TI_WRITER;
346 nilfs_segctor_do_immediate_flush(sci);
347 ti->ti_flags &= ~NILFS_TI_WRITER;
348 }
349 downgrade_write(&nilfs->ns_segctor_sem);
350}
351
352static void nilfs_transaction_lock(struct super_block *sb,
353 struct nilfs_transaction_info *ti,
354 int gcflag)
355{
356 struct nilfs_transaction_info *cur_ti = current->journal_info;
357 struct the_nilfs *nilfs = sb->s_fs_info;
358 struct nilfs_sc_info *sci = nilfs->ns_writer;
359
360 WARN_ON(cur_ti);
361 ti->ti_flags = NILFS_TI_WRITER;
362 ti->ti_count = 0;
363 ti->ti_save = cur_ti;
364 ti->ti_magic = NILFS_TI_MAGIC;
365 current->journal_info = ti;
366
367 for (;;) {
368 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
369 ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
370
371 down_write(&nilfs->ns_segctor_sem);
372 if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
373 break;
374
375 nilfs_segctor_do_immediate_flush(sci);
376
377 up_write(&nilfs->ns_segctor_sem);
378 cond_resched();
379 }
380 if (gcflag)
381 ti->ti_flags |= NILFS_TI_GC;
382
383 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
384 ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
385}
386
387static void nilfs_transaction_unlock(struct super_block *sb)
388{
389 struct nilfs_transaction_info *ti = current->journal_info;
390 struct the_nilfs *nilfs = sb->s_fs_info;
391
392 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
393 BUG_ON(ti->ti_count > 0);
394
395 up_write(&nilfs->ns_segctor_sem);
396 current->journal_info = ti->ti_save;
397
398 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
399 ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
400}
401
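/*
 * nilfs_segctor_map_segsum_entry() hands back a pointer to @bytes bytes of
 * space in the segment summary buffers, advancing to the next summary
 * block when the requested entry would not fit in the current one.
 */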
402static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
403 struct nilfs_segsum_pointer *ssp,
404 unsigned int bytes)
405{
406 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
407 unsigned int blocksize = sci->sc_super->s_blocksize;
408 void *p;
409
410 if (unlikely(ssp->offset + bytes > blocksize)) {
411 ssp->offset = 0;
412 BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
413 &segbuf->sb_segsum_buffers));
414 ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
415 }
416 p = ssp->bh->b_data + ssp->offset;
417 ssp->offset += bytes;
418 return p;
419}
420
421/**
422 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
423 * @sci: nilfs_sc_info
424 */
425static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
426{
427 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
428 struct buffer_head *sumbh;
429 unsigned int sumbytes;
430 unsigned int flags = 0;
431 int err;
432
433 if (nilfs_doing_gc())
434 flags = NILFS_SS_GC;
435 err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
436 if (unlikely(err))
437 return err;
438
439 sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
440 sumbytes = segbuf->sb_sum.sumbytes;
441 sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes;
442 sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes;
443 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
444 return 0;
445}
446
447static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
448{
449 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
450 if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
451 return -E2BIG; /*
452 * The current segment is filled up
453 * (internal code)
454 */
455 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
456 return nilfs_segctor_reset_segment_buffer(sci);
457}
458
459static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
460{
461 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
462 int err;
463
464 if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
465 err = nilfs_segctor_feed_segment(sci);
466 if (err)
467 return err;
468 segbuf = sci->sc_curseg;
469 }
470 err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
471 if (likely(!err))
472 segbuf->sb_sum.flags |= NILFS_SS_SR;
473 return err;
474}
475
476/*
477 * Functions for making segment summary and payloads
478 */
479static int nilfs_segctor_segsum_block_required(
480 struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
481 unsigned int binfo_size)
482{
483 unsigned int blocksize = sci->sc_super->s_blocksize;
484 /* Size of finfo and binfo is small enough compared to blocksize */
485
486 return ssp->offset + binfo_size +
487 (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
488 blocksize;
489}
490
491static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
492 struct inode *inode)
493{
494 sci->sc_curseg->sb_sum.nfinfo++;
495 sci->sc_binfo_ptr = sci->sc_finfo_ptr;
496 nilfs_segctor_map_segsum_entry(
497 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
498
499 if (NILFS_I(inode)->i_root &&
500 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
501 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
502 /* skip finfo */
503}
504
505static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
506 struct inode *inode)
507{
508 struct nilfs_finfo *finfo;
509 struct nilfs_inode_info *ii;
510 struct nilfs_segment_buffer *segbuf;
511 __u64 cno;
512
513 if (sci->sc_blk_cnt == 0)
514 return;
515
516 ii = NILFS_I(inode);
517
518 if (test_bit(NILFS_I_GCINODE, &ii->i_state))
519 cno = ii->i_cno;
520 else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
521 cno = 0;
522 else
523 cno = sci->sc_cno;
524
525 finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
526 sizeof(*finfo));
527 finfo->fi_ino = cpu_to_le64(inode->i_ino);
528 finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
529 finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
530 finfo->fi_cno = cpu_to_le64(cno);
531
532 segbuf = sci->sc_curseg;
533 segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
534 sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
535 sci->sc_finfo_ptr = sci->sc_binfo_ptr;
536 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
537}
538
539static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
540 struct buffer_head *bh,
541 struct inode *inode,
542 unsigned int binfo_size)
543{
544 struct nilfs_segment_buffer *segbuf;
545 int required, err = 0;
546
547 retry:
548 segbuf = sci->sc_curseg;
549 required = nilfs_segctor_segsum_block_required(
550 sci, &sci->sc_binfo_ptr, binfo_size);
551 if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
552 nilfs_segctor_end_finfo(sci, inode);
553 err = nilfs_segctor_feed_segment(sci);
554 if (err)
555 return err;
556 goto retry;
557 }
558 if (unlikely(required)) {
559 err = nilfs_segbuf_extend_segsum(segbuf);
560 if (unlikely(err))
561 goto failed;
562 }
563 if (sci->sc_blk_cnt == 0)
564 nilfs_segctor_begin_finfo(sci, inode);
565
566 nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
567 /* Substitution to vblocknr is delayed until update_blocknr() */
568 nilfs_segbuf_add_file_buffer(segbuf, bh);
569 sci->sc_blk_cnt++;
570 failed:
571 return err;
572}
573
574/*
575 * Callback functions that enumerate, mark, and collect dirty blocks
576 */
577static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
578 struct buffer_head *bh, struct inode *inode)
579{
580 int err;
581
582 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
583 if (err < 0)
584 return err;
585
586 err = nilfs_segctor_add_file_block(sci, bh, inode,
587 sizeof(struct nilfs_binfo_v));
588 if (!err)
589 sci->sc_datablk_cnt++;
590 return err;
591}
592
593static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
594 struct buffer_head *bh,
595 struct inode *inode)
596{
597 return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
598}
599
600static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
601 struct buffer_head *bh,
602 struct inode *inode)
603{
604 WARN_ON(!buffer_dirty(bh));
605 return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
606}
607
608static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
609 struct nilfs_segsum_pointer *ssp,
610 union nilfs_binfo *binfo)
611{
612 struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
613 sci, ssp, sizeof(*binfo_v));
614 *binfo_v = binfo->bi_v;
615}
616
617static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
618 struct nilfs_segsum_pointer *ssp,
619 union nilfs_binfo *binfo)
620{
621 __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
622 sci, ssp, sizeof(*vblocknr));
623 *vblocknr = binfo->bi_v.bi_vblocknr;
624}
625
626static const struct nilfs_sc_operations nilfs_sc_file_ops = {
627 .collect_data = nilfs_collect_file_data,
628 .collect_node = nilfs_collect_file_node,
629 .collect_bmap = nilfs_collect_file_bmap,
630 .write_data_binfo = nilfs_write_file_data_binfo,
631 .write_node_binfo = nilfs_write_file_node_binfo,
632};
633
634static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
635 struct buffer_head *bh, struct inode *inode)
636{
637 int err;
638
639 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
640 if (err < 0)
641 return err;
642
643 err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
644 if (!err)
645 sci->sc_datablk_cnt++;
646 return err;
647}
648
649static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
650 struct buffer_head *bh, struct inode *inode)
651{
652 WARN_ON(!buffer_dirty(bh));
653 return nilfs_segctor_add_file_block(sci, bh, inode,
654 sizeof(struct nilfs_binfo_dat));
655}
656
657static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
658 struct nilfs_segsum_pointer *ssp,
659 union nilfs_binfo *binfo)
660{
661 __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
662 sizeof(*blkoff));
663 *blkoff = binfo->bi_dat.bi_blkoff;
664}
665
666static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
667 struct nilfs_segsum_pointer *ssp,
668 union nilfs_binfo *binfo)
669{
670 struct nilfs_binfo_dat *binfo_dat =
671 nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
672 *binfo_dat = binfo->bi_dat;
673}
674
675static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
676 .collect_data = nilfs_collect_dat_data,
677 .collect_node = nilfs_collect_file_node,
678 .collect_bmap = nilfs_collect_dat_bmap,
679 .write_data_binfo = nilfs_write_dat_data_binfo,
680 .write_node_binfo = nilfs_write_dat_node_binfo,
681};
682
683static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
684 .collect_data = nilfs_collect_file_data,
685 .collect_node = NULL,
686 .collect_bmap = NULL,
687 .write_data_binfo = nilfs_write_file_data_binfo,
688 .write_node_binfo = NULL,
689};
690
691static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
692 struct list_head *listp,
693 size_t nlimit,
694 loff_t start, loff_t end)
695{
696 struct address_space *mapping = inode->i_mapping;
697 struct pagevec pvec;
698 pgoff_t index = 0, last = ULONG_MAX;
699 size_t ndirties = 0;
700 int i;
701
702 if (unlikely(start != 0 || end != LLONG_MAX)) {
703 /*
704 * A valid range is given for sync-ing data pages. The
705 * range is rounded to per-page; extra dirty buffers
706 * may be included if blocksize < pagesize.
707 */
708 index = start >> PAGE_SHIFT;
709 last = end >> PAGE_SHIFT;
710 }
711 pagevec_init(&pvec, 0);
712 repeat:
713 if (unlikely(index > last) ||
714 !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
715 min_t(pgoff_t, last - index,
716 PAGEVEC_SIZE - 1) + 1))
717 return ndirties;
718
719 for (i = 0; i < pagevec_count(&pvec); i++) {
720 struct buffer_head *bh, *head;
721 struct page *page = pvec.pages[i];
722
723 if (unlikely(page->index > last))
724 break;
725
726 lock_page(page);
727 if (!page_has_buffers(page))
728 create_empty_buffers(page, i_blocksize(inode), 0);
729 unlock_page(page);
730
731 bh = head = page_buffers(page);
732 do {
733 if (!buffer_dirty(bh) || buffer_async_write(bh))
734 continue;
735 get_bh(bh);
736 list_add_tail(&bh->b_assoc_buffers, listp);
737 ndirties++;
738 if (unlikely(ndirties >= nlimit)) {
739 pagevec_release(&pvec);
740 cond_resched();
741 return ndirties;
742 }
743 } while (bh = bh->b_this_page, bh != head);
744 }
745 pagevec_release(&pvec);
746 cond_resched();
747 goto repeat;
748}
749
750static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
751 struct list_head *listp)
752{
753 struct nilfs_inode_info *ii = NILFS_I(inode);
754 struct address_space *mapping = &ii->i_btnode_cache;
755 struct pagevec pvec;
756 struct buffer_head *bh, *head;
757 unsigned int i;
758 pgoff_t index = 0;
759
760 pagevec_init(&pvec, 0);
761
762 while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
763 PAGEVEC_SIZE)) {
764 for (i = 0; i < pagevec_count(&pvec); i++) {
765 bh = head = page_buffers(pvec.pages[i]);
766 do {
767 if (buffer_dirty(bh) &&
768 !buffer_async_write(bh)) {
769 get_bh(bh);
770 list_add_tail(&bh->b_assoc_buffers,
771 listp);
772 }
773 bh = bh->b_this_page;
774 } while (bh != head);
775 }
776 pagevec_release(&pvec);
777 cond_resched();
778 }
779}
780
781static void nilfs_dispose_list(struct the_nilfs *nilfs,
782 struct list_head *head, int force)
783{
784 struct nilfs_inode_info *ii, *n;
785 struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
786 unsigned int nv = 0;
787
788 while (!list_empty(head)) {
789 spin_lock(&nilfs->ns_inode_lock);
790 list_for_each_entry_safe(ii, n, head, i_dirty) {
791 list_del_init(&ii->i_dirty);
792 if (force) {
793 if (unlikely(ii->i_bh)) {
794 brelse(ii->i_bh);
795 ii->i_bh = NULL;
796 }
797 } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
798 set_bit(NILFS_I_QUEUED, &ii->i_state);
799 list_add_tail(&ii->i_dirty,
800 &nilfs->ns_dirty_files);
801 continue;
802 }
803 ivec[nv++] = ii;
804 if (nv == SC_N_INODEVEC)
805 break;
806 }
807 spin_unlock(&nilfs->ns_inode_lock);
808
809 for (pii = ivec; nv > 0; pii++, nv--)
810 iput(&(*pii)->vfs_inode);
811 }
812}
813
814static void nilfs_iput_work_func(struct work_struct *work)
815{
816 struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
817 sc_iput_work);
818 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
819
820 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
821}
822
823static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
824 struct nilfs_root *root)
825{
826 int ret = 0;
827
828 if (nilfs_mdt_fetch_dirty(root->ifile))
829 ret++;
830 if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
831 ret++;
832 if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
833 ret++;
834 if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
835 ret++;
836 return ret;
837}
838
839static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
840{
841 return list_empty(&sci->sc_dirty_files) &&
842 !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
843 sci->sc_nfreesegs == 0 &&
844 (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
845}
846
847static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
848{
849 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
850 int ret = 0;
851
852 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
853 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
854
855 spin_lock(&nilfs->ns_inode_lock);
856 if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
857 ret++;
858
859 spin_unlock(&nilfs->ns_inode_lock);
860 return ret;
861}
862
863static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
864{
865 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
866
867 nilfs_mdt_clear_dirty(sci->sc_root->ifile);
868 nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
869 nilfs_mdt_clear_dirty(nilfs->ns_sufile);
870 nilfs_mdt_clear_dirty(nilfs->ns_dat);
871}
872
873static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
874{
875 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
876 struct buffer_head *bh_cp;
877 struct nilfs_checkpoint *raw_cp;
878 int err;
879
880 /* XXX: this interface will be changed */
881 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
882 &raw_cp, &bh_cp);
883 if (likely(!err)) {
884 /*
885 * The following code is duplicated with cpfile. But, it is
886 * needed to collect the checkpoint even if it was not newly
887 * created.
888 */
889 mark_buffer_dirty(bh_cp);
890 nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
891 nilfs_cpfile_put_checkpoint(
892 nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
893 } else
894 WARN_ON(err == -EINVAL || err == -ENOENT);
895
896 return err;
897}
898
899static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
900{
901 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
902 struct buffer_head *bh_cp;
903 struct nilfs_checkpoint *raw_cp;
904 int err;
905
906 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
907 &raw_cp, &bh_cp);
908 if (unlikely(err)) {
909 WARN_ON(err == -EINVAL || err == -ENOENT);
910 goto failed_ibh;
911 }
912 raw_cp->cp_snapshot_list.ssl_next = 0;
913 raw_cp->cp_snapshot_list.ssl_prev = 0;
914 raw_cp->cp_inodes_count =
915 cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
916 raw_cp->cp_blocks_count =
917 cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
918 raw_cp->cp_nblk_inc =
919 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
920 raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
921 raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
922
923 if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
924 nilfs_checkpoint_clear_minor(raw_cp);
925 else
926 nilfs_checkpoint_set_minor(raw_cp);
927
928 nilfs_write_inode_common(sci->sc_root->ifile,
929 &raw_cp->cp_ifile_inode, 1);
930 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
931 return 0;
932
933 failed_ibh:
934 return err;
935}
936
937static void nilfs_fill_in_file_bmap(struct inode *ifile,
938 struct nilfs_inode_info *ii)
939
940{
941 struct buffer_head *ibh;
942 struct nilfs_inode *raw_inode;
943
944 if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
945 ibh = ii->i_bh;
946 BUG_ON(!ibh);
947 raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
948 ibh);
949 nilfs_bmap_write(ii->i_bmap, raw_inode);
950 nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
951 }
952}
953
954static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
955{
956 struct nilfs_inode_info *ii;
957
958 list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
959 nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
960 set_bit(NILFS_I_COLLECTED, &ii->i_state);
961 }
962}
963
964static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
965 struct the_nilfs *nilfs)
966{
967 struct buffer_head *bh_sr;
968 struct nilfs_super_root *raw_sr;
969 unsigned int isz, srsz;
970
971 bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
972 raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
973 isz = nilfs->ns_inode_size;
974 srsz = NILFS_SR_BYTES(isz);
975
976 raw_sr->sr_bytes = cpu_to_le16(srsz);
977 raw_sr->sr_nongc_ctime
978 = cpu_to_le64(nilfs_doing_gc() ?
979 nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
980 raw_sr->sr_flags = 0;
981
982 nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
983 NILFS_SR_DAT_OFFSET(isz), 1);
984 nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
985 NILFS_SR_CPFILE_OFFSET(isz), 1);
986 nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
987 NILFS_SR_SUFILE_OFFSET(isz), 1);
988 memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
989}
990
991static void nilfs_redirty_inodes(struct list_head *head)
992{
993 struct nilfs_inode_info *ii;
994
995 list_for_each_entry(ii, head, i_dirty) {
996 if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
997 clear_bit(NILFS_I_COLLECTED, &ii->i_state);
998 }
999}
1000
1001static void nilfs_drop_collected_inodes(struct list_head *head)
1002{
1003 struct nilfs_inode_info *ii;
1004
1005 list_for_each_entry(ii, head, i_dirty) {
1006 if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
1007 continue;
1008
1009 clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
1010 set_bit(NILFS_I_UPDATED, &ii->i_state);
1011 }
1012}
1013
1014static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1015 struct inode *inode,
1016 struct list_head *listp,
1017 int (*collect)(struct nilfs_sc_info *,
1018 struct buffer_head *,
1019 struct inode *))
1020{
1021 struct buffer_head *bh, *n;
1022 int err = 0;
1023
1024 if (collect) {
1025 list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1026 list_del_init(&bh->b_assoc_buffers);
1027 err = collect(sci, bh, inode);
1028 brelse(bh);
1029 if (unlikely(err))
1030 goto dispose_buffers;
1031 }
1032 return 0;
1033 }
1034
1035 dispose_buffers:
1036 while (!list_empty(listp)) {
1037 bh = list_first_entry(listp, struct buffer_head,
1038 b_assoc_buffers);
1039 list_del_init(&bh->b_assoc_buffers);
1040 brelse(bh);
1041 }
1042 return err;
1043}
1044
1045static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1046{
1047 /* Remaining number of blocks within segment buffer */
1048 return sci->sc_segbuf_nblocks -
1049 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1050}
1051
1052static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1053 struct inode *inode,
1054 const struct nilfs_sc_operations *sc_ops)
1055{
1056 LIST_HEAD(data_buffers);
1057 LIST_HEAD(node_buffers);
1058 int err;
1059
1060 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1061 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1062
1063 n = nilfs_lookup_dirty_data_buffers(
1064 inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1065 if (n > rest) {
1066 err = nilfs_segctor_apply_buffers(
1067 sci, inode, &data_buffers,
1068 sc_ops->collect_data);
1069 BUG_ON(!err); /* always receive -E2BIG or true error */
1070 goto break_or_fail;
1071 }
1072 }
1073 nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1074
1075 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1076 err = nilfs_segctor_apply_buffers(
1077 sci, inode, &data_buffers, sc_ops->collect_data);
1078 if (unlikely(err)) {
1079 /* dispose node list */
1080 nilfs_segctor_apply_buffers(
1081 sci, inode, &node_buffers, NULL);
1082 goto break_or_fail;
1083 }
1084 sci->sc_stage.flags |= NILFS_CF_NODE;
1085 }
1086 /* Collect node */
1087 err = nilfs_segctor_apply_buffers(
1088 sci, inode, &node_buffers, sc_ops->collect_node);
1089 if (unlikely(err))
1090 goto break_or_fail;
1091
1092 nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1093 err = nilfs_segctor_apply_buffers(
1094 sci, inode, &node_buffers, sc_ops->collect_bmap);
1095 if (unlikely(err))
1096 goto break_or_fail;
1097
1098 nilfs_segctor_end_finfo(sci, inode);
1099 sci->sc_stage.flags &= ~NILFS_CF_NODE;
1100
1101 break_or_fail:
1102 return err;
1103}
1104
1105static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1106 struct inode *inode)
1107{
1108 LIST_HEAD(data_buffers);
1109 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1110 int err;
1111
1112 n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1113 sci->sc_dsync_start,
1114 sci->sc_dsync_end);
1115
1116 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1117 nilfs_collect_file_data);
1118 if (!err) {
1119 nilfs_segctor_end_finfo(sci, inode);
1120 BUG_ON(n > rest);
1121 /* always receive -E2BIG or true error if n > rest */
1122 }
1123 return err;
1124}
1125
1126static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1127{
1128 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1129 struct list_head *head;
1130 struct nilfs_inode_info *ii;
1131 size_t ndone;
1132 int err = 0;
1133
1134 switch (nilfs_sc_cstage_get(sci)) {
1135 case NILFS_ST_INIT:
1136 /* Pre-processes */
1137 sci->sc_stage.flags = 0;
1138
1139 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1140 sci->sc_nblk_inc = 0;
1141 sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1142 if (mode == SC_LSEG_DSYNC) {
1143 nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
1144 goto dsync_mode;
1145 }
1146 }
1147
1148 sci->sc_stage.dirty_file_ptr = NULL;
1149 sci->sc_stage.gc_inode_ptr = NULL;
1150 if (mode == SC_FLUSH_DAT) {
1151 nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
1152 goto dat_stage;
1153 }
1154 nilfs_sc_cstage_inc(sci); /* Fall through */
1155 case NILFS_ST_GC:
1156 if (nilfs_doing_gc()) {
1157 head = &sci->sc_gc_inodes;
1158 ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1159 head, i_dirty);
1160 list_for_each_entry_continue(ii, head, i_dirty) {
1161 err = nilfs_segctor_scan_file(
1162 sci, &ii->vfs_inode,
1163 &nilfs_sc_file_ops);
1164 if (unlikely(err)) {
1165 sci->sc_stage.gc_inode_ptr = list_entry(
1166 ii->i_dirty.prev,
1167 struct nilfs_inode_info,
1168 i_dirty);
1169 goto break_or_fail;
1170 }
1171 set_bit(NILFS_I_COLLECTED, &ii->i_state);
1172 }
1173 sci->sc_stage.gc_inode_ptr = NULL;
1174 }
1175 nilfs_sc_cstage_inc(sci); /* Fall through */
1176 case NILFS_ST_FILE:
1177 head = &sci->sc_dirty_files;
1178 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1179 i_dirty);
1180 list_for_each_entry_continue(ii, head, i_dirty) {
1181 clear_bit(NILFS_I_DIRTY, &ii->i_state);
1182
1183 err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1184 &nilfs_sc_file_ops);
1185 if (unlikely(err)) {
1186 sci->sc_stage.dirty_file_ptr =
1187 list_entry(ii->i_dirty.prev,
1188 struct nilfs_inode_info,
1189 i_dirty);
1190 goto break_or_fail;
1191 }
1192 /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1193 /* XXX: required ? */
1194 }
1195 sci->sc_stage.dirty_file_ptr = NULL;
1196 if (mode == SC_FLUSH_FILE) {
1197 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1198 return 0;
1199 }
1200 nilfs_sc_cstage_inc(sci);
1201 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1202 /* Fall through */
1203 case NILFS_ST_IFILE:
1204 err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
1205 &nilfs_sc_file_ops);
1206 if (unlikely(err))
1207 break;
1208 nilfs_sc_cstage_inc(sci);
1209 /* Creating a checkpoint */
1210 err = nilfs_segctor_create_checkpoint(sci);
1211 if (unlikely(err))
1212 break;
1213 /* Fall through */
1214 case NILFS_ST_CPFILE:
1215 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1216 &nilfs_sc_file_ops);
1217 if (unlikely(err))
1218 break;
1219 nilfs_sc_cstage_inc(sci); /* Fall through */
1220 case NILFS_ST_SUFILE:
1221 err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1222 sci->sc_nfreesegs, &ndone);
1223 if (unlikely(err)) {
1224 nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1225 sci->sc_freesegs, ndone,
1226 NULL);
1227 break;
1228 }
1229 sci->sc_stage.flags |= NILFS_CF_SUFREED;
1230
1231 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1232 &nilfs_sc_file_ops);
1233 if (unlikely(err))
1234 break;
1235 nilfs_sc_cstage_inc(sci); /* Fall through */
1236 case NILFS_ST_DAT:
1237 dat_stage:
1238 err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
1239 &nilfs_sc_dat_ops);
1240 if (unlikely(err))
1241 break;
1242 if (mode == SC_FLUSH_DAT) {
1243 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1244 return 0;
1245 }
1246 nilfs_sc_cstage_inc(sci); /* Fall through */
1247 case NILFS_ST_SR:
1248 if (mode == SC_LSEG_SR) {
1249 /* Appending a super root */
1250 err = nilfs_segctor_add_super_root(sci);
1251 if (unlikely(err))
1252 break;
1253 }
1254 /* End of a logical segment */
1255 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1256 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1257 return 0;
1258 case NILFS_ST_DSYNC:
1259 dsync_mode:
1260 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1261 ii = sci->sc_dsync_inode;
1262 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1263 break;
1264
1265 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1266 if (unlikely(err))
1267 break;
1268 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1269 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1270 return 0;
1271 case NILFS_ST_DONE:
1272 return 0;
1273 default:
1274 BUG();
1275 }
1276
1277 break_or_fail:
1278 return err;
1279}
1280
1281/**
1282 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1283 * @sci: nilfs_sc_info
1284 * @nilfs: nilfs object
1285 */
1286static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1287 struct the_nilfs *nilfs)
1288{
1289 struct nilfs_segment_buffer *segbuf, *prev;
1290 __u64 nextnum;
1291 int err, alloc = 0;
1292
1293 segbuf = nilfs_segbuf_new(sci->sc_super);
1294 if (unlikely(!segbuf))
1295 return -ENOMEM;
1296
1297 if (list_empty(&sci->sc_write_logs)) {
1298 nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1299 nilfs->ns_pseg_offset, nilfs);
1300 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1301 nilfs_shift_to_next_segment(nilfs);
1302 nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1303 }
1304
1305 segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1306 nextnum = nilfs->ns_nextnum;
1307
1308 if (nilfs->ns_segnum == nilfs->ns_nextnum)
1309 /* Start from the head of a new full segment */
1310 alloc++;
1311 } else {
1312 /* Continue logs */
1313 prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1314 nilfs_segbuf_map_cont(segbuf, prev);
1315 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1316 nextnum = prev->sb_nextnum;
1317
1318 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1319 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1320 segbuf->sb_sum.seg_seq++;
1321 alloc++;
1322 }
1323 }
1324
1325 err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1326 if (err)
1327 goto failed;
1328
1329 if (alloc) {
1330 err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
1331 if (err)
1332 goto failed;
1333 }
1334 nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1335
1336 BUG_ON(!list_empty(&sci->sc_segbufs));
1337 list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1338 sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1339 return 0;
1340
1341 failed:
1342 nilfs_segbuf_free(segbuf);
1343 return err;
1344}
1345
1346static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1347 struct the_nilfs *nilfs, int nadd)
1348{
1349 struct nilfs_segment_buffer *segbuf, *prev;
1350 struct inode *sufile = nilfs->ns_sufile;
1351 __u64 nextnextnum;
1352 LIST_HEAD(list);
1353 int err, ret, i;
1354
1355 prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1356 /*
1357 * Since the segment specified with nextnum might be allocated during
1358 * the previous construction, the buffer including its segusage may
1359 * not be dirty. The following call ensures that the buffer is dirty
1360 * and will pin the buffer on memory until the sufile is written.
1361 */
1362 err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1363 if (unlikely(err))
1364 return err;
1365
1366 for (i = 0; i < nadd; i++) {
1367 /* extend segment info */
1368 err = -ENOMEM;
1369 segbuf = nilfs_segbuf_new(sci->sc_super);
1370 if (unlikely(!segbuf))
1371 goto failed;
1372
1373 /* map this buffer to region of segment on-disk */
1374 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1375 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1376
1377 /* allocate the next next full segment */
1378 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1379 if (unlikely(err))
1380 goto failed_segbuf;
1381
1382 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1383 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1384
1385 list_add_tail(&segbuf->sb_list, &list);
1386 prev = segbuf;
1387 }
1388 list_splice_tail(&list, &sci->sc_segbufs);
1389 return 0;
1390
1391 failed_segbuf:
1392 nilfs_segbuf_free(segbuf);
1393 failed:
1394 list_for_each_entry(segbuf, &list, sb_list) {
1395 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1396 WARN_ON(ret); /* never fails */
1397 }
1398 nilfs_destroy_logs(&list);
1399 return err;
1400}
1401
1402static void nilfs_free_incomplete_logs(struct list_head *logs,
1403 struct the_nilfs *nilfs)
1404{
1405 struct nilfs_segment_buffer *segbuf, *prev;
1406 struct inode *sufile = nilfs->ns_sufile;
1407 int ret;
1408
1409 segbuf = NILFS_FIRST_SEGBUF(logs);
1410 if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1411 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1412 WARN_ON(ret); /* never fails */
1413 }
1414 if (atomic_read(&segbuf->sb_err)) {
1415 /* Case 1: The first segment failed */
1416 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1417 /*
1418 * Case 1a: Partial segment appended into an existing
1419 * segment
1420 */
1421 nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1422 segbuf->sb_fseg_end);
1423 else /* Case 1b: New full segment */
1424 set_nilfs_discontinued(nilfs);
1425 }
1426
1427 prev = segbuf;
1428 list_for_each_entry_continue(segbuf, logs, sb_list) {
1429 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1430 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1431 WARN_ON(ret); /* never fails */
1432 }
1433 if (atomic_read(&segbuf->sb_err) &&
1434 segbuf->sb_segnum != nilfs->ns_nextnum)
1435 /* Case 2: extended segment (!= next) failed */
1436 nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1437 prev = segbuf;
1438 }
1439}
1440
1441static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1442 struct inode *sufile)
1443{
1444 struct nilfs_segment_buffer *segbuf;
1445 unsigned long live_blocks;
1446 int ret;
1447
1448 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1449 live_blocks = segbuf->sb_sum.nblocks +
1450 (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1451 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1452 live_blocks,
1453 sci->sc_seg_ctime);
1454 WARN_ON(ret); /* always succeed because the segusage is dirty */
1455 }
1456}
1457
1458static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1459{
1460 struct nilfs_segment_buffer *segbuf;
1461 int ret;
1462
1463 segbuf = NILFS_FIRST_SEGBUF(logs);
1464 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1465 segbuf->sb_pseg_start -
1466 segbuf->sb_fseg_start, 0);
1467 WARN_ON(ret); /* always succeed because the segusage is dirty */
1468
1469 list_for_each_entry_continue(segbuf, logs, sb_list) {
1470 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1471 0, 0);
1472 WARN_ON(ret); /* always succeed */
1473 }
1474}
1475
1476static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1477 struct nilfs_segment_buffer *last,
1478 struct inode *sufile)
1479{
1480 struct nilfs_segment_buffer *segbuf = last;
1481 int ret;
1482
1483 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1484 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1485 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1486 WARN_ON(ret);
1487 }
1488 nilfs_truncate_logs(&sci->sc_segbufs, last);
1489}
1490
1491
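/*
 * nilfs_segctor_collect() runs the dirty block collection stages and, when
 * the current segment fills up (-E2BIG), extends the list of segment
 * buffers and retries from the saved stage, doubling the number of
 * segments added per retry up to SC_MAX_SEGDELTA.
 */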
1492static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1493 struct the_nilfs *nilfs, int mode)
1494{
1495 struct nilfs_cstage prev_stage = sci->sc_stage;
1496 int err, nadd = 1;
1497
1498 /* Collection retry loop */
1499 for (;;) {
1500 sci->sc_nblk_this_inc = 0;
1501 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1502
1503 err = nilfs_segctor_reset_segment_buffer(sci);
1504 if (unlikely(err))
1505 goto failed;
1506
1507 err = nilfs_segctor_collect_blocks(sci, mode);
1508 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1509 if (!err)
1510 break;
1511
1512 if (unlikely(err != -E2BIG))
1513 goto failed;
1514
1515 /* The current segment is filled up */
1516 if (mode != SC_LSEG_SR ||
1517 nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1518 break;
1519
1520 nilfs_clear_logs(&sci->sc_segbufs);
1521
1522 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1523 err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1524 sci->sc_freesegs,
1525 sci->sc_nfreesegs,
1526 NULL);
1527 WARN_ON(err); /* do not happen */
1528 sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1529 }
1530
1531 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1532 if (unlikely(err))
1533 return err;
1534
1535 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1536 sci->sc_stage = prev_stage;
1537 }
1538 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1539 return 0;
1540
1541 failed:
1542 return err;
1543}
1544
1545static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1546 struct buffer_head *new_bh)
1547{
1548 BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1549
1550 list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1551 /* The caller must release old_bh */
1552}
1553
1554static int
1555nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1556 struct nilfs_segment_buffer *segbuf,
1557 int mode)
1558{
1559 struct inode *inode = NULL;
1560 sector_t blocknr;
1561 unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1562 unsigned long nblocks = 0, ndatablk = 0;
1563 const struct nilfs_sc_operations *sc_op = NULL;
1564 struct nilfs_segsum_pointer ssp;
1565 struct nilfs_finfo *finfo = NULL;
1566 union nilfs_binfo binfo;
1567 struct buffer_head *bh, *bh_org;
1568 ino_t ino = 0;
1569 int err = 0;
1570
1571 if (!nfinfo)
1572 goto out;
1573
1574 blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1575 ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1576 ssp.offset = sizeof(struct nilfs_segment_summary);
1577
1578 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1579 if (bh == segbuf->sb_super_root)
1580 break;
1581 if (!finfo) {
1582 finfo = nilfs_segctor_map_segsum_entry(
1583 sci, &ssp, sizeof(*finfo));
1584 ino = le64_to_cpu(finfo->fi_ino);
1585 nblocks = le32_to_cpu(finfo->fi_nblocks);
1586 ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1587
1588 inode = bh->b_page->mapping->host;
1589
1590 if (mode == SC_LSEG_DSYNC)
1591 sc_op = &nilfs_sc_dsync_ops;
1592 else if (ino == NILFS_DAT_INO)
1593 sc_op = &nilfs_sc_dat_ops;
1594 else /* file blocks */
1595 sc_op = &nilfs_sc_file_ops;
1596 }
1597 bh_org = bh;
1598 get_bh(bh_org);
1599 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1600 &binfo);
1601 if (bh != bh_org)
1602 nilfs_list_replace_buffer(bh_org, bh);
1603 brelse(bh_org);
1604 if (unlikely(err))
1605 goto failed_bmap;
1606
1607 if (ndatablk > 0)
1608 sc_op->write_data_binfo(sci, &ssp, &binfo);
1609 else
1610 sc_op->write_node_binfo(sci, &ssp, &binfo);
1611
1612 blocknr++;
1613 if (--nblocks == 0) {
1614 finfo = NULL;
1615 if (--nfinfo == 0)
1616 break;
1617 } else if (ndatablk > 0)
1618 ndatablk--;
1619 }
1620 out:
1621 return 0;
1622
1623 failed_bmap:
1624 return err;
1625}
1626
1627static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1628{
1629 struct nilfs_segment_buffer *segbuf;
1630 int err;
1631
1632 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1633 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1634 if (unlikely(err))
1635 return err;
1636 nilfs_segbuf_fill_in_segsum(segbuf);
1637 }
1638 return 0;
1639}
1640
1641static void nilfs_begin_page_io(struct page *page)
1642{
1643 if (!page || PageWriteback(page))
1644 /*
1645 * For split b-tree node pages, this function may be called
1646 * twice. We ignore the 2nd or later calls by this check.
1647 */
1648 return;
1649
1650 lock_page(page);
1651 clear_page_dirty_for_io(page);
1652 set_page_writeback(page);
1653 unlock_page(page);
1654}
1655
1656static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1657{
1658 struct nilfs_segment_buffer *segbuf;
1659 struct page *bd_page = NULL, *fs_page = NULL;
1660
1661 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1662 struct buffer_head *bh;
1663
1664 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1665 b_assoc_buffers) {
1666 if (bh->b_page != bd_page) {
1667 if (bd_page) {
1668 lock_page(bd_page);
1669 clear_page_dirty_for_io(bd_page);
1670 set_page_writeback(bd_page);
1671 unlock_page(bd_page);
1672 }
1673 bd_page = bh->b_page;
1674 }
1675 }
1676
1677 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1678 b_assoc_buffers) {
1679 set_buffer_async_write(bh);
1680 if (bh == segbuf->sb_super_root) {
1681 if (bh->b_page != bd_page) {
1682 lock_page(bd_page);
1683 clear_page_dirty_for_io(bd_page);
1684 set_page_writeback(bd_page);
1685 unlock_page(bd_page);
1686 bd_page = bh->b_page;
1687 }
1688 break;
1689 }
1690 if (bh->b_page != fs_page) {
1691 nilfs_begin_page_io(fs_page);
1692 fs_page = bh->b_page;
1693 }
1694 }
1695 }
1696 if (bd_page) {
1697 lock_page(bd_page);
1698 clear_page_dirty_for_io(bd_page);
1699 set_page_writeback(bd_page);
1700 unlock_page(bd_page);
1701 }
1702 nilfs_begin_page_io(fs_page);
1703}
1704
1705static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1706 struct the_nilfs *nilfs)
1707{
1708 int ret;
1709
1710 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1711 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1712 return ret;
1713}
1714
1715static void nilfs_end_page_io(struct page *page, int err)
1716{
1717 if (!page)
1718 return;
1719
1720 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1721 /*
1722 * For b-tree node pages, this function may be called twice
1723 * or more because they might be split in a segment.
1724 */
1725 if (PageDirty(page)) {
1726 /*
1727 * For pages holding split b-tree node buffers, dirty
1728 * flag on the buffers may be cleared discretely.
1729 * In that case, the page is once redirtied for
1730 * remaining buffers, and it must be cancelled if
1731 * all the buffers get cleaned later.
1732 */
1733 lock_page(page);
1734 if (nilfs_page_buffers_clean(page))
1735 __nilfs_clear_page_dirty(page);
1736 unlock_page(page);
1737 }
9ff05123 1738 return;
a9777845 1739 }
9ff05123 1740
1cb2d38c
RK
1741 if (!err) {
1742 if (!nilfs_page_buffers_clean(page))
1743 __set_page_dirty_nobuffers(page);
1744 ClearPageError(page);
1745 } else {
1746 __set_page_dirty_nobuffers(page);
1747 SetPageError(page);
9ff05123 1748 }
1cb2d38c
RK
1749
1750 end_page_writeback(page);
9ff05123
RK
1751}
1752
1cb2d38c 1753static void nilfs_abort_logs(struct list_head *logs, int err)
9ff05123
RK
1754{
1755 struct nilfs_segment_buffer *segbuf;
1756 struct page *bd_page = NULL, *fs_page = NULL;
a694291a 1757 struct buffer_head *bh;
9ff05123 1758
a694291a
RK
1759 if (list_empty(logs))
1760 return;
9ff05123 1761
a694291a 1762 list_for_each_entry(segbuf, logs, sb_list) {
9ff05123
RK
1763 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1764 b_assoc_buffers) {
1765 if (bh->b_page != bd_page) {
1766 if (bd_page)
1767 end_page_writeback(bd_page);
1768 bd_page = bh->b_page;
1769 }
1770 }
1771
1772 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1773 b_assoc_buffers) {
7f42ec39 1774 clear_buffer_async_write(bh);
1e2b68bf 1775 if (bh == segbuf->sb_super_root) {
9ff05123
RK
1776 if (bh->b_page != bd_page) {
1777 end_page_writeback(bd_page);
1778 bd_page = bh->b_page;
1779 }
1780 break;
1781 }
1782 if (bh->b_page != fs_page) {
1783 nilfs_end_page_io(fs_page, err);
9ff05123
RK
1784 fs_page = bh->b_page;
1785 }
1786 }
1787 }
1788 if (bd_page)
1789 end_page_writeback(bd_page);
1790
1791 nilfs_end_page_io(fs_page, err);
a694291a
RK
1792}
1793
1794static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1795 struct the_nilfs *nilfs, int err)
1796{
1797 LIST_HEAD(logs);
1798 int ret;
1799
1800 list_splice_tail_init(&sci->sc_write_logs, &logs);
1801 ret = nilfs_wait_on_logs(&logs);
1cb2d38c 1802 nilfs_abort_logs(&logs, ret ? : err);
a694291a
RK
1803
1804 list_splice_tail_init(&sci->sc_segbufs, &logs);
1805 nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1806 nilfs_free_incomplete_logs(&logs, nilfs);
a694291a
RK
1807
1808 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1809 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1810 sci->sc_freesegs,
1811 sci->sc_nfreesegs,
1812 NULL);
1813 WARN_ON(ret); /* should never happen */
1814 }
1815
1816 nilfs_destroy_logs(&logs);
9ff05123
RK
1817}
1818
1819static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1820 struct nilfs_segment_buffer *segbuf)
1821{
1822 nilfs->ns_segnum = segbuf->sb_segnum;
1823 nilfs->ns_nextnum = segbuf->sb_nextnum;
1824 nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1825 + segbuf->sb_sum.nblocks;
1826 nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1827 nilfs->ns_ctime = segbuf->sb_sum.ctime;
1828}
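/*
 * Editor's note (worked example, not part of the original source):
 * ns_pseg_offset above is the block offset of the next partial segment
 * within the full segment.  With hypothetical values
 * sb_fseg_start = 1024, sb_pseg_start = 1100 and sb_sum.nblocks = 20,
 * the next partial segment would start at offset
 * 1100 - 1024 + 20 = 96 blocks from the start of the segment.
 */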
1829
1830static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1831{
1832 struct nilfs_segment_buffer *segbuf;
1833 struct page *bd_page = NULL, *fs_page = NULL;
e3154e97 1834 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1e2b68bf 1835 int update_sr = false;
9ff05123 1836
a694291a 1837 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
9ff05123
RK
1838 struct buffer_head *bh;
1839
1840 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1841 b_assoc_buffers) {
1842 set_buffer_uptodate(bh);
1843 clear_buffer_dirty(bh);
1844 if (bh->b_page != bd_page) {
1845 if (bd_page)
1846 end_page_writeback(bd_page);
1847 bd_page = bh->b_page;
1848 }
1849 }
1850 /*
1851 * We assume that buffers belonging to the same page are
1852 * contiguous in the buffer list.
1853 * Under this assumption, the last BH of each page is
1854 * identifiable by a discontinuity of bh->b_page
1855 * (page != fs_page).
1856 *
1857 * For B-tree node blocks, however, this assumption is not
1858 * guaranteed. The cleanup code of B-tree node pages needs
1859 * special care.
1860 */
1861 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1862 b_assoc_buffers) {
4ce5c342 1863 const unsigned long set_bits = BIT(BH_Uptodate);
ead8ecff 1864 const unsigned long clear_bits =
4ce5c342
RK
1865 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1866 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1867 BIT(BH_NILFS_Redirected));
ead8ecff
RK
1868
1869 set_mask_bits(&bh->b_state, clear_bits, set_bits);
1e2b68bf 1870 if (bh == segbuf->sb_super_root) {
9ff05123
RK
1871 if (bh->b_page != bd_page) {
1872 end_page_writeback(bd_page);
1873 bd_page = bh->b_page;
1874 }
1e2b68bf 1875 update_sr = true;
9ff05123
RK
1876 break;
1877 }
1878 if (bh->b_page != fs_page) {
1879 nilfs_end_page_io(fs_page, 0);
1880 fs_page = bh->b_page;
1881 }
1882 }
1883
4762077c
RK
1884 if (!nilfs_segbuf_simplex(segbuf)) {
1885 if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
9ff05123
RK
1886 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1887 sci->sc_lseg_stime = jiffies;
1888 }
4762077c 1889 if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
9ff05123
RK
1890 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1891 }
1892 }
1893 /*
1894 * Since pages may span multiple segment buffers, the end of
1895 * the last page must be checked outside of the loop.
1896 */
1897 if (bd_page)
1898 end_page_writeback(bd_page);
1899
1900 nilfs_end_page_io(fs_page, 0);
1901
9ff05123
RK
1902 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1903
c1c1d709 1904 if (nilfs_doing_gc())
9ff05123 1905 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
c1c1d709 1906 else
9ff05123 1907 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
9ff05123
RK
1908
1909 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1910
a694291a 1911 segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
9ff05123
RK
1912 nilfs_set_next_segment(nilfs, segbuf);
1913
1914 if (update_sr) {
e2c7617a 1915 nilfs->ns_flushed_device = 0;
9ff05123 1916 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
e339ad31 1917 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
9ff05123 1918
c96fa464 1919 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
9ff05123
RK
1920 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1921 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
a694291a 1922 nilfs_segctor_clear_metadata_dirty(sci);
9ff05123
RK
1923 } else
1924 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1925}
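/*
 * Editor's sketch (illustrative only, not part of the original source):
 * the cleanup above relies on buffers of one page being adjacent in the
 * payload list, so a page is finished exactly when bh->b_page changes.
 * Stripped of the NILFS-specific flag handling, and using a hypothetical
 * finish_page() helper, the idiom is:
 *
 *	struct page *prev = NULL;
 *	struct buffer_head *bh;
 *
 *	list_for_each_entry(bh, &buffers, b_assoc_buffers) {
 *		if (bh->b_page != prev) {
 *			if (prev)
 *				finish_page(prev);
 *			prev = bh->b_page;
 *		}
 *	}
 *	if (prev)
 *		finish_page(prev);	(last page ends outside the loop)
 */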
1926
a694291a
RK
1927static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1928{
1929 int ret;
1930
1931 ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1932 if (!ret) {
1933 nilfs_segctor_complete_write(sci);
1934 nilfs_destroy_logs(&sci->sc_write_logs);
1935 }
1936 return ret;
1937}
1938
693dd321
RK
1939static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1940 struct the_nilfs *nilfs)
9ff05123
RK
1941{
1942 struct nilfs_inode_info *ii, *n;
e912a5b6 1943 struct inode *ifile = sci->sc_root->ifile;
9ff05123 1944
693dd321 1945 spin_lock(&nilfs->ns_inode_lock);
9ff05123 1946 retry:
693dd321 1947 list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
9ff05123
RK
1948 if (!ii->i_bh) {
1949 struct buffer_head *ibh;
1950 int err;
1951
693dd321 1952 spin_unlock(&nilfs->ns_inode_lock);
9ff05123 1953 err = nilfs_ifile_get_inode_block(
e912a5b6 1954 ifile, ii->vfs_inode.i_ino, &ibh);
9ff05123 1955 if (unlikely(err)) {
d6517deb
RK
1956 nilfs_msg(sci->sc_super, KERN_WARNING,
1957 "log writer: error %d getting inode block (ino=%lu)",
1958 err, ii->vfs_inode.i_ino);
9ff05123
RK
1959 return err;
1960 }
5fc7b141 1961 mark_buffer_dirty(ibh);
e912a5b6 1962 nilfs_mdt_mark_dirty(ifile);
693dd321 1963 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
1964 if (likely(!ii->i_bh))
1965 ii->i_bh = ibh;
1966 else
1967 brelse(ibh);
1968 goto retry;
1969 }
9ff05123
RK
1970
1971 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1972 set_bit(NILFS_I_BUSY, &ii->i_state);
eaae0f37 1973 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
9ff05123 1974 }
693dd321 1975 spin_unlock(&nilfs->ns_inode_lock);
9ff05123 1976
9ff05123
RK
1977 return 0;
1978}
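/*
 * Editor's note (illustrative, not part of the original source): the
 * function above uses the common "drop lock, allocate, retake lock,
 * recheck" idiom, since nilfs_ifile_get_inode_block() may sleep and
 * therefore cannot run under the ns_inode_lock spinlock.  In generic
 * form (allocate/release are hypothetical placeholders):
 *
 *	spin_lock(&lock);
 * retry:
 *	list_for_each_entry_safe(item, n, &list, entry) {
 *		if (!item->obj) {
 *			spin_unlock(&lock);
 *			obj = allocate();		(may sleep)
 *			spin_lock(&lock);
 *			if (!item->obj)
 *				item->obj = obj;
 *			else
 *				release(obj);	(lost the race)
 *			goto retry;		(list may have changed)
 *		}
 *	}
 *	spin_unlock(&lock);
 */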
1979
693dd321
RK
1980static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1981 struct the_nilfs *nilfs)
9ff05123 1982{
9ff05123 1983 struct nilfs_inode_info *ii, *n;
283ee148 1984 int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
7ef3ff2f 1985 int defer_iput = false;
9ff05123 1986
693dd321 1987 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
1988 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1989 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
6c43f410 1990 test_bit(NILFS_I_DIRTY, &ii->i_state))
9ff05123 1991 continue;
6c43f410 1992
9ff05123
RK
1993 clear_bit(NILFS_I_BUSY, &ii->i_state);
1994 brelse(ii->i_bh);
1995 ii->i_bh = NULL;
7ef3ff2f 1996 list_del_init(&ii->i_dirty);
283ee148 1997 if (!ii->vfs_inode.i_nlink || during_mount) {
7ef3ff2f 1998 /*
283ee148
RK
1999 * Defer calling iput() to avoid deadlocks if
2000 * i_nlink == 0 or mount is not yet finished.
7ef3ff2f
RK
2001 */
2002 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
2003 defer_iput = true;
2004 } else {
2005 spin_unlock(&nilfs->ns_inode_lock);
2006 iput(&ii->vfs_inode);
2007 spin_lock(&nilfs->ns_inode_lock);
2008 }
9ff05123 2009 }
693dd321 2010 spin_unlock(&nilfs->ns_inode_lock);
7ef3ff2f
RK
2011
2012 if (defer_iput)
2013 schedule_work(&sci->sc_iput_work);
9ff05123
RK
2014}
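/*
 * Editor's note (illustrative, not part of the original source):
 * iput() can trigger inode eviction and further filesystem activity,
 * which is not safe here when i_nlink == 0 or while the mount has not
 * finished, so such inodes are parked on sc_iput_queue and handed to
 * sc_iput_work instead of being released inline.  The general shape of
 * the deferral (names generic):
 *
 *	spin_lock(&lock);
 *	list_add_tail(&ii->i_dirty, &deferred_list);
 *	spin_unlock(&lock);
 *	schedule_work(&deferred_work);	(worker calls iput() later)
 */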
2015
9ff05123
RK
2016/*
2017 * Main procedure of segment constructor
2018 */
2019static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2020{
e3154e97 2021 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1e2b68bf 2022 int err;
9ff05123 2023
58497703 2024 nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
6c43f410 2025 sci->sc_cno = nilfs->ns_cno;
9ff05123 2026
693dd321 2027 err = nilfs_segctor_collect_dirty_files(sci, nilfs);
9ff05123
RK
2028 if (unlikely(err))
2029 goto out;
2030
e912a5b6 2031 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
9ff05123
RK
2032 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2033
2034 if (nilfs_segctor_clean(sci))
2035 goto out;
2036
2037 do {
2038 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2039
2040 err = nilfs_segctor_begin_construction(sci, nilfs);
2041 if (unlikely(err))
2042 goto out;
2043
2044 /* Update time stamp */
2045 sci->sc_seg_ctime = get_seconds();
2046
2047 err = nilfs_segctor_collect(sci, nilfs, mode);
2048 if (unlikely(err))
2049 goto failed;
2050
9ff05123 2051 /* Avoid empty segment */
58497703 2052 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
4762077c 2053 nilfs_segbuf_empty(sci->sc_curseg)) {
a694291a 2054 nilfs_segctor_abort_construction(sci, nilfs, 1);
9ff05123
RK
2055 goto out;
2056 }
2057
2058 err = nilfs_segctor_assign(sci, mode);
2059 if (unlikely(err))
2060 goto failed;
2061
9ff05123 2062 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
e912a5b6 2063 nilfs_segctor_fill_in_file_bmap(sci);
9ff05123 2064
1e2b68bf 2065 if (mode == SC_LSEG_SR &&
58497703 2066 nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
9ff05123
RK
2067 err = nilfs_segctor_fill_in_checkpoint(sci);
2068 if (unlikely(err))
a694291a 2069 goto failed_to_write;
9ff05123
RK
2070
2071 nilfs_segctor_fill_in_super_root(sci, nilfs);
2072 }
2073 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2074
2075 /* Write partial segments */
1cb2d38c 2076 nilfs_segctor_prepare_write(sci);
aaed1d5b
RK
2077
2078 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2079 nilfs->ns_crc_seed);
9ff05123 2080
9c965bac 2081 err = nilfs_segctor_write(sci, nilfs);
9ff05123
RK
2082 if (unlikely(err))
2083 goto failed_to_write;
2084
58497703 2085 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
09cbfeaf 2086 nilfs->ns_blocksize_bits != PAGE_SHIFT) {
a694291a
RK
2087 /*
2088 * At this point, we avoid double buffering
2089 * for blocksize < pagesize because page dirty
2090 * flag is turned off during write and dirty
2091 * buffers are not properly collected for
2092 * pages crossing over segments.
2093 */
2094 err = nilfs_segctor_wait(sci);
2095 if (err)
2096 goto failed_to_write;
2097 }
58497703 2098 } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
9ff05123 2099
9ff05123 2100 out:
693dd321 2101 nilfs_segctor_drop_written_files(sci, nilfs);
9ff05123
RK
2102 return err;
2103
2104 failed_to_write:
9ff05123
RK
2105 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2106 nilfs_redirty_inodes(&sci->sc_dirty_files);
9ff05123
RK
2107
2108 failed:
2109 if (nilfs_doing_gc())
2110 nilfs_redirty_inodes(&sci->sc_gc_inodes);
a694291a 2111 nilfs_segctor_abort_construction(sci, nilfs, err);
9ff05123
RK
2112 goto out;
2113}
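/*
 * Editor's summary (informal, derived from the function above, not part
 * of the original source): one invocation of the constructor roughly
 * does
 *
 *	collect dirty files;
 *	do {
 *		begin construction and stamp sc_seg_ctime;
 *		collect dirty blocks (staged state machine);
 *		assign on-disk block numbers and fill segment summaries;
 *		fill in checkpoint and super root for SC_LSEG_SR;
 *		update segment usage, add checksums, write the logs;
 *		wait for the I/O on the final pass, or on every pass
 *		when blocksize != pagesize;
 *	} while (stage != NILFS_ST_DONE);
 *	drop written files;
 */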
2114
2115/**
9ccf56c1 2116 * nilfs_segctor_start_timer - set timer of background write
9ff05123
RK
2117 * @sci: nilfs_sc_info
2118 *
2119 * If the timer has already been set, it ignores the new request.
2120 * This function MUST be called while holding the segment
2121 * semaphore.
2122 */
2123static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2124{
2125 spin_lock(&sci->sc_state_lock);
fdce895e
LH
2126 if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2127 sci->sc_timer.expires = jiffies + sci->sc_interval;
2128 add_timer(&sci->sc_timer);
9ff05123
RK
2129 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2130 }
2131 spin_unlock(&sci->sc_state_lock);
2132}
2133
2134static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2135{
2136 spin_lock(&sci->sc_state_lock);
4ce5c342 2137 if (!(sci->sc_flush_request & BIT(bn))) {
9ff05123
RK
2138 unsigned long prev_req = sci->sc_flush_request;
2139
4ce5c342 2140 sci->sc_flush_request |= BIT(bn);
9ff05123
RK
2141 if (!prev_req)
2142 wake_up(&sci->sc_wait_daemon);
2143 }
2144 spin_unlock(&sci->sc_state_lock);
2145}
2146
2147/**
2148 * nilfs_flush_segment - trigger a segment construction for resource control
2149 * @sb: super block
2150 * @ino: inode number of the file to be flushed out.
2151 */
2152void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2153{
e3154e97 2154 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2155 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123
RK
2156
2157 if (!sci || nilfs_doing_construction())
2158 return;
2159 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2160 /* assign bit 0 to data files */
2161}
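/*
 * Editor's note (illustrative, not part of the original source): the
 * flush request is recorded as a bit in sc_flush_request, so the inode
 * number passed in selects the bit: metadata files keep their own inode
 * number (the DAT file, for instance, maps to BIT(NILFS_DAT_INO)), while
 * all ordinary data files share bit 0.  For example (some_data_ino is a
 * hypothetical regular file):
 *
 *	nilfs_flush_segment(sb, some_data_ino);	(sets bit 0)
 *	nilfs_flush_segment(sb, NILFS_DAT_INO);	(sets BIT(NILFS_DAT_INO))
 */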
2162
9ff05123 2163struct nilfs_segctor_wait_request {
ac6424b9 2164 wait_queue_entry_t wq;
9ff05123
RK
2165 __u32 seq;
2166 int err;
2167 atomic_t done;
2168};
2169
2170static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2171{
2172 struct nilfs_segctor_wait_request wait_req;
2173 int err = 0;
2174
2175 spin_lock(&sci->sc_state_lock);
2176 init_wait(&wait_req.wq);
2177 wait_req.err = 0;
2178 atomic_set(&wait_req.done, 0);
2179 wait_req.seq = ++sci->sc_seq_request;
2180 spin_unlock(&sci->sc_state_lock);
2181
2182 init_waitqueue_entry(&wait_req.wq, current);
2183 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2184 set_current_state(TASK_INTERRUPTIBLE);
2185 wake_up(&sci->sc_wait_daemon);
2186
2187 for (;;) {
2188 if (atomic_read(&wait_req.done)) {
2189 err = wait_req.err;
2190 break;
2191 }
2192 if (!signal_pending(current)) {
2193 schedule();
2194 continue;
2195 }
2196 err = -ERESTARTSYS;
2197 break;
2198 }
2199 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2200 return err;
2201}
2202
2203static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2204{
2205 struct nilfs_segctor_wait_request *wrq, *n;
2206 unsigned long flags;
2207
2208 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2055da97 2209 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
9ff05123
RK
2210 if (!atomic_read(&wrq->done) &&
2211 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2212 wrq->err = err;
2213 atomic_set(&wrq->done, 1);
2214 }
2215 if (atomic_read(&wrq->done)) {
2216 wrq->wq.func(&wrq->wq,
2217 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2218 0, NULL);
2219 }
2220 }
2221 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2222}
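/*
 * Editor's note (worked example, not part of the original source):
 * nilfs_segctor_sync() and nilfs_segctor_wakeup() form a ticket-style
 * handshake: each waiter takes a sequence number (++sc_seq_request),
 * the daemon later publishes its progress in sc_seq_done, and every
 * waiter whose ticket is covered (checked with the wrap-safe
 * nilfs_cnt32_ge()) is marked done and woken.  With sc_seq_done == 7,
 * waiters holding tickets 6 and 7 complete while a waiter holding
 * ticket 8 keeps sleeping.
 */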
2223
2224/**
2225 * nilfs_construct_segment - construct a logical segment
2226 * @sb: super block
2227 *
2228 * Return Value: On success, 0 is returned. On errors, one of the following
2229 * negative error codes is returned.
2230 *
2231 * %-EROFS - Read only filesystem.
2232 *
2233 * %-EIO - I/O error
2234 *
2235 * %-ENOSPC - No space left on device (only in a panic state).
2236 *
2237 * %-ERESTARTSYS - Interrupted.
2238 *
2239 * %-ENOMEM - Insufficient memory available.
2240 */
2241int nilfs_construct_segment(struct super_block *sb)
2242{
e3154e97 2243 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2244 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123
RK
2245 struct nilfs_transaction_info *ti;
2246 int err;
2247
2248 if (!sci)
2249 return -EROFS;
2250
2251 /* A call inside transactions causes a deadlock. */
2252 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2253
2254 err = nilfs_segctor_sync(sci);
2255 return err;
2256}
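/*
 * Editor's sketch (hypothetical caller, for illustration only): a
 * sync-style path would call this outside of any transaction, e.g.
 *
 *	err = nilfs_construct_segment(sb);
 *	if (err)
 *		return err;	(-EROFS, -EIO, -ENOSPC, -ERESTARTSYS, -ENOMEM)
 */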
2257
2258/**
2259 * nilfs_construct_dsync_segment - construct a data-only logical segment
2260 * @sb: super block
f30bf3e4
RK
2261 * @inode: inode whose data blocks should be written out
2262 * @start: start byte offset
2263 * @end: end byte offset (inclusive)
9ff05123
RK
2264 *
2265 * Return Value: On success, 0 is returned. On errors, one of the following
2266 * negative error codes is returned.
2267 *
2268 * %-EROFS - Read only filesystem.
2269 *
2270 * %-EIO - I/O error
2271 *
2272 * %-ENOSPC - No space left on device (only in a panic state).
2273 *
2274 * %-ERESTARTSYS - Interrupted.
2275 *
2276 * %-ENOMEM - Insufficient memory available.
2277 */
f30bf3e4
RK
2278int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2279 loff_t start, loff_t end)
9ff05123 2280{
e3154e97 2281 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2282 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123
RK
2283 struct nilfs_inode_info *ii;
2284 struct nilfs_transaction_info ti;
2285 int err = 0;
2286
2287 if (!sci)
2288 return -EROFS;
2289
f7545144 2290 nilfs_transaction_lock(sb, &ti, 0);
9ff05123
RK
2291
2292 ii = NILFS_I(inode);
b9f66140 2293 if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
3b2ce58b 2294 nilfs_test_opt(nilfs, STRICT_ORDER) ||
9ff05123 2295 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
3b2ce58b 2296 nilfs_discontinued(nilfs)) {
f7545144 2297 nilfs_transaction_unlock(sb);
9ff05123
RK
2298 err = nilfs_segctor_sync(sci);
2299 return err;
2300 }
2301
693dd321 2302 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
2303 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2304 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
693dd321 2305 spin_unlock(&nilfs->ns_inode_lock);
f7545144 2306 nilfs_transaction_unlock(sb);
9ff05123
RK
2307 return 0;
2308 }
693dd321 2309 spin_unlock(&nilfs->ns_inode_lock);
f30bf3e4
RK
2310 sci->sc_dsync_inode = ii;
2311 sci->sc_dsync_start = start;
2312 sci->sc_dsync_end = end;
9ff05123
RK
2313
2314 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
e2c7617a
AR
2315 if (!err)
2316 nilfs->ns_flushed_device = 0;
9ff05123 2317
f7545144 2318 nilfs_transaction_unlock(sb);
9ff05123
RK
2319 return err;
2320}
2321
9ff05123 2322#define FLUSH_FILE_BIT (0x1) /* data file only */
4ce5c342 2323#define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */
9ff05123 2324
dcd76186
RK
2325/**
2326 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2327 * @sci: segment constructor object
2328 */
2329static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
9ff05123 2330{
9ff05123 2331 spin_lock(&sci->sc_state_lock);
dcd76186 2332 sci->sc_seq_accepted = sci->sc_seq_request;
9ff05123 2333 spin_unlock(&sci->sc_state_lock);
fdce895e 2334 del_timer_sync(&sci->sc_timer);
9ff05123
RK
2335}
2336
dcd76186
RK
2337/**
2338 * nilfs_segctor_notify - notify the result of request to caller threads
2339 * @sci: segment constructor object
2340 * @mode: mode of log forming
2341 * @err: error code to be notified
2342 */
2343static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
9ff05123
RK
2344{
2345 /* Clear requests (even when the construction failed) */
2346 spin_lock(&sci->sc_state_lock);
2347
dcd76186 2348 if (mode == SC_LSEG_SR) {
aeda7f63 2349 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
dcd76186
RK
2350 sci->sc_seq_done = sci->sc_seq_accepted;
2351 nilfs_segctor_wakeup(sci, err);
9ff05123 2352 sci->sc_flush_request = 0;
aeda7f63 2353 } else {
dcd76186 2354 if (mode == SC_FLUSH_FILE)
aeda7f63 2355 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
dcd76186 2356 else if (mode == SC_FLUSH_DAT)
aeda7f63
RK
2357 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2358
2359 /* re-enable timer if checkpoint creation was not done */
fdce895e
LH
2360 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2361 time_before(jiffies, sci->sc_timer.expires))
2362 add_timer(&sci->sc_timer);
aeda7f63 2363 }
9ff05123
RK
2364 spin_unlock(&sci->sc_state_lock);
2365}
2366
dcd76186
RK
2367/**
2368 * nilfs_segctor_construct - form logs and write them to disk
2369 * @sci: segment constructor object
2370 * @mode: mode of log forming
2371 */
2372static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
9ff05123 2373{
e3154e97 2374 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
d26493b6 2375 struct nilfs_super_block **sbp;
9ff05123
RK
2376 int err = 0;
2377
dcd76186
RK
2378 nilfs_segctor_accept(sci);
2379
9ff05123 2380 if (nilfs_discontinued(nilfs))
dcd76186
RK
2381 mode = SC_LSEG_SR;
2382 if (!nilfs_segctor_confirm(sci))
2383 err = nilfs_segctor_do_construct(sci, mode);
2384
9ff05123 2385 if (likely(!err)) {
dcd76186 2386 if (mode != SC_FLUSH_DAT)
9ff05123
RK
2387 atomic_set(&nilfs->ns_ndirtyblks, 0);
2388 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2389 nilfs_discontinued(nilfs)) {
2390 down_write(&nilfs->ns_sem);
d26493b6 2391 err = -EIO;
f7545144 2392 sbp = nilfs_prepare_super(sci->sc_super,
b2ac86e1
JS
2393 nilfs_sb_will_flip(nilfs));
2394 if (likely(sbp)) {
2395 nilfs_set_log_cursor(sbp[0], nilfs);
f7545144
RK
2396 err = nilfs_commit_super(sci->sc_super,
2397 NILFS_SB_COMMIT);
b2ac86e1 2398 }
9ff05123
RK
2399 up_write(&nilfs->ns_sem);
2400 }
2401 }
dcd76186
RK
2402
2403 nilfs_segctor_notify(sci, mode, err);
9ff05123
RK
2404 return err;
2405}
2406
2407static void nilfs_construction_timeout(unsigned long data)
2408{
2409 struct task_struct *p = (struct task_struct *)data;
4ad364ca 2410
9ff05123
RK
2411 wake_up_process(p);
2412}
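/*
 * Editor's note (illustrative, not part of the original source): this
 * callback uses the older timer API in which the argument is passed via
 * the unsigned long ->data field.  nilfs_segctor_thread() stores its own
 * task_struct there and points sc_timer.function here, and
 * nilfs_segctor_start_timer() arms the timer with
 *
 *	sci->sc_timer.expires = jiffies + sci->sc_interval;
 *	add_timer(&sci->sc_timer);
 *
 * so expiry merely wakes the daemon to run another construction pass.
 */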
2413
2414static void
2415nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2416{
2417 struct nilfs_inode_info *ii, *n;
2418
2419 list_for_each_entry_safe(ii, n, head, i_dirty) {
2420 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2421 continue;
9ff05123 2422 list_del_init(&ii->i_dirty);
fbb24a3a
RK
2423 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2424 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
263d90ce 2425 iput(&ii->vfs_inode);
9ff05123
RK
2426 }
2427}
2428
4f6b8288
RK
2429int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2430 void **kbufs)
9ff05123 2431{
e3154e97 2432 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2433 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123 2434 struct nilfs_transaction_info ti;
9ff05123
RK
2435 int err;
2436
2437 if (unlikely(!sci))
2438 return -EROFS;
2439
f7545144 2440 nilfs_transaction_lock(sb, &ti, 1);
9ff05123 2441
c1c1d709 2442 err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
9ff05123
RK
2443 if (unlikely(err))
2444 goto out_unlock;
071cb4b8 2445
4f6b8288 2446 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
c1c1d709
RK
2447 if (unlikely(err)) {
2448 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
9ff05123 2449 goto out_unlock;
c1c1d709 2450 }
9ff05123 2451
071cb4b8
RK
2452 sci->sc_freesegs = kbufs[4];
2453 sci->sc_nfreesegs = argv[4].v_nmembs;
0935db74 2454 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
9ff05123
RK
2455
2456 for (;;) {
dcd76186 2457 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
9ff05123 2458 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
9ff05123
RK
2459
2460 if (likely(!err))
2461 break;
2462
d6517deb 2463 nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
9ff05123
RK
2464 set_current_state(TASK_INTERRUPTIBLE);
2465 schedule_timeout(sci->sc_interval);
2466 }
3b2ce58b 2467 if (nilfs_test_opt(nilfs, DISCARD)) {
e902ec99
JS
2468 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2469 sci->sc_nfreesegs);
2470 if (ret) {
feee880f
RK
2471 nilfs_msg(sb, KERN_WARNING,
2472 "error %d on discard request, turning discards off for the device",
2473 ret);
3b2ce58b 2474 nilfs_clear_opt(nilfs, DISCARD);
e902ec99
JS
2475 }
2476 }
9ff05123
RK
2477
2478 out_unlock:
071cb4b8
RK
2479 sci->sc_freesegs = NULL;
2480 sci->sc_nfreesegs = 0;
c1c1d709 2481 nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
f7545144 2482 nilfs_transaction_unlock(sb);
9ff05123
RK
2483 return err;
2484}
2485
2486static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2487{
9ff05123 2488 struct nilfs_transaction_info ti;
9ff05123 2489
f7545144 2490 nilfs_transaction_lock(sci->sc_super, &ti, 0);
dcd76186 2491 nilfs_segctor_construct(sci, mode);
9ff05123
RK
2492
2493 /*
2494 * An unclosed segment should be retried; we do this using sc_timer.
2495 * Timeout of sc_timer will invoke a complete construction, which
2496 * closes the current logical segment.
2497 */
2498 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2499 nilfs_segctor_start_timer(sci);
2500
f7545144 2501 nilfs_transaction_unlock(sci->sc_super);
9ff05123
RK
2502}
2503
2504static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2505{
2506 int mode = 0;
9ff05123
RK
2507
2508 spin_lock(&sci->sc_state_lock);
2509 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2510 SC_FLUSH_DAT : SC_FLUSH_FILE;
2511 spin_unlock(&sci->sc_state_lock);
2512
2513 if (mode) {
09ef29e0 2514 nilfs_segctor_do_construct(sci, mode);
9ff05123
RK
2515
2516 spin_lock(&sci->sc_state_lock);
2517 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2518 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2519 spin_unlock(&sci->sc_state_lock);
2520 }
2521 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2522}
2523
2524static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2525{
2526 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2527 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2528 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2529 return SC_FLUSH_FILE;
2530 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2531 return SC_FLUSH_DAT;
2532 }
2533 return SC_LSEG_SR;
2534}
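/*
 * Editor's note (illustrative, not part of the original source): the
 * mode chosen above stays a light-weight flush only while the current
 * logical segment is either closed or still younger than sc_mjcp_freq:
 * then a pending request consisting solely of FLUSH_FILE_BIT yields
 * SC_FLUSH_FILE, and one consisting solely of FLUSH_DAT_BIT yields
 * SC_FLUSH_DAT.  Any other combination, or an unclosed segment older
 * than sc_mjcp_freq, falls back to SC_LSEG_SR so that a checkpoint is
 * written.
 */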
2535
2536/**
2537 * nilfs_segctor_thread - main loop of the segment constructor thread.
2538 * @arg: pointer to a struct nilfs_sc_info.
2539 *
2540 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2541 * to execute segment constructions.
2542 */
2543static int nilfs_segctor_thread(void *arg)
2544{
2545 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
e3154e97 2546 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
2547 int timeout = 0;
2548
fdce895e
LH
2549 sci->sc_timer.data = (unsigned long)current;
2550 sci->sc_timer.function = nilfs_construction_timeout;
9ff05123
RK
2551
2552 /* start sync. */
2553 sci->sc_task = current;
2554 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
feee880f
RK
2555 nilfs_msg(sci->sc_super, KERN_INFO,
2556 "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2557 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
9ff05123
RK
2558
2559 spin_lock(&sci->sc_state_lock);
2560 loop:
2561 for (;;) {
2562 int mode;
2563
2564 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2565 goto end_thread;
2566
2567 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2568 mode = SC_LSEG_SR;
7f00184e 2569 else if (sci->sc_flush_request)
9ff05123 2570 mode = nilfs_segctor_flush_mode(sci);
7f00184e
RK
2571 else
2572 break;
9ff05123
RK
2573
2574 spin_unlock(&sci->sc_state_lock);
2575 nilfs_segctor_thread_construct(sci, mode);
2576 spin_lock(&sci->sc_state_lock);
2577 timeout = 0;
2578 }
2579
2580
2581 if (freezing(current)) {
2582 spin_unlock(&sci->sc_state_lock);
a0acae0e 2583 try_to_freeze();
9ff05123
RK
2584 spin_lock(&sci->sc_state_lock);
2585 } else {
2586 DEFINE_WAIT(wait);
2587 int should_sleep = 1;
2588
2589 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2590 TASK_INTERRUPTIBLE);
2591
2592 if (sci->sc_seq_request != sci->sc_seq_done)
2593 should_sleep = 0;
2594 else if (sci->sc_flush_request)
2595 should_sleep = 0;
2596 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2597 should_sleep = time_before(jiffies,
fdce895e 2598 sci->sc_timer.expires);
9ff05123
RK
2599
2600 if (should_sleep) {
2601 spin_unlock(&sci->sc_state_lock);
2602 schedule();
2603 spin_lock(&sci->sc_state_lock);
2604 }
2605 finish_wait(&sci->sc_wait_daemon, &wait);
2606 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
fdce895e 2607 time_after_eq(jiffies, sci->sc_timer.expires));
e605f0a7
RK
2608
2609 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
1dfa2710 2610 set_nilfs_discontinued(nilfs);
9ff05123
RK
2611 }
2612 goto loop;
2613
2614 end_thread:
2615 spin_unlock(&sci->sc_state_lock);
9ff05123
RK
2616
2617 /* end sync. */
2618 sci->sc_task = NULL;
2619 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2620 return 0;
2621}
2622
2623static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2624{
2625 struct task_struct *t;
2626
2627 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2628 if (IS_ERR(t)) {
2629 int err = PTR_ERR(t);
2630
feee880f
RK
2631 nilfs_msg(sci->sc_super, KERN_ERR,
2632 "error %d creating segctord thread", err);
9ff05123
RK
2633 return err;
2634 }
2635 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2636 return 0;
2637}
2638
2639static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
6b81e14e
JS
2640 __acquires(&sci->sc_state_lock)
2641 __releases(&sci->sc_state_lock)
9ff05123
RK
2642{
2643 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2644
2645 while (sci->sc_task) {
2646 wake_up(&sci->sc_wait_daemon);
2647 spin_unlock(&sci->sc_state_lock);
2648 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2649 spin_lock(&sci->sc_state_lock);
2650 }
2651}
2652
9ff05123
RK
2653/*
2654 * Setup & clean-up functions
2655 */
f7545144 2656static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
e912a5b6 2657 struct nilfs_root *root)
9ff05123 2658{
e3154e97 2659 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
2660 struct nilfs_sc_info *sci;
2661
2662 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2663 if (!sci)
2664 return NULL;
2665
f7545144 2666 sci->sc_super = sb;
9ff05123 2667
e912a5b6
RK
2668 nilfs_get_root(root);
2669 sci->sc_root = root;
2670
9ff05123
RK
2671 init_waitqueue_head(&sci->sc_wait_request);
2672 init_waitqueue_head(&sci->sc_wait_daemon);
2673 init_waitqueue_head(&sci->sc_wait_task);
2674 spin_lock_init(&sci->sc_state_lock);
2675 INIT_LIST_HEAD(&sci->sc_dirty_files);
2676 INIT_LIST_HEAD(&sci->sc_segbufs);
a694291a 2677 INIT_LIST_HEAD(&sci->sc_write_logs);
9ff05123 2678 INIT_LIST_HEAD(&sci->sc_gc_inodes);
7ef3ff2f
RK
2679 INIT_LIST_HEAD(&sci->sc_iput_queue);
2680 INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
fdce895e 2681 init_timer(&sci->sc_timer);
9ff05123
RK
2682
2683 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2684 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2685 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2686
574e6c31 2687 if (nilfs->ns_interval)
071d73cf 2688 sci->sc_interval = HZ * nilfs->ns_interval;
574e6c31
RK
2689 if (nilfs->ns_watermark)
2690 sci->sc_watermark = nilfs->ns_watermark;
9ff05123
RK
2691 return sci;
2692}
2693
2694static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2695{
2696 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2697
076a378b
RK
2698 /*
2699 * The segctord thread was stopped and its timer was removed.
2700 * But some tasks remain.
2701 */
9ff05123 2702 do {
9ff05123 2703 struct nilfs_transaction_info ti;
9ff05123 2704
f7545144 2705 nilfs_transaction_lock(sci->sc_super, &ti, 0);
dcd76186 2706 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
f7545144 2707 nilfs_transaction_unlock(sci->sc_super);
9ff05123 2708
7ef3ff2f
RK
2709 flush_work(&sci->sc_iput_work);
2710
9ff05123
RK
2711 } while (ret && retrycount-- > 0);
2712}
2713
2714/**
2715 * nilfs_segctor_destroy - destroy the segment constructor.
2716 * @sci: nilfs_sc_info
2717 *
2718 * nilfs_segctor_destroy() kills the segctord thread and frees
2719 * the nilfs_sc_info struct.
2720 * Caller must hold the segment semaphore.
2721 */
2722static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2723{
e3154e97 2724 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
2725 int flag;
2726
693dd321 2727 up_write(&nilfs->ns_segctor_sem);
9ff05123
RK
2728
2729 spin_lock(&sci->sc_state_lock);
2730 nilfs_segctor_kill_thread(sci);
2731 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2732 || sci->sc_seq_request != sci->sc_seq_done);
2733 spin_unlock(&sci->sc_state_lock);
2734
7ef3ff2f
RK
2735 if (flush_work(&sci->sc_iput_work))
2736 flag = true;
2737
3256a055 2738 if (flag || !nilfs_segctor_confirm(sci))
9ff05123
RK
2739 nilfs_segctor_write_out(sci);
2740
9ff05123 2741 if (!list_empty(&sci->sc_dirty_files)) {
d6517deb
RK
2742 nilfs_msg(sci->sc_super, KERN_WARNING,
2743 "disposed unprocessed dirty file(s) when stopping log writer");
693dd321 2744 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
9ff05123 2745 }
9ff05123 2746
7ef3ff2f 2747 if (!list_empty(&sci->sc_iput_queue)) {
d6517deb
RK
2748 nilfs_msg(sci->sc_super, KERN_WARNING,
2749 "disposed unprocessed inode(s) in iput queue when stopping log writer");
7ef3ff2f
RK
2750 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2751 }
2752
1f5abe7e 2753 WARN_ON(!list_empty(&sci->sc_segbufs));
a694291a 2754 WARN_ON(!list_empty(&sci->sc_write_logs));
9ff05123 2755
e912a5b6
RK
2756 nilfs_put_root(sci->sc_root);
2757
693dd321 2758 down_write(&nilfs->ns_segctor_sem);
9ff05123 2759
fdce895e 2760 del_timer_sync(&sci->sc_timer);
9ff05123
RK
2761 kfree(sci);
2762}
2763
2764/**
f7545144
RK
2765 * nilfs_attach_log_writer - attach log writer
2766 * @sb: super block instance
e912a5b6 2767 * @root: root object of the current filesystem tree
9ff05123 2768 *
f7545144
RK
2769 * This allocates a log writer object, initializes it, and starts the
2770 * log writer.
9ff05123
RK
2771 *
2772 * Return Value: On success, 0 is returned. On error, one of the following
2773 * negative error codes is returned.
2774 *
2775 * %-ENOMEM - Insufficient memory available.
2776 */
f7545144 2777int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
9ff05123 2778{
e3154e97 2779 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
2780 int err;
2781
3fd3fe5a 2782 if (nilfs->ns_writer) {
fe5f171b
RK
2783 /*
2784 * This happens if the filesystem was remounted
2785 * read/write after nilfs_error degenerated it into a
2786 * read-only mount.
2787 */
f7545144 2788 nilfs_detach_log_writer(sb);
fe5f171b
RK
2789 }
2790
f7545144 2791 nilfs->ns_writer = nilfs_segctor_new(sb, root);
3fd3fe5a 2792 if (!nilfs->ns_writer)
9ff05123
RK
2793 return -ENOMEM;
2794
3fd3fe5a 2795 err = nilfs_segctor_start_thread(nilfs->ns_writer);
9ff05123 2796 if (err) {
3fd3fe5a
RK
2797 kfree(nilfs->ns_writer);
2798 nilfs->ns_writer = NULL;
9ff05123
RK
2799 }
2800 return err;
2801}
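/*
 * Editor's sketch (hypothetical, for illustration only): mount-time code
 * that has looked up the filesystem root would attach and later detach
 * the writer roughly as
 *
 *	err = nilfs_attach_log_writer(sb, root);
 *	if (err)
 *		goto failed;
 *	...
 *	nilfs_detach_log_writer(sb);	(on unmount or error)
 */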
2802
2803/**
f7545144
RK
2804 * nilfs_detach_log_writer - destroy log writer
2805 * @sb: super block instance
9ff05123 2806 *
f7545144
RK
2807 * This kills the log writer daemon, frees the log writer object, and
2808 * destroys the list of dirty files.
9ff05123 2809 */
f7545144 2810void nilfs_detach_log_writer(struct super_block *sb)
9ff05123 2811{
e3154e97 2812 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
2813 LIST_HEAD(garbage_list);
2814
2815 down_write(&nilfs->ns_segctor_sem);
3fd3fe5a
RK
2816 if (nilfs->ns_writer) {
2817 nilfs_segctor_destroy(nilfs->ns_writer);
2818 nilfs->ns_writer = NULL;
9ff05123
RK
2819 }
2820
2821 /* Forcibly free the list of dirty files */
693dd321
RK
2822 spin_lock(&nilfs->ns_inode_lock);
2823 if (!list_empty(&nilfs->ns_dirty_files)) {
2824 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
d6517deb
RK
2825 nilfs_msg(sb, KERN_WARNING,
2826 "disposed unprocessed dirty file(s) when detaching log writer");
9ff05123 2827 }
693dd321 2828 spin_unlock(&nilfs->ns_inode_lock);
9ff05123
RK
2829 up_write(&nilfs->ns_segctor_sem);
2830
693dd321 2831 nilfs_dispose_list(nilfs, &garbage_list, 1);
9ff05123 2832}