/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(condition)	BUG_ON(condition)
#define f2fs_down_write(x, y)	down_write_nest_lock(x, y)
#else
#define f2fs_bug_on(condition)
#define f2fs_down_write(x, y)	down_write(x)
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_FLUSH_MERGE		0x00000200

#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)

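/*
 * Illustrative usage (added for clarity, not part of the original header):
 * the option helpers paste the option name onto F2FS_MOUNT_ and operate on
 * the bitmask in sbi->mount_opt.opt, so mount-time parsing can simply do
 *
 *	set_opt(sbi, BG_GC);
 *	if (test_opt(sbi, DISCARD))
 *		clear_opt(sbi, DISCARD);
 */
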
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))

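/*
 * Note (added for clarity, not part of the original header): ver_after()
 * compares two checkpoint version numbers via a signed difference, the same
 * idiom as time_after(), so the comparison stays correct even if the 64-bit
 * counter were ever to wrap. E.g. ver_after(5, 3) is true because
 * (long long)(5 - 3) == 2 > 0.
 */
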
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

#define CRCPOLY_LE 0xedb88320

static inline __u32 f2fs_crc32(void *buf, size_t len)
{
	unsigned char *p = (unsigned char *)buf;
	__u32 crc = F2FS_SUPER_MAGIC;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
{
	return f2fs_crc32(buf, buf_size) == blk_crc;
}

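/*
 * Example (illustrative only, not part of the original header): checkpoint
 * blocks keep their CRC at the offset recorded in the checkpoint itself, so
 * a caller can verify a checkpoint block roughly as follows:
 *
 *	crc_offset = le32_to_cpu(ckpt->checksum_offset);
 *	crc = le32_to_cpu(*(__le32 *)((unsigned char *)ckpt + crc_offset));
 *	if (!f2fs_crc_valid(crc, ckpt, crc_offset))
 *		return -EINVAL;
 *
 * Note that f2fs_crc32() is seeded with F2FS_SUPER_MAGIC instead of the
 * conventional all-ones value, so it intentionally differs from crc32_le().
 */
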
/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/*
 * For CP/NAT/SIT/SSA readahead
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of block addresses to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t blkaddr;	/* block address to be discarded */
	int len;		/* # of consecutive blocks of the discard */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};

#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = nats_in_cursum(rs);
	rs->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = sits_in_cursum(rs);
	rs->n_sits = cpu_to_le16(before + i);
	return before;
}

/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS	FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS	FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS	FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS	FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
/*
 * An inode's xattrs are stored in one dedicated node block per file, which
 * keeps -1 as its node offset to distinguish it from index node blocks.
 * But since some bits of that field are used to mark the node block, the -1
 * is truncated to the bits that remain for the offset.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
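/*
 * Worked example (added for clarity, not part of the original header):
 * shifting (unsigned int)-1 left by OFFSET_BIT_SHIFT and back right again
 * clears the top OFFSET_BIT_SHIFT bits, leaving the largest value the offset
 * field can hold once the marker bits are excluded. If OFFSET_BIT_SHIFT were
 * 3, for instance, XATTR_NODE_OFFSET would evaluate to 0x1fffffff.
 */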
enum {
	ALLOC_NODE,		/* allocate a new node page if needed */
	LOOKUP_NODE,		/* look up a node without readahead */
	LOOKUP_NODE_RA,		/*
				 * look up a node with readahead called
				 * by get_data_block.
				 */
};

#define F2FS_LINK_MAX		32000	/* maximum link count per file */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	16	/* minimum extent length */

struct f2fs_inode_info;

struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02

#define DEF_DIR_LEVEL		0

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags;		/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	unsigned long long xattr_ver;	/* cp version of xattr modification */
	struct extent_info ext;		/* in-memory extent cache entry */
	struct dir_inode_entry *dirty_dir;	/* the pointer of dirty dir */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect the nat entry cache */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node ids */
	struct mutex build_lock;	/* lock for build free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};

/*
 * This structure is used as one of the function parameters.
 * All the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t data_blkaddr;		/* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change these numbers.
 * Instead, the active_logs=x mount option lets you use 2, 4, or 6 logs
 * according to the underlying devices (default: 6).
 * Just in case, the on-disk layout covers a maximum of 16 logs: 8 for data
 * and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};

struct flush_cmd {
	struct flush_cmd *next;
	struct completion wait;
	int ret;
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for small discard management */
	struct list_head discard_list;	/* 4KB discard list */
	int nr_discards;		/* # of discards in the list */
	int max_discards;		/* max. discards to be issued */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */

	/* for flush command control */
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	struct flush_cmd *issue_list;		/* list for command issue */
	struct flush_cmd *dispatch_list;	/* list for command dispatch */
	spinlock_t issue_lock;			/* for issue list lock */
	struct flush_cmd *issue_tail;		/* list tail of issue list */
};

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 *			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};

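/*
 * Clarifying note (not part of the original header): META_FLUSH is not a
 * real page type on disk; it only asks the I/O layer to flush the merged
 * META bio and wait for it. PAGE_TYPE_OF_BIO() folds it back to META, e.g.
 *
 *	PAGE_TYPE_OF_BIO(DATA)       == DATA
 *	PAGE_TYPE_OF_BIO(META_FLUSH) == META
 *
 * so per-type state such as sbi->write_io[] needs only NR_PAGE_TYPE entries.
 */
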
struct f2fs_io_info {
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	int rw;			/* contains R/RS/W/WS with REQ_META/REQ_PRIO */
};

#define is_read_io(rw)	(((rw) & 1) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
};

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info read_io;			/* for read bios */
	struct f2fs_bio_info write_io[NR_PAGE_TYPE];	/* for write bios */
	struct completion *wait_io;		/* for completion bios */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct mutex node_write;		/* locking node writes */
	struct mutex writepages;		/* mutex for writepages() */
	bool por_doing;				/* recovery is in progress or not */
	wait_queue_head_t cp_wait;

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	spinlock_t orphan_inode_lock;		/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */
	int dir_level;				/* directory level */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread *gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int inline_inode;			/* # of inline_data inodes */
	int bg_gc;				/* background gc calls */
	unsigned int n_dirty_dirs;		/* # of dir inodes */
#endif
	unsigned int last_victim[2];		/* last victim segment # */
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
{
	return le64_to_cpu(cp->checkpoint_ver);
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->cp_rwsem);
}

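/*
 * Usage sketch (added for clarity, not part of the original header):
 * ordinary operations take cp_rwsem as readers around work that must not
 * race with a checkpoint, while the checkpoint path takes it as a writer:
 *
 *	f2fs_lock_op(sbi);
 *	... allocate blocks, update node or dentry pages ...
 *	f2fs_unlock_op(sbi);
 *
 * f2fs_lock_all()/f2fs_unlock_all() form the writer side used by
 * write_checkpoint() to drain all in-flight operations first.
 */
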
/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	WARN_ON((nid >= NM_I(sbi)->max_nid));
	if (unlikely(nid >= NM_I(sbi)->max_nid))
		return -EINVAL;
	return 0;
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	if (F2FS_I(inode)->i_xattr_nid)
		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1;
	else
		return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS;
}

static inline bool f2fs_has_xattr_block(unsigned int ofs)
{
	return ofs == XATTR_NODE_OFFSET;
}

static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (unlikely(valid_block_count > sbi->user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi->total_valid_block_count < (block_t) count);
	f2fs_bug_on(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
	inc_page_count(F2FS_SB(inode->i_sb), F2FS_DIRTY_DENTS);
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
	if (!S_ISDIR(inode->i_mode))
		return;

	dec_page_count(F2FS_SB(inode->i_sb), F2FS_DIRTY_DENTS);
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_dirty_dents(struct inode *inode)
{
	return atomic_read(&F2FS_I(inode)->dirty_dents);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec *
					(1 << sbi->log_blocks_per_seg);
	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_block_count;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
	return &ckpt->sit_nat_version_bitmap + offset;
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = cur_cp_version(ckpt);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * odd-numbered checkpoints must be at cp segment 0,
	 * and even-numbered ones at cp segment 1
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}

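/*
 * Worked example (added for clarity, not part of the original header): with
 * cp_blkaddr == 512 and blocks_per_seg == 512, checkpoint version 7 (odd)
 * starts at block 512 while version 8 (even) starts at block 1024. The two
 * checkpoint packs alternate, so the previously valid checkpoint is never
 * overwritten in place.
 */
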
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + 1;
	if (unlikely(valid_block_count > sbi->user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks++;

	sbi->alloc_valid_block_count++;
	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode)
{
	spin_lock(&sbi->stat_lock);

	f2fs_bug_on(!sbi->total_valid_block_count);
	f2fs_bug_on(!sbi->total_valid_node_count);
	f2fs_bug_on(!inode->i_blocks);

	inode->i_blocks--;
	sbi->total_valid_node_count--;
	sbi->total_valid_block_count--;

	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	f2fs_bug_on(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_inode_count;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;
retry:
	entry = kmem_cache_alloc(cachep, flags);
	if (!entry) {
		cond_resched();
		goto retry;
	}

	return entry;
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);
	return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
		unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	raw_node = F2FS_NODE(node_page);
	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

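/*
 * Note (added for clarity, not part of the original header): unlike the
 * generic test_bit()/set_bit() helpers, these treat bit 0 as the most
 * significant bit of the first byte, matching the byte-oriented bitmaps
 * f2fs keeps on disk. For example, f2fs_set_bit(0, addr) sets 0x80 in
 * addr[0], and f2fs_set_bit(9, addr) sets 0x40 in addr[1].
 */
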
/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_UPDATE_DIR,		/* should update inode block for consistency */
	FI_DELAY_IPUT,		/* used for the recovery */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}

static inline void get_inline_info(struct f2fs_inode_info *fi,
					struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_inode_flag(fi, FI_INLINE_XATTR);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_inode_flag(fi, FI_INLINE_DATA);
}

static inline void set_raw_inline(struct f2fs_inode_info *fi,
					struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(fi, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(fi, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
}

static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
{
	if (f2fs_has_inline_xattr(&fi->vfs_inode))
		return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
	return DEF_ADDRS_PER_INODE;
}

static inline void *inline_xattr_addr(struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					F2FS_INLINE_XATTR_ADDRS]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return F2FS_INLINE_XATTR_ADDRS << 2;
	else
		return 0;
}

static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA);
}

static inline void *inline_data_addr(struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	return (void *)&(ri->i_addr[1]);
}

static inline int f2fs_readonly(struct super_block *sb)
{
	return sb->s_flags & MS_RDONLY;
}

static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
{
	set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
	sbi->sb->s_flags |= MS_RDONLY;
}

#define get_inode_mode(i) \
	((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
int truncate_blocks(struct inode *, u64);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
int try_to_free_nats(struct f2fs_sb_info *, int);
void update_inode(struct inode *, struct page *);
void update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
int update_dent_inode(struct inode *, const struct qstr *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
				inode);
}

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
bool fsync_mark_done(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int truncate_xattr_node(struct inode *, struct page *);
int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
void remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, struct node_info *, block_t);
bool recover_xattr_data(struct inode *, struct page *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
int issue_flush_thread(void *);
int f2fs_issue_flush(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
void discard_next_dnode(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_io_info *, unsigned int, block_t, block_t *);
void write_data_page(struct page *, struct dnode_of_data *, block_t *,
					struct f2fs_io_info *);
void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void allocate_data_block(struct f2fs_sb_info *, struct page *,
		block_t, block_t *, struct f2fs_summary *, int);
void f2fs_wait_on_page_writeback(struct page *, enum page_type);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
					int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
void recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int);
void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t,
						struct f2fs_io_info *);
int reserve_new_block(struct dnode_of_data *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct page *, struct f2fs_io_info *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
int recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	struct mutex stat_lock;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc, inline_inode;
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count, cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_cp_count(si)		((si)->cp_count++)
#define stat_inc_call_count(si)		((si)->call_count++)
#define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
#define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
#define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
#define stat_inc_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			((F2FS_SB(inode->i_sb))->inline_inode++);	\
	} while (0)
#define stat_dec_inline_inode(inode)					\
	do {								\
		if (f2fs_has_inline_data(inode))			\
			((F2FS_SB(inode->i_sb))->inline_inode--);	\
	} while (0)

#define stat_inc_seg_type(sbi, curseg)					\
		((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg)				\
		((sbi)->block_count[(curseg)->alloc_type]++)

#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_cp_count(si)
#define stat_inc_call_count(si)
#define stat_inc_bggc_count(si)
#define stat_inc_dirty_dir(sbi)
#define stat_dec_dirty_dir(sbi)
#define stat_inc_total_hit(sb)
#define stat_inc_read_hit(sb)
#define stat_inc_inline_inode(inode)
#define stat_dec_inline_inode(inode)
#define stat_inc_seg_type(sbi, curseg)
#define stat_inc_block_count(sbi, curseg)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;

/*
 * inline.c
 */
bool f2fs_may_inline(struct inode *);
int f2fs_read_inline_data(struct inode *, struct page *);
int f2fs_convert_inline_data(struct inode *, pgoff_t);
int f2fs_write_inline_data(struct inode *, struct page *, unsigned int);
int recover_inline_data(struct inode *, struct page *);
#endif