/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC                0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
#define F2FS_MOUNT_DISCARD              0x00000004
#define F2FS_MOUNT_NOHEAP               0x00000008
#define F2FS_MOUNT_XATTR_USER           0x00000010
#define F2FS_MOUNT_POSIX_ACL            0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040

#define clear_opt(sbi, option)  (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)    (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)   (sbi->mount_opt.opt & F2FS_MOUNT_##option)

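/*
 * Illustrative sketch (not from the original source): given an
 * f2fs_sb_info pointer "sbi", option bits are toggled and queried as
 *
 *      set_opt(sbi, BG_GC);
 *      if (test_opt(sbi, DISCARD))
 *              clear_opt(sbi, DISCARD);
 *
 * The macros paste the option name onto the F2FS_MOUNT_ prefix, so
 * callers pass only the suffix (BG_GC, DISCARD, ...).
 */
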
#define ver_after(a, b) (typecheck(unsigned long long, a) &&            \
                typecheck(unsigned long long, b) &&                     \
                ((long long)((a) - (b)) > 0))

typedef u32 block_t;    /*
                         * should not change u32, since it is the on-disk block
                         * address format, __le32.
                         */
typedef u32 nid_t;

struct f2fs_mount_info {
        unsigned int    opt;
};

#define CRCPOLY_LE 0xedb88320

static inline __u32 f2fs_crc32(void *buf, size_t len)
{
        unsigned char *p = (unsigned char *)buf;
        __u32 crc = F2FS_SUPER_MAGIC;
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
        }
        return crc;
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
{
        return f2fs_crc32(buf, buf_size) == blk_crc;
}

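/*
 * Illustrative sketch (not from the original source): checkpoint blocks
 * carry a CRC over the leading part of the block, so a verification
 * path might look roughly like this, assuming "cp" points to an
 * in-memory checkpoint block and crc_offset has been range-checked:
 *
 *      __u32 crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
 *      if (!f2fs_crc_valid(crc, cp, crc_offset))
 *              return -EINVAL;
 *
 * Note that f2fs_crc32() seeds the CRC with F2FS_SUPER_MAGIC rather
 * than the usual all-ones initial value.
 */
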
/*
 * For checkpoint manager
 */
enum {
        NAT_BITMAP,
        SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
        struct list_head list;  /* list head */
        nid_t ino;              /* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
        struct list_head list;  /* list head */
        struct inode *inode;    /* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
        struct list_head list;  /* list head */
        struct inode *inode;    /* vfs inode pointer */
        block_t blkaddr;        /* block address locating the last inode */
};

#define nats_in_cursum(sum)             (le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)             (le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)          (sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)          (sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)          (sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)        (sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
        int before = nats_in_cursum(rs);
        rs->n_nats = cpu_to_le16(before + i);
        return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
        int before = sits_in_cursum(rs);
        rs->n_sits = cpu_to_le16(before + i);
        return before;
}

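/*
 * Illustrative sketch (not from the original source): the two helpers
 * above reserve "i" journal slots in the current summary block and
 * return the previous entry count, which can then be used as the index
 * of the newly reserved slot, e.g.:
 *
 *      int offset = update_nats_in_cursum(sum, 1);
 *
 *      nid_in_journal(sum, offset) = cpu_to_le32(nid);
 *      nat_in_journal(sum, offset) = raw_nat;
 *
 * Here "sum", "nid" and "raw_nat" are hypothetical local variables.
 */
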
/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS               FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS               FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS             FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS             FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET       ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
                                >> OFFSET_BIT_SHIFT)
enum {
        ALLOC_NODE,             /* allocate a new node page if needed */
        LOOKUP_NODE,            /* look up a node without readahead */
        LOOKUP_NODE_RA,         /*
                                 * look up a node with readahead called
                                 * by get_datablock_ro.
                                 */
};

#define F2FS_LINK_MAX           32000   /* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
        rwlock_t ext_lock;      /* rwlock for consistency */
        unsigned int fofs;      /* start offset in a file */
        u32 blk_addr;           /* start block address of the extent */
        unsigned int len;       /* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT        0x01
#define FADVISE_LOST_PINO_BIT   0x02

struct f2fs_inode_info {
        struct inode vfs_inode;         /* serve a vfs inode */
        unsigned long i_flags;          /* keep an inode flags for ioctl */
        unsigned char i_advise;         /* use to give file attribute hints */
        unsigned int i_current_depth;   /* use only in directory structure */
        unsigned int i_pino;            /* parent inode number */
        umode_t i_acl_mode;             /* keep file acl mode temporarily */

        /* Use below internally in f2fs */
        unsigned long flags;            /* use to pass per-file flags */
        atomic_t dirty_dents;           /* # of dirty dentry pages */
        f2fs_hash_t chash;              /* hash value of given file name */
        unsigned int clevel;            /* maximum level of given file name */
        nid_t i_xattr_nid;              /* node id that contains xattrs */
        struct extent_info ext;         /* in-memory extent cache entry */
};

static inline void get_extent_info(struct extent_info *ext,
                                        struct f2fs_extent i_ext)
{
        write_lock(&ext->ext_lock);
        ext->fofs = le32_to_cpu(i_ext.fofs);
        ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
        ext->len = le32_to_cpu(i_ext.len);
        write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
                                        struct f2fs_extent *i_ext)
{
        read_lock(&ext->ext_lock);
        i_ext->fofs = cpu_to_le32(ext->fofs);
        i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
        i_ext->len = cpu_to_le32(ext->len);
        read_unlock(&ext->ext_lock);
}

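/*
 * Illustrative sketch (not from the original source): the single-entry
 * extent cache maps a contiguous file range to contiguous on-disk
 * blocks, so a lookup for page offset "pgofs" can be answered without
 * reading the node page whenever the offset falls inside the cached
 * range.  With "fi" a struct f2fs_inode_info pointer:
 *
 *      read_lock(&fi->ext.ext_lock);
 *      if (fi->ext.len && pgofs >= fi->ext.fofs &&
 *                      pgofs < fi->ext.fofs + fi->ext.len)
 *              blk_addr = fi->ext.blk_addr + (pgofs - fi->ext.fofs);
 *      read_unlock(&fi->ext.ext_lock);
 *
 * The full check, including hit statistics, lives in data.c.
 */
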
struct f2fs_nm_info {
        block_t nat_blkaddr;            /* base disk address of NAT */
        nid_t max_nid;                  /* maximum possible node ids */
        nid_t next_scan_nid;            /* the next nid to be scanned */

        /* NAT cache management */
        struct radix_tree_root nat_root;/* root of the nat entry cache */
        rwlock_t nat_tree_lock;         /* protect the nat entry cache */
        unsigned int nat_cnt;           /* the # of cached nat entries */
        struct list_head nat_entries;   /* cached nat entry list (clean) */
        struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

        /* free node ids management */
        struct list_head free_nid_list; /* a list for free nids */
        spinlock_t free_nid_list_lock;  /* protect free nid list */
        unsigned int fcnt;              /* the number of free node ids */
        struct mutex build_lock;        /* lock for building free nids */

        /* for checkpoint */
        char *nat_bitmap;               /* NAT bitmap pointer */
        int bitmap_size;                /* bitmap size */
};

/*
 * This structure is used as a function parameter.
 * All the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
        struct inode *inode;            /* vfs inode pointer */
        struct page *inode_page;        /* its inode page, NULL is possible */
        struct page *node_page;         /* cached direct node page */
        nid_t nid;                      /* node id of the direct node block */
        unsigned int ofs_in_node;       /* data offset in the node page */
        bool inode_page_locked;         /* inode page is locked or not */
        block_t data_blkaddr;           /* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
                struct page *ipage, struct page *npage, nid_t nid)
{
        memset(dn, 0, sizeof(*dn));
        dn->inode = inode;
        dn->inode_page = ipage;
        dn->node_page = npage;
        dn->nid = nid;
}

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE     (3)
#define NR_CURSEG_NODE_TYPE     (3)
#define NR_CURSEG_TYPE          (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
        CURSEG_HOT_DATA = 0,    /* directory entry blocks */
        CURSEG_WARM_DATA,       /* data blocks */
        CURSEG_COLD_DATA,       /* multimedia or GCed data blocks */
        CURSEG_HOT_NODE,        /* direct node blocks of directory files */
        CURSEG_WARM_NODE,       /* direct node blocks of normal files */
        CURSEG_COLD_NODE,       /* indirect node blocks */
        NO_CHECK_TYPE
};

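/*
 * Illustrative sketch (not from the original source): each CURSEG_*
 * type above indexes one open log.  With the default active_logs=6 all
 * six are used; with active_logs=4 or 2 several temperature classes
 * share a log, which is decided at segment-selection time in segment.c.
 * The current segment of a given log can be read via CURSEG_I() from
 * segment.h, e.g.:
 *
 *      struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 *      unsigned int segno = curseg->segno;
 */
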
struct f2fs_sm_info {
        struct sit_info *sit_info;              /* whole segment information */
        struct free_segmap_info *free_info;     /* free segment information */
        struct dirty_seglist_info *dirty_info;  /* dirty segment information */
        struct curseg_info *curseg_array;       /* active segment information */

        struct list_head wblist_head;   /* list of under-writeback pages */
        spinlock_t wblist_lock;         /* lock for checkpoint */

        block_t seg0_blkaddr;           /* block address of 0'th segment */
        block_t main_blkaddr;           /* start block address of main area */
        block_t ssa_blkaddr;            /* start block address of SSA area */

        unsigned int segment_count;     /* total # of segments */
        unsigned int main_segments;     /* # of segments in main area */
        unsigned int reserved_segments; /* # of reserved segments */
        unsigned int ovp_segments;      /* # of overprovision segments */
};

/*
 * For directory operation
 */
#define NODE_DIR1_BLOCK         (ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK         (ADDRS_PER_INODE + 2)
#define NODE_IND1_BLOCK         (ADDRS_PER_INODE + 3)
#define NODE_IND2_BLOCK         (ADDRS_PER_INODE + 4)
#define NODE_DIND_BLOCK         (ADDRS_PER_INODE + 5)

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
        F2FS_WRITEBACK,
        F2FS_DIRTY_DENTS,
        F2FS_DIRTY_NODES,
        F2FS_DIRTY_META,
        NR_COUNT_TYPE,
};

/*
 * Used as sbi->fs_lock[NR_GLOBAL_LOCKS].
 * The checkpoint procedure blocks all of these locks at once.
 * Other FS operations grab any free lock; if none is free, they wait
 * on a lock chosen in a round-robin manner.
 */
#define NR_GLOBAL_LOCKS 8

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA                 User data pages.  They are written asynchronously.
 * NODE                 Node pages.  They are written asynchronously.
 * META                 FS metadata pages such as SIT, NAT, and CP blocks.
 * NR_PAGE_TYPE         The number of page types.
 * META_FLUSH           Make sure the previously submitted pages are
 *                      written, waiting for the bio's completion.
 *                      Can only be used with META.
 */
enum page_type {
        DATA,
        NODE,
        META,
        NR_PAGE_TYPE,
        META_FLUSH,
};

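/*
 * Illustrative sketch (not from the original source): DATA and NODE
 * bios are merged per type in sbi->bio[] and submitted asynchronously,
 * while a caller that must make queued metadata durable (e.g. the
 * checkpoint path) submits with META_FLUSH and waits, roughly:
 *
 *      f2fs_submit_bio(sbi, META_FLUSH, true);
 */
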
struct f2fs_sb_info {
        struct super_block *sb;                 /* pointer to VFS super block */
        struct proc_dir_entry *s_proc;          /* proc entry */
        struct buffer_head *raw_super_buf;      /* buffer head of raw sb */
        struct f2fs_super_block *raw_super;     /* raw super block pointer */
        int s_dirty;                            /* dirty flag for checkpoint */

        /* for node-related operations */
        struct f2fs_nm_info *nm_info;           /* node manager */
        struct inode *node_inode;               /* cache node blocks */

        /* for segment-related operations */
        struct f2fs_sm_info *sm_info;           /* segment manager */
        struct bio *bio[NR_PAGE_TYPE];          /* bios to merge */
        sector_t last_block_in_bio[NR_PAGE_TYPE];       /* last block number */
        struct rw_semaphore bio_sem;            /* IO semaphore */

        /* for checkpoint */
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
        struct mutex fs_lock[NR_GLOBAL_LOCKS];  /* blocking FS operations */
        struct mutex node_write;                /* locking node writes */
        struct mutex writepages;                /* mutex for writepages() */
        unsigned char next_lock_num;            /* round-robin global locks */
        int por_doing;                          /* recovery is doing or not */
        int on_build_free_nids;                 /* build_free_nids is doing */

        /* for orphan inode management */
        struct list_head orphan_inode_list;     /* orphan inode list */
        struct mutex orphan_inode_mutex;        /* for orphan inode list */
        unsigned int n_orphans;                 /* # of orphan inodes */

        /* for directory inode management */
        struct list_head dir_inode_list;        /* dir inode list */
        spinlock_t dir_inode_lock;              /* for dir inode list lock */

        /* basic file system units */
        unsigned int log_sectors_per_block;     /* log2 sectors per block */
        unsigned int log_blocksize;             /* log2 block size */
        unsigned int blocksize;                 /* block size */
        unsigned int root_ino_num;              /* root inode number */
        unsigned int node_ino_num;              /* node inode number */
        unsigned int meta_ino_num;              /* meta inode number */
        unsigned int log_blocks_per_seg;        /* log2 blocks per segment */
        unsigned int blocks_per_seg;            /* blocks per segment */
        unsigned int segs_per_sec;              /* segments per section */
        unsigned int secs_per_zone;             /* sections per zone */
        unsigned int total_sections;            /* total section count */
        unsigned int total_node_count;          /* total node block count */
        unsigned int total_valid_node_count;    /* valid node block count */
        unsigned int total_valid_inode_count;   /* valid inode count */
        int active_logs;                        /* # of active logs */

        block_t user_block_count;               /* # of user blocks */
        block_t total_valid_block_count;        /* # of valid blocks */
        block_t alloc_valid_block_count;        /* # of allocated blocks */
        block_t last_valid_block_count;         /* for recovery */
        u32 s_next_generation;                  /* for NFS support */
        atomic_t nr_pages[NR_COUNT_TYPE];       /* # of pages, see count_type */

        struct f2fs_mount_info mount_opt;       /* mount options */

        /* for cleaning operations */
        struct mutex gc_mutex;                  /* mutex for GC */
        struct f2fs_gc_kthread *gc_thread;      /* GC thread */
        unsigned int cur_victim_sec;            /* current victim section num */

        /*
         * for stat information.
         * one is for the LFS mode, and the other is for the SSR mode.
         */
#ifdef CONFIG_F2FS_STAT_FS
        struct f2fs_stat_info *stat_info;       /* FS status information */
        unsigned int segment_count[2];          /* # of allocated segments */
        unsigned int block_count[2];            /* # of allocated blocks */
        int total_hit_ext, read_hit_ext;        /* extent cache hit ratio */
        int bg_gc;                              /* background gc calls */
        unsigned int n_dirty_dirs;              /* # of dir inodes */
#endif
        unsigned int last_victim[2];            /* last victim segment # */
        spinlock_t stat_lock;                   /* lock for stat operations */

        /* For sysfs support */
        struct kobject s_kobj;
        struct completion s_kobj_unregister;
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
        return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
        return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
        return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
        return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
        return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
        sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
        sbi->s_dirty = 0;
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        ckpt_flags |= f;
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        ckpt_flags &= (~f);
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

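/*
 * Illustrative sketch (not from the original source): checkpoint flag
 * bits such as CP_ORPHAN_PRESENT_FLAG or CP_UMOUNT_FLAG (defined in
 * include/linux/f2fs_fs.h) are toggled on the in-memory checkpoint
 * before it is written out, roughly:
 *
 *      struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 *
 *      if (sbi->n_orphans)
 *              set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
 *      else
 *              clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
 */
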
static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
{
        int i;

        for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
                /*
                 * This is the only time we take multiple fs_lock[]
                 * instances; the order is immaterial since we
                 * always hold cp_mutex, which serializes multiple
                 * such operations.
                 */
                mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
        }
}

static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
{
        int i = 0;
        for (; i < NR_GLOBAL_LOCKS; i++)
                mutex_unlock(&sbi->fs_lock[i]);
}

static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
{
        unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
        int i = 0;

        for (; i < NR_GLOBAL_LOCKS; i++)
                if (mutex_trylock(&sbi->fs_lock[i]))
                        return i;

        mutex_lock(&sbi->fs_lock[next_lock]);
        sbi->next_lock_num++;
        return next_lock;
}

static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
{
        if (ilock < 0)
                return;
        BUG_ON(ilock >= NR_GLOBAL_LOCKS);
        mutex_unlock(&sbi->fs_lock[ilock]);
}

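/*
 * Illustrative sketch (not from the original source): a typical FS
 * operation brackets its metadata updates with one of the global locks
 * and returns the slot it was given:
 *
 *      int ilock = mutex_lock_op(sbi);
 *
 *      ... update node/dentry blocks ...
 *
 *      mutex_unlock_op(sbi, ilock);
 *
 * The checkpoint path takes every slot via mutex_lock_all() instead,
 * which makes it exclusive against all such operations.
 */
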
/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
        WARN_ON((nid >= NM_I(sbi)->max_nid));
        if (nid >= NM_I(sbi)->max_nid)
                return -EINVAL;
        return 0;
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
        if (F2FS_I(inode)->i_xattr_nid)
                return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
        else
                return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

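/*
 * Illustrative note (not from the original source): within f2fs,
 * i_blocks counts filesystem blocks, and the inode block itself
 * accounts for F2FS_DEFAULT_ALLOCATED_BLOCKS (1); an xattr node block,
 * if present, adds one more.  F2FS_HAS_BLOCKS() therefore reports true
 * only once at least one data or index block exists beyond that
 * baseline.
 */
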
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
                                 struct inode *inode, blkcnt_t count)
{
        block_t valid_block_count;

        spin_lock(&sbi->stat_lock);
        valid_block_count =
                sbi->total_valid_block_count + (block_t)count;
        if (valid_block_count > sbi->user_block_count) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }
        inode->i_blocks += count;
        sbi->total_valid_block_count = valid_block_count;
        sbi->alloc_valid_block_count += (block_t)count;
        spin_unlock(&sbi->stat_lock);
        return true;
}

static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                blkcnt_t count)
{
        spin_lock(&sbi->stat_lock);
        BUG_ON(sbi->total_valid_block_count < (block_t) count);
        BUG_ON(inode->i_blocks < count);
        inode->i_blocks -= count;
        sbi->total_valid_block_count -= (block_t)count;
        spin_unlock(&sbi->stat_lock);
        return 0;
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
        atomic_inc(&sbi->nr_pages[count_type]);
        F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
        atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
        atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
        atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
        return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
        unsigned int pages_per_sec = sbi->segs_per_sec *
                                        (1 << sbi->log_blocks_per_seg);
        return ((get_pages(sbi, block_type) + pages_per_sec - 1)
                        >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
        block_t ret;
        spin_lock(&sbi->stat_lock);
        ret = sbi->total_valid_block_count;
        spin_unlock(&sbi->stat_lock);
        return ret;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

        /* return NAT or SIT bitmap */
        if (flag == NAT_BITMAP)
                return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
        else if (flag == SIT_BITMAP)
                return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

        return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        int offset = (flag == NAT_BITMAP) ?
                le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
        return &ckpt->sit_nat_version_bitmap + offset;
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
        block_t start_addr;
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

        start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

        /*
         * An odd-numbered checkpoint should be at cp segment 0,
         * and an even-numbered one at cp segment 1.
         */
        if (!(ckpt_version & 1))
                start_addr += sbi->blocks_per_seg;

        return start_addr;
}

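/*
 * Illustrative example (not from the original source): cp_blkaddr in
 * the superblock points at the first of the two checkpoint segments.
 * With blocks_per_seg == 512 and cp_blkaddr == 512, checkpoint
 * version 5 (odd) starts at block 512, while version 6 (even) starts
 * at block 1024, so the two packs alternate and the previous valid
 * checkpoint is never overwritten in place.
 */
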
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
        return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                unsigned int count)
{
        block_t valid_block_count;
        unsigned int valid_node_count;

        spin_lock(&sbi->stat_lock);

        valid_block_count = sbi->total_valid_block_count + (block_t)count;
        sbi->alloc_valid_block_count += (block_t)count;
        valid_node_count = sbi->total_valid_node_count + count;

        if (valid_block_count > sbi->user_block_count) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }

        if (valid_node_count > sbi->total_node_count) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }

        if (inode)
                inode->i_blocks += count;
        sbi->total_valid_node_count = valid_node_count;
        sbi->total_valid_block_count = valid_block_count;
        spin_unlock(&sbi->stat_lock);

        return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                unsigned int count)
{
        spin_lock(&sbi->stat_lock);

        BUG_ON(sbi->total_valid_block_count < count);
        BUG_ON(sbi->total_valid_node_count < count);
        BUG_ON(inode->i_blocks < count);

        inode->i_blocks -= count;
        sbi->total_valid_node_count -= count;
        sbi->total_valid_block_count -= (block_t)count;

        spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
        unsigned int ret;
        spin_lock(&sbi->stat_lock);
        ret = sbi->total_valid_node_count;
        spin_unlock(&sbi->stat_lock);
        return ret;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
        spin_lock(&sbi->stat_lock);
        BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
        sbi->total_valid_inode_count++;
        spin_unlock(&sbi->stat_lock);
}

static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
        spin_lock(&sbi->stat_lock);
        BUG_ON(!sbi->total_valid_inode_count);
        sbi->total_valid_inode_count--;
        spin_unlock(&sbi->stat_lock);
        return 0;
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
        unsigned int ret;
        spin_lock(&sbi->stat_lock);
        ret = sbi->total_valid_inode_count;
        spin_unlock(&sbi->stat_lock);
        return ret;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
        if (!page || IS_ERR(page))
                return;

        if (unlock) {
                BUG_ON(!PageLocked(page));
                unlock_page(page);
        }
        page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
        if (dn->node_page)
                f2fs_put_page(dn->node_page, 1);
        if (dn->inode_page && dn->node_page != dn->inode_page)
                f2fs_put_page(dn->inode_page, 0);
        dn->node_page = NULL;
        dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
                                        size_t size, void (*ctor)(void *))
{
        return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
        struct f2fs_node *p = F2FS_NODE(page);
        return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
        return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
                unsigned int offset)
{
        struct f2fs_node *raw_node;
        __le32 *addr_array;
        raw_node = F2FS_NODE(node_page);
        addr_array = blkaddr_in_node(raw_node);
        return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
        int mask;

        addr += (nr >> 3);
        mask = 1 << (7 - (nr & 0x07));
        return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
        int mask;
        int ret;

        addr += (nr >> 3);
        mask = 1 << (7 - (nr & 0x07));
        ret = mask & *addr;
        *addr |= mask;
        return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
        int mask;
        int ret;

        addr += (nr >> 3);
        mask = 1 << (7 - (nr & 0x07));
        ret = mask & *addr;
        *addr &= ~mask;
        return ret;
}

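/*
 * Illustrative note (not from the original source): unlike the generic
 * test_bit()/set_bit() helpers, the f2fs_*_bit() helpers above operate
 * MSB-first within each byte, matching the on-disk bitmap layout:
 *
 *      char map[2] = {0, 0};
 *
 *      f2fs_set_bit(0, map);   // map[0] == 0x80
 *      f2fs_set_bit(9, map);   // map[1] == 0x40
 *      f2fs_test_bit(9, map);  // non-zero
 *
 * They are also non-atomic, so callers provide their own locking.
 */
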
/* used for f2fs_inode_info->flags */
enum {
        FI_NEW_INODE,           /* indicate newly allocated inode */
        FI_DIRTY_INODE,         /* indicate inode is dirty or not */
        FI_INC_LINK,            /* need to increment i_nlink */
        FI_ACL_MODE,            /* indicate acl mode */
        FI_NO_ALLOC,            /* should not allocate any blocks */
        FI_UPDATE_DIR,          /* should update inode block for consistency */
        FI_DELAY_IPUT,          /* used for the recovery */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
        set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
        return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
        clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
        fi->i_acl_mode = mode;
        set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
        if (is_inode_flag_set(fi, FI_ACL_MODE)) {
                clear_inode_flag(fi, FI_ACL_MODE);
                return 1;
        }
        return 0;
}

static inline int f2fs_readonly(struct super_block *sb)
{
        return sb->s_flags & MS_RDONLY;
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
                                                        struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
                                struct page *, struct inode *);
int update_dent_inode(struct inode *, const struct qstr *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
        return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
                                inode);
}

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
                struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
                        struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool);
void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
                                        block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data *,
                                        block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
                                struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
                                struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
                                        int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
int recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
        struct list_head stat_list;
        struct f2fs_sb_info *sbi;
        struct mutex stat_lock;
        int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
        int main_area_segs, main_area_sections, main_area_zones;
        int hit_ext, total_ext;
        int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
        int nats, sits, fnids;
        int total_count, utilization;
        int bg_gc;
        unsigned int valid_count, valid_node_count, valid_inode_count;
        unsigned int bimodal, avg_vblocks;
        int util_free, util_valid, util_invalid;
        int rsvd_segs, overp_segs;
        int dirty_count, node_pages, meta_pages;
        int prefree_count, call_count;
        int tot_segs, node_segs, data_segs, free_segs, free_secs;
        int tot_blks, data_blks, node_blks;
        int curseg[NR_CURSEG_TYPE];
        int cursec[NR_CURSEG_TYPE];
        int curzone[NR_CURSEG_TYPE];

        unsigned int segment_count[2];
        unsigned int block_count[2];
        unsigned base_mem, cache_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
        return (struct f2fs_stat_info *)sbi->stat_info;
}

#define stat_inc_call_count(si)         ((si)->call_count++)

#define stat_inc_seg_count(sbi, type)                                   \
        do {                                                            \
                struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                (si)->tot_segs++;                                       \
                if (type == SUM_TYPE_DATA)                              \
                        si->data_segs++;                                \
                else                                                    \
                        si->node_segs++;                                \
        } while (0)

#define stat_inc_tot_blk_count(si, blks)                                \
        (si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)                              \
        do {                                                            \
                struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->data_blks += (blks);                                \
        } while (0)

#define stat_inc_node_blk_count(sbi, blks)                              \
        do {                                                            \
                struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->node_blks += (blks);                                \
        } while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif