Commit | Line | Data |
---|---|---|
0a8165d7 | 1 | /* |
39a53e0c JK | 2 | * fs/f2fs/f2fs.h |
3 | * | |
4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | |
5 | * http://www.samsung.com/ | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | */ | |
11 | #ifndef _LINUX_F2FS_H | |
12 | #define _LINUX_F2FS_H | |
13 | ||
14 | #include <linux/types.h> | |
15 | #include <linux/page-flags.h> | |
16 | #include <linux/buffer_head.h> | |
39a53e0c JK | 17 | #include <linux/slab.h> |
18 | #include <linux/crc32.h> | |
19 | #include <linux/magic.h> | |
20 | ||
21 | /* | |
22 | * For mount options | |
23 | */ | |
24 | #define F2FS_MOUNT_BG_GC 0x00000001 | |
25 | #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002 | |
26 | #define F2FS_MOUNT_DISCARD 0x00000004 | |
27 | #define F2FS_MOUNT_NOHEAP 0x00000008 | |
28 | #define F2FS_MOUNT_XATTR_USER 0x00000010 | |
29 | #define F2FS_MOUNT_POSIX_ACL 0x00000020 | |
30 | #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040 | |
31 | ||
32 | #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) | |
33 | #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) | |
34 | #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option) | |
35 | ||
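The set_opt/clear_opt/test_opt macros above rely on token pasting: callers pass only the suffix of the F2FS_MOUNT_* flag name. A minimal sketch of how a mount-option handler might use them (the function name is hypothetical; `sbi` is the `struct f2fs_sb_info *` defined later in this header):

```c
/* Illustrative sketch: toggling the DISCARD option on a hypothetical sbi. */
static void example_toggle_discard(struct f2fs_sb_info *sbi)
{
	set_opt(sbi, DISCARD);		/* expands to ... |= F2FS_MOUNT_DISCARD */

	if (test_opt(sbi, DISCARD))	/* expands to ... & F2FS_MOUNT_DISCARD */
		clear_opt(sbi, DISCARD);/* expands to ... &= ~F2FS_MOUNT_DISCARD */
}
```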
36 | #define ver_after(a, b) (typecheck(unsigned long long, a) && \ | |
37 | typecheck(unsigned long long, b) && \ | |
38 | ((long long)((a) - (b)) > 0)) | |
39 | ||
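ver_after() follows the same wrap-safe idiom as the kernel's time_after(): the unsigned difference is cast to a signed type, so the comparison stays correct even if the 64-bit version counter wraps. A small user-space demonstration of the idiom (standalone, not part of f2fs):

```c
#include <assert.h>

/* Same signed-difference idiom as ver_after(). */
static int ver_after_demo(unsigned long long a, unsigned long long b)
{
	return (long long)(a - b) > 0;
}

int main(void)
{
	assert(ver_after_demo(10, 3));		/* plainly newer */
	assert(ver_after_demo(2, ~0ULL - 1));	/* still "after" across wrap-around */
	assert(!ver_after_demo(5, 5));		/* equal is not after */
	return 0;
}
```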
40 | typedef u64 block_t; | |
41 | typedef u32 nid_t; | |
42 | ||
43 | struct f2fs_mount_info { | |
44 | unsigned int opt; | |
45 | }; | |
46 | ||
47 | static inline __u32 f2fs_crc32(void *buff, size_t len) | |
48 | { | |
49 | return crc32_le(F2FS_SUPER_MAGIC, buff, len); | |
50 | } | |
51 | ||
52 | static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size) | |
53 | { | |
54 | return f2fs_crc32(buff, buff_size) == blk_crc; | |
55 | } | |
56 | ||
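f2fs_crc32() seeds crc32_le() with the superblock magic, so a checksum computed with a different seed will not match. A sketch of how an on-disk structure's checksum might be produced and later re-verified (helper names are illustrative):

```c
/* Illustrative: compute a little-endian checksum for a buffer and check it back. */
static __le32 example_make_crc(void *buf, size_t len)
{
	return cpu_to_le32(f2fs_crc32(buf, len));
}

static bool example_check_crc(void *buf, size_t len, __le32 stored)
{
	return f2fs_crc_valid(le32_to_cpu(stored), buf, len);
}
```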
57 | /* | |
58 | * For checkpoint manager | |
59 | */ | |
60 | enum { | |
61 | NAT_BITMAP, | |
62 | SIT_BITMAP | |
63 | }; | |
64 | ||
65 | /* for the list of orphan inodes */ | |
66 | struct orphan_inode_entry { | |
67 | struct list_head list; /* list head */ | |
68 | nid_t ino; /* inode number */ | |
69 | }; | |
70 | ||
71 | /* for the list of directory inodes */ | |
72 | struct dir_inode_entry { | |
73 | struct list_head list; /* list head */ | |
74 | struct inode *inode; /* vfs inode pointer */ | |
75 | }; | |
76 | ||
77 | /* for the list of fsync inodes, used only during recovery */ | |
78 | struct fsync_inode_entry { | |
79 | struct list_head list; /* list head */ | |
80 | struct inode *inode; /* vfs inode pointer */ | |
81 | block_t blkaddr; /* block address locating the last inode */ | |
82 | }; | |
83 | ||
84 | #define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats)) | |
85 | #define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits)) | |
86 | ||
87 | #define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne) | |
88 | #define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid) | |
89 | #define sit_in_journal(sum, i) (sum->sit_j.entries[i].se) | |
90 | #define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno) | |
91 | ||
92 | static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i) | |
93 | { | |
94 | int before = nats_in_cursum(rs); | |
95 | rs->n_nats = cpu_to_le16(before + i); | |
96 | return before; | |
97 | } | |
98 | ||
99 | static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i) | |
100 | { | |
101 | int before = sits_in_cursum(rs); | |
102 | rs->n_sits = cpu_to_le16(before + i); | |
103 | return before; | |
104 | } | |
105 | ||
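The journal accessors and the two update helpers above work together: update_nats_in_cursum() reserves room for `i` new entries and returns the old count, which is then used as the slot index. A sketch of appending one NAT journal entry (assumes struct f2fs_nat_entry from the on-disk format header; the function name is illustrative):

```c
/* Illustrative: record one NAT entry in the journal area of the current summary. */
static void example_journal_nat(struct f2fs_summary_block *sum,
				nid_t nid, struct f2fs_nat_entry ne)
{
	int i = update_nats_in_cursum(sum, 1);	/* old count == first free slot */

	nid_in_journal(sum, i) = cpu_to_le32(nid);
	nat_in_journal(sum, i) = ne;
}
```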
106 | /* | |
107 | * For INODE and NODE manager | |
108 | */ | |
109 | #define XATTR_NODE_OFFSET (-1) /* | |
110 | * store xattrs to one node block per | |
111 | * file keeping -1 as its node offset to | |
112 | * distinguish from index node blocks. | |
113 | */ | |
114 | #define RDONLY_NODE 1 /* | |
115 | * specify a read-only mode when getting | |
116 | * a node block. 0 is read-write mode. | |
117 | * used by get_dnode_of_data(). | |
118 | */ | |
119 | #define F2FS_LINK_MAX 32000 /* maximum link count per file */ | |
120 | ||
121 | /* for in-memory extent cache entry */ | |
122 | struct extent_info { | |
123 | rwlock_t ext_lock; /* rwlock for consistency */ | |
124 | unsigned int fofs; /* start offset in a file */ | |
125 | u32 blk_addr; /* start block address of the extent */ | |
126 | unsigned int len; /* length of the extent */ | |
127 | }; | |
128 | ||
129 | /* | |
130 | * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. | |
131 | */ | |
132 | #define FADVISE_COLD_BIT 0x01 | |
133 | ||
134 | struct f2fs_inode_info { | |
135 | struct inode vfs_inode; /* serve a vfs inode */ | |
136 | unsigned long i_flags; /* keep inode flags for ioctl */ | |
137 | unsigned char i_advise; /* used to give file attribute hints */ | |
138 | unsigned int i_current_depth; /* used only in directory structure */ | |
6666e6aa | 139 | unsigned int i_pino; /* parent inode number */ |
39a53e0c JK | 140 | umode_t i_acl_mode; /* keep file acl mode temporarily */ |
141 | ||
142 | /* Used below internally in f2fs */ | |
143 | unsigned long flags; /* use to pass per-file flags */ | |
324ddc70 | 144 | unsigned long long data_version;/* latest version of data for fsync */ |
39a53e0c JK | 145 | atomic_t dirty_dents; /* # of dirty dentry pages */ |
146 | f2fs_hash_t chash; /* hash value of given file name */ | |
147 | unsigned int clevel; /* maximum level of given file name */ | |
148 | nid_t i_xattr_nid; /* node id that contains xattrs */ | |
149 | struct extent_info ext; /* in-memory extent cache entry */ | |
150 | }; | |
151 | ||
152 | static inline void get_extent_info(struct extent_info *ext, | |
153 | struct f2fs_extent i_ext) | |
154 | { | |
155 | write_lock(&ext->ext_lock); | |
156 | ext->fofs = le32_to_cpu(i_ext.fofs); | |
157 | ext->blk_addr = le32_to_cpu(i_ext.blk_addr); | |
158 | ext->len = le32_to_cpu(i_ext.len); | |
159 | write_unlock(&ext->ext_lock); | |
160 | } | |
161 | ||
162 | static inline void set_raw_extent(struct extent_info *ext, | |
163 | struct f2fs_extent *i_ext) | |
164 | { | |
165 | read_lock(&ext->ext_lock); | |
166 | i_ext->fofs = cpu_to_le32(ext->fofs); | |
167 | i_ext->blk_addr = cpu_to_le32(ext->blk_addr); | |
168 | i_ext->len = cpu_to_le32(ext->len); | |
169 | read_unlock(&ext->ext_lock); | |
170 | } | |
171 | ||
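get_extent_info() and set_raw_extent() only convert between the on-disk and in-memory forms under the rwlock; a reader that wants to translate a file offset through the cache would take the same lock and check whether the offset falls inside [fofs, fofs + len). A minimal lookup sketch (the function name is hypothetical):

```c
/* Illustrative: look up a file page index in the in-memory extent cache. */
static bool example_extent_lookup(struct f2fs_inode_info *fi,
				  pgoff_t index, block_t *blkaddr)
{
	struct extent_info *ext = &fi->ext;
	bool hit = false;

	read_lock(&ext->ext_lock);
	if (ext->len && index >= ext->fofs && index < ext->fofs + ext->len) {
		*blkaddr = ext->blk_addr + (index - ext->fofs);
		hit = true;
	}
	read_unlock(&ext->ext_lock);
	return hit;
}
```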
172 | struct f2fs_nm_info { | |
173 | block_t nat_blkaddr; /* base disk address of NAT */ | |
174 | nid_t max_nid; /* maximum possible node ids */ | |
175 | nid_t init_scan_nid; /* the first nid to be scanned */ | |
176 | nid_t next_scan_nid; /* the next nid to be scanned */ | |
177 | ||
178 | /* NAT cache management */ | |
179 | struct radix_tree_root nat_root;/* root of the nat entry cache */ | |
180 | rwlock_t nat_tree_lock; /* protect the nat entry cache */ | |
181 | unsigned int nat_cnt; /* the # of cached nat entries */ | |
182 | struct list_head nat_entries; /* cached nat entry list (clean) */ | |
183 | struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */ | |
184 | ||
185 | /* free node ids management */ | |
186 | struct list_head free_nid_list; /* a list for free nids */ | |
187 | spinlock_t free_nid_list_lock; /* protect free nid list */ | |
188 | unsigned int fcnt; /* the number of free node ids */ | |
189 | struct mutex build_lock; /* lock for build free nids */ | |
190 | ||
191 | /* for checkpoint */ | |
192 | char *nat_bitmap; /* NAT bitmap pointer */ | |
193 | int bitmap_size; /* bitmap size */ | |
194 | }; | |
195 | ||
196 | /* | |
197 | * This structure is used as a function parameter. | |
198 | * All the information is dedicated to a given direct node block determined | |
199 | * by the data offset in a file. | |
200 | */ | |
201 | struct dnode_of_data { | |
202 | struct inode *inode; /* vfs inode pointer */ | |
203 | struct page *inode_page; /* its inode page, NULL is possible */ | |
204 | struct page *node_page; /* cached direct node page */ | |
205 | nid_t nid; /* node id of the direct node block */ | |
206 | unsigned int ofs_in_node; /* data offset in the node page */ | |
207 | bool inode_page_locked; /* inode page is locked or not */ | |
208 | block_t data_blkaddr; /* block address of the data block */ | |
209 | }; | |
210 | ||
211 | static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, | |
212 | struct page *ipage, struct page *npage, nid_t nid) | |
213 | { | |
d66d1f76 | 214 | memset(dn, 0, sizeof(*dn)); |
39a53e0c JK | 215 | dn->inode = inode; |
216 | dn->inode_page = ipage; | |
217 | dn->node_page = npage; | |
218 | dn->nid = nid; | |
39a53e0c JK | 219 | } |
220 | ||
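set_new_dnode() only primes the descriptor; get_dnode_of_data() (declared under node.c below) then walks the node tree for a given page index and fills in node_page and ofs_in_node. A sketch of the common lookup pattern, using datablock_addr() and f2fs_put_dnode() defined later in this header (the wrapper name and error handling are illustrative):

```c
/* Illustrative: translate a file page index into its on-disk block address. */
static block_t example_lookup_blkaddr(struct inode *inode, pgoff_t index)
{
	struct dnode_of_data dn;
	block_t blkaddr;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, index, RDONLY_NODE))
		return 0;			/* hole or error: nothing mapped */

	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
	f2fs_put_dnode(&dn);
	return blkaddr;
}
```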
221 | /* | |
222 | * For SIT manager | |
223 | * | |
224 | * By default, there are 6 active log areas across the whole main area. | |
225 | * When considering hot and cold data separation to reduce cleaning overhead, | |
226 | * we split 3 for data logs and 3 for node logs as hot, warm, and cold types, | |
227 | * respectively. | |
228 | * In the current design, do not change these numbers directly. | |
229 | * Instead, use the active_logs=x mount option to select 2, 4, or 6 | |
230 | * logs according to the underlying devices. (default: 6) | |
231 | * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for | |
232 | * data and 8 for node logs. | |
233 | */ | |
234 | #define NR_CURSEG_DATA_TYPE (3) | |
235 | #define NR_CURSEG_NODE_TYPE (3) | |
236 | #define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) | |
237 | ||
238 | enum { | |
239 | CURSEG_HOT_DATA = 0, /* directory entry blocks */ | |
240 | CURSEG_WARM_DATA, /* data blocks */ | |
241 | CURSEG_COLD_DATA, /* multimedia or GCed data blocks */ | |
242 | CURSEG_HOT_NODE, /* direct node blocks of directory files */ | |
243 | CURSEG_WARM_NODE, /* direct node blocks of normal files */ | |
244 | CURSEG_COLD_NODE, /* indirect node blocks */ | |
245 | NO_CHECK_TYPE | |
246 | }; | |
247 | ||
248 | struct f2fs_sm_info { | |
249 | struct sit_info *sit_info; /* whole segment information */ | |
250 | struct free_segmap_info *free_info; /* free segment information */ | |
251 | struct dirty_seglist_info *dirty_info; /* dirty segment information */ | |
252 | struct curseg_info *curseg_array; /* active segment information */ | |
253 | ||
254 | struct list_head wblist_head; /* list of under-writeback pages */ | |
255 | spinlock_t wblist_lock; /* lock for checkpoint */ | |
256 | ||
257 | block_t seg0_blkaddr; /* block address of 0'th segment */ | |
258 | block_t main_blkaddr; /* start block address of main area */ | |
259 | block_t ssa_blkaddr; /* start block address of SSA area */ | |
260 | ||
261 | unsigned int segment_count; /* total # of segments */ | |
262 | unsigned int main_segments; /* # of segments in main area */ | |
263 | unsigned int reserved_segments; /* # of reserved segments */ | |
264 | unsigned int ovp_segments; /* # of overprovision segments */ | |
265 | }; | |
266 | ||
267 | /* | |
268 | * For directory operation | |
269 | */ | |
270 | #define NODE_DIR1_BLOCK (ADDRS_PER_INODE + 1) | |
271 | #define NODE_DIR2_BLOCK (ADDRS_PER_INODE + 2) | |
272 | #define NODE_IND1_BLOCK (ADDRS_PER_INODE + 3) | |
273 | #define NODE_IND2_BLOCK (ADDRS_PER_INODE + 4) | |
274 | #define NODE_DIND_BLOCK (ADDRS_PER_INODE + 5) | |
275 | ||
276 | /* | |
277 | * For superblock | |
278 | */ | |
279 | /* | |
280 | * COUNT_TYPE for monitoring | |
281 | * | |
282 | * f2fs monitors the number of several block types, such as pages under | |
283 | * writeback and dirty dentry, node, and meta blocks. | |
284 | */ | |
285 | enum count_type { | |
286 | F2FS_WRITEBACK, | |
287 | F2FS_DIRTY_DENTS, | |
288 | F2FS_DIRTY_NODES, | |
289 | F2FS_DIRTY_META, | |
290 | NR_COUNT_TYPE, | |
291 | }; | |
292 | ||
293 | /* | |
294 | * FS_LOCK nesting subclasses for the lock validator: | |
295 | * | |
296 | * The locking order between these classes is | |
297 | * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW | |
298 | * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC | |
299 | */ | |
300 | enum lock_type { | |
301 | RENAME, /* for renaming operations */ | |
302 | DENTRY_OPS, /* for directory operations */ | |
303 | DATA_WRITE, /* for data write */ | |
304 | DATA_NEW, /* for data allocation */ | |
305 | DATA_TRUNC, /* for data truncate */ | |
306 | NODE_NEW, /* for node allocation */ | |
307 | NODE_TRUNC, /* for node truncate */ | |
308 | NODE_WRITE, /* for node write */ | |
309 | NR_LOCK_TYPE, | |
310 | }; | |
311 | ||
312 | /* | |
313 | * The below are the page types of bios used in submit_bio(). | |
314 | * The available types are: | |
315 | * DATA User data pages. They operate in async mode. | |
316 | * NODE Node pages. They operate in async mode. | |
317 | * META FS metadata pages such as SIT, NAT, CP. | |
318 | * NR_PAGE_TYPE The number of page types. | |
319 | * META_FLUSH Makes sure the previous pages are written, | |
320 | * waiting for the bio's completion. | |
321 | * ... Can only be used with META. | |
322 | */ | |
323 | enum page_type { | |
324 | DATA, | |
325 | NODE, | |
326 | META, | |
327 | NR_PAGE_TYPE, | |
328 | META_FLUSH, | |
329 | }; | |
330 | ||
331 | struct f2fs_sb_info { | |
332 | struct super_block *sb; /* pointer to VFS super block */ | |
333 | struct buffer_head *raw_super_buf; /* buffer head of raw sb */ | |
334 | struct f2fs_super_block *raw_super; /* raw super block pointer */ | |
335 | int s_dirty; /* dirty flag for checkpoint */ | |
336 | ||
337 | /* for node-related operations */ | |
338 | struct f2fs_nm_info *nm_info; /* node manager */ | |
339 | struct inode *node_inode; /* cache node blocks */ | |
340 | ||
341 | /* for segment-related operations */ | |
342 | struct f2fs_sm_info *sm_info; /* segment manager */ | |
343 | struct bio *bio[NR_PAGE_TYPE]; /* bios to merge */ | |
344 | sector_t last_block_in_bio[NR_PAGE_TYPE]; /* last block number */ | |
345 | struct rw_semaphore bio_sem; /* IO semaphore */ | |
346 | ||
347 | /* for checkpoint */ | |
348 | struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ | |
349 | struct inode *meta_inode; /* cache meta blocks */ | |
350 | struct mutex cp_mutex; /* for checkpoint procedure */ | |
351 | struct mutex fs_lock[NR_LOCK_TYPE]; /* for blocking FS operations */ | |
352 | struct mutex write_inode; /* mutex for write inode */ | |
353 | struct mutex writepages; /* mutex for writepages() */ | |
354 | int por_doing; /* whether recovery is in progress */ | |
355 | ||
356 | /* for orphan inode management */ | |
357 | struct list_head orphan_inode_list; /* orphan inode list */ | |
358 | struct mutex orphan_inode_mutex; /* for orphan inode list */ | |
359 | unsigned int n_orphans; /* # of orphan inodes */ | |
360 | ||
361 | /* for directory inode management */ | |
362 | struct list_head dir_inode_list; /* dir inode list */ | |
363 | spinlock_t dir_inode_lock; /* for dir inode list lock */ | |
364 | unsigned int n_dirty_dirs; /* # of dir inodes */ | |
365 | ||
366 | /* basic file system units */ | |
367 | unsigned int log_sectors_per_block; /* log2 sectors per block */ | |
368 | unsigned int log_blocksize; /* log2 block size */ | |
369 | unsigned int blocksize; /* block size */ | |
370 | unsigned int root_ino_num; /* root inode number*/ | |
371 | unsigned int node_ino_num; /* node inode number*/ | |
372 | unsigned int meta_ino_num; /* meta inode number*/ | |
373 | unsigned int log_blocks_per_seg; /* log2 blocks per segment */ | |
374 | unsigned int blocks_per_seg; /* blocks per segment */ | |
375 | unsigned int segs_per_sec; /* segments per section */ | |
376 | unsigned int secs_per_zone; /* sections per zone */ | |
377 | unsigned int total_sections; /* total section count */ | |
378 | unsigned int total_node_count; /* total node block count */ | |
379 | unsigned int total_valid_node_count; /* valid node block count */ | |
380 | unsigned int total_valid_inode_count; /* valid inode count */ | |
381 | int active_logs; /* # of active logs */ | |
382 | ||
383 | block_t user_block_count; /* # of user blocks */ | |
384 | block_t total_valid_block_count; /* # of valid blocks */ | |
385 | block_t alloc_valid_block_count; /* # of allocated blocks */ | |
386 | block_t last_valid_block_count; /* for recovery */ | |
387 | u32 s_next_generation; /* for NFS support */ | |
388 | atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */ | |
389 | ||
390 | struct f2fs_mount_info mount_opt; /* mount options */ | |
391 | ||
392 | /* for cleaning operations */ | |
393 | struct mutex gc_mutex; /* mutex for GC */ | |
394 | struct f2fs_gc_kthread *gc_thread; /* GC thread */ | |
395 | ||
396 | /* | |
397 | * for stat information. | |
398 | * one is for the LFS mode, and the other is for the SSR mode. | |
399 | */ | |
400 | struct f2fs_stat_info *stat_info; /* FS status information */ | |
401 | unsigned int segment_count[2]; /* # of allocated segments */ | |
402 | unsigned int block_count[2]; /* # of allocated blocks */ | |
403 | unsigned int last_victim[2]; /* last victim segment # */ | |
404 | int total_hit_ext, read_hit_ext; /* extent cache hit ratio */ | |
405 | int bg_gc; /* background gc calls */ | |
406 | spinlock_t stat_lock; /* lock for stat operations */ | |
407 | }; | |
408 | ||
409 | /* | |
410 | * Inline functions | |
411 | */ | |
412 | static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) | |
413 | { | |
414 | return container_of(inode, struct f2fs_inode_info, vfs_inode); | |
415 | } | |
416 | ||
417 | static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) | |
418 | { | |
419 | return sb->s_fs_info; | |
420 | } | |
421 | ||
422 | static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) | |
423 | { | |
424 | return (struct f2fs_super_block *)(sbi->raw_super); | |
425 | } | |
426 | ||
427 | static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) | |
428 | { | |
429 | return (struct f2fs_checkpoint *)(sbi->ckpt); | |
430 | } | |
431 | ||
432 | static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) | |
433 | { | |
434 | return (struct f2fs_nm_info *)(sbi->nm_info); | |
435 | } | |
436 | ||
437 | static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) | |
438 | { | |
439 | return (struct f2fs_sm_info *)(sbi->sm_info); | |
440 | } | |
441 | ||
442 | static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) | |
443 | { | |
444 | return (struct sit_info *)(SM_I(sbi)->sit_info); | |
445 | } | |
446 | ||
447 | static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) | |
448 | { | |
449 | return (struct free_segmap_info *)(SM_I(sbi)->free_info); | |
450 | } | |
451 | ||
452 | static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) | |
453 | { | |
454 | return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); | |
455 | } | |
456 | ||
457 | static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi) | |
458 | { | |
459 | sbi->s_dirty = 1; | |
460 | } | |
461 | ||
462 | static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi) | |
463 | { | |
464 | sbi->s_dirty = 0; | |
465 | } | |
466 | ||
25ca923b JK | 467 | static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) |
468 | { | |
469 | unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); | |
470 | return ckpt_flags & f; | |
471 | } | |
472 | ||
473 | static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) | |
474 | { | |
475 | unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); | |
476 | ckpt_flags |= f; | |
477 | cp->ckpt_flags = cpu_to_le32(ckpt_flags); | |
478 | } | |
479 | ||
480 | static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) | |
481 | { | |
482 | unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); | |
483 | ckpt_flags &= (~f); | |
484 | cp->ckpt_flags = cpu_to_le32(ckpt_flags); | |
485 | } | |
486 | ||
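The three helpers above keep ckpt_flags in little-endian form on disk while letting callers work with a plain unsigned int. A sketch of flagging a checkpoint that carries orphan inodes (CP_ORPHAN_PRESENT_FLAG is assumed to come from the on-disk format header, include/linux/f2fs_fs.h; the function name is illustrative):

```c
/* Illustrative: set, test, and clear a checkpoint flag around orphan handling. */
static void example_orphan_flag(struct f2fs_sb_info *sbi, bool have_orphans)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	if (have_orphans)
		set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else if (is_set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG))
		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
}
```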
39a53e0c JK | 487 | static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t) |
488 | { | |
489 | mutex_lock_nested(&sbi->fs_lock[t], t); | |
490 | } | |
491 | ||
492 | static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t) | |
493 | { | |
494 | mutex_unlock(&sbi->fs_lock[t]); | |
495 | } | |
496 | ||
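mutex_lock_op() passes the lock type as the lockdep subclass, so lockdep can validate the nesting order documented above the enum. A sketch of a caller that takes two FS_LOCKs in the documented order (DATA_NEW before NODE_NEW); the surrounding logic is elided and the function name is hypothetical:

```c
/* Illustrative: nested FS_LOCKs must follow the documented order. */
static void example_lock_order(struct f2fs_sb_info *sbi)
{
	mutex_lock_op(sbi, DATA_NEW);	/* lockdep subclass DATA_NEW */
	mutex_lock_op(sbi, NODE_NEW);	/* DATA_NEW -> ... -> NODE_NEW is allowed */

	/* ... allocate data and node blocks here ... */

	mutex_unlock_op(sbi, NODE_NEW);
	mutex_unlock_op(sbi, DATA_NEW);
}
```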
497 | /* | |
498 | * Check whether the given nid is within node id range. | |
499 | */ | |
500 | static inline void check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) | |
501 | { | |
502 | BUG_ON((nid >= NM_I(sbi)->max_nid)); | |
503 | } | |
504 | ||
505 | #define F2FS_DEFAULT_ALLOCATED_BLOCKS 1 | |
506 | ||
507 | /* | |
508 | * Check whether the inode has blocks or not | |
509 | */ | |
510 | static inline int F2FS_HAS_BLOCKS(struct inode *inode) | |
511 | { | |
512 | if (F2FS_I(inode)->i_xattr_nid) | |
513 | return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1); | |
514 | else | |
515 | return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS); | |
516 | } | |
517 | ||
518 | static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi, | |
519 | struct inode *inode, blkcnt_t count) | |
520 | { | |
521 | block_t valid_block_count; | |
522 | ||
523 | spin_lock(&sbi->stat_lock); | |
524 | valid_block_count = | |
525 | sbi->total_valid_block_count + (block_t)count; | |
526 | if (valid_block_count > sbi->user_block_count) { | |
527 | spin_unlock(&sbi->stat_lock); | |
528 | return false; | |
529 | } | |
530 | inode->i_blocks += count; | |
531 | sbi->total_valid_block_count = valid_block_count; | |
532 | sbi->alloc_valid_block_count += (block_t)count; | |
533 | spin_unlock(&sbi->stat_lock); | |
534 | return true; | |
535 | } | |
536 | ||
537 | static inline int dec_valid_block_count(struct f2fs_sb_info *sbi, | |
538 | struct inode *inode, | |
539 | blkcnt_t count) | |
540 | { | |
541 | spin_lock(&sbi->stat_lock); | |
542 | BUG_ON(sbi->total_valid_block_count < (block_t) count); | |
543 | BUG_ON(inode->i_blocks < count); | |
544 | inode->i_blocks -= count; | |
545 | sbi->total_valid_block_count -= (block_t)count; | |
546 | spin_unlock(&sbi->stat_lock); | |
547 | return 0; | |
548 | } | |
549 | ||
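inc_valid_block_count() is the admission check used before data-block allocation: it fails once total_valid_block_count would exceed user_block_count, and dec_valid_block_count() undoes the reservation. A sketch of the reserve/rollback pattern (the wrapper name and error-code choice are illustrative):

```c
/* Illustrative: reserve one block up front, roll back if the allocation fails. */
static int example_reserve_block(struct f2fs_sb_info *sbi, struct inode *inode)
{
	int err;

	if (!inc_valid_block_count(sbi, inode, 1))
		return -ENOSPC;			/* would exceed user_block_count */

	err = 0;	/* ... the actual block allocation would happen here ... */
	if (err)
		dec_valid_block_count(sbi, inode, 1);	/* roll the count back */

	return err;
}
```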
550 | static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) | |
551 | { | |
552 | atomic_inc(&sbi->nr_pages[count_type]); | |
553 | F2FS_SET_SB_DIRT(sbi); | |
554 | } | |
555 | ||
556 | static inline void inode_inc_dirty_dents(struct inode *inode) | |
557 | { | |
558 | atomic_inc(&F2FS_I(inode)->dirty_dents); | |
559 | } | |
560 | ||
561 | static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) | |
562 | { | |
563 | atomic_dec(&sbi->nr_pages[count_type]); | |
564 | } | |
565 | ||
566 | static inline void inode_dec_dirty_dents(struct inode *inode) | |
567 | { | |
568 | atomic_dec(&F2FS_I(inode)->dirty_dents); | |
569 | } | |
570 | ||
571 | static inline int get_pages(struct f2fs_sb_info *sbi, int count_type) | |
572 | { | |
573 | return atomic_read(&sbi->nr_pages[count_type]); | |
574 | } | |
575 | ||
5ac206cf NJ | 576 | static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) |
577 | { | |
578 | unsigned int pages_per_sec = sbi->segs_per_sec * | |
579 | (1 << sbi->log_blocks_per_seg); | |
580 | return ((get_pages(sbi, block_type) + pages_per_sec - 1) | |
581 | >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; | |
582 | } | |
583 | ||
39a53e0c JK | 584 | static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) |
585 | { | |
586 | block_t ret; | |
587 | spin_lock(&sbi->stat_lock); | |
588 | ret = sbi->total_valid_block_count; | |
589 | spin_unlock(&sbi->stat_lock); | |
590 | return ret; | |
591 | } | |
592 | ||
593 | static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) | |
594 | { | |
595 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | |
596 | ||
597 | /* return NAT or SIT bitmap */ | |
598 | if (flag == NAT_BITMAP) | |
599 | return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); | |
600 | else if (flag == SIT_BITMAP) | |
601 | return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); | |
602 | ||
603 | return 0; | |
604 | } | |
605 | ||
606 | static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) | |
607 | { | |
608 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | |
25ca923b JK | 609 | int offset = (flag == NAT_BITMAP) ? |
610 | le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; | |
39a53e0c JK | 611 | return &ckpt->sit_nat_version_bitmap + offset; |
612 | } | |
613 | ||
614 | static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) | |
615 | { | |
616 | block_t start_addr; | |
617 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); | |
618 | unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver); | |
619 | ||
25ca923b | 620 | start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); |
39a53e0c JK | 621 | |
622 | /* | |
623 | * odd numbered checkpoints are stored at cp segment 0, | |
624 | * and even numbered ones at cp segment 1 | |
625 | */ | |
626 | if (!(ckpt_version & 1)) | |
627 | start_addr += sbi->blocks_per_seg; | |
628 | ||
629 | return start_addr; | |
630 | } | |
631 | ||
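__start_cp_addr() exploits the fact that checkpoint packs alternate between the first two segments of the CP area: odd versions live in the first segment, even versions in the second. Toy user-space arithmetic mirroring the same calculation (the values in the comment are made up for illustration):

```c
/* Mirrors __start_cp_addr(): pick the cp pack from the version's parity. */
static unsigned long long cp_pack_start(unsigned long long cp_blkaddr,
					unsigned int blocks_per_seg,
					unsigned long long ckpt_version)
{
	/* odd version -> first cp segment, even version -> second cp segment */
	return cp_blkaddr + ((ckpt_version & 1) ? 0 : blocks_per_seg);
}
/* e.g. cp_blkaddr = 512, blocks_per_seg = 512: version 7 -> 512, version 8 -> 1024 */
```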
632 | static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) | |
633 | { | |
634 | return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); | |
635 | } | |
636 | ||
637 | static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi, | |
638 | struct inode *inode, | |
639 | unsigned int count) | |
640 | { | |
641 | block_t valid_block_count; | |
642 | unsigned int valid_node_count; | |
643 | ||
644 | spin_lock(&sbi->stat_lock); | |
645 | ||
646 | valid_block_count = sbi->total_valid_block_count + (block_t)count; | |
647 | sbi->alloc_valid_block_count += (block_t)count; | |
648 | valid_node_count = sbi->total_valid_node_count + count; | |
649 | ||
650 | if (valid_block_count > sbi->user_block_count) { | |
651 | spin_unlock(&sbi->stat_lock); | |
652 | return false; | |
653 | } | |
654 | ||
655 | if (valid_node_count > sbi->total_node_count) { | |
656 | spin_unlock(&sbi->stat_lock); | |
657 | return false; | |
658 | } | |
659 | ||
660 | if (inode) | |
661 | inode->i_blocks += count; | |
662 | sbi->total_valid_node_count = valid_node_count; | |
663 | sbi->total_valid_block_count = valid_block_count; | |
664 | spin_unlock(&sbi->stat_lock); | |
665 | ||
666 | return true; | |
667 | } | |
668 | ||
669 | static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, | |
670 | struct inode *inode, | |
671 | unsigned int count) | |
672 | { | |
673 | spin_lock(&sbi->stat_lock); | |
674 | ||
675 | BUG_ON(sbi->total_valid_block_count < count); | |
676 | BUG_ON(sbi->total_valid_node_count < count); | |
677 | BUG_ON(inode->i_blocks < count); | |
678 | ||
679 | inode->i_blocks -= count; | |
680 | sbi->total_valid_node_count -= count; | |
681 | sbi->total_valid_block_count -= (block_t)count; | |
682 | ||
683 | spin_unlock(&sbi->stat_lock); | |
684 | } | |
685 | ||
686 | static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) | |
687 | { | |
688 | unsigned int ret; | |
689 | spin_lock(&sbi->stat_lock); | |
690 | ret = sbi->total_valid_node_count; | |
691 | spin_unlock(&sbi->stat_lock); | |
692 | return ret; | |
693 | } | |
694 | ||
695 | static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) | |
696 | { | |
697 | spin_lock(&sbi->stat_lock); | |
698 | BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count); | |
699 | sbi->total_valid_inode_count++; | |
700 | spin_unlock(&sbi->stat_lock); | |
701 | } | |
702 | ||
703 | static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi) | |
704 | { | |
705 | spin_lock(&sbi->stat_lock); | |
706 | BUG_ON(!sbi->total_valid_inode_count); | |
707 | sbi->total_valid_inode_count--; | |
708 | spin_unlock(&sbi->stat_lock); | |
709 | return 0; | |
710 | } | |
711 | ||
712 | static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi) | |
713 | { | |
714 | unsigned int ret; | |
715 | spin_lock(&sbi->stat_lock); | |
716 | ret = sbi->total_valid_inode_count; | |
717 | spin_unlock(&sbi->stat_lock); | |
718 | return ret; | |
719 | } | |
720 | ||
721 | static inline void f2fs_put_page(struct page *page, int unlock) | |
722 | { | |
723 | if (!page || IS_ERR(page)) | |
724 | return; | |
725 | ||
726 | if (unlock) { | |
727 | BUG_ON(!PageLocked(page)); | |
728 | unlock_page(page); | |
729 | } | |
730 | page_cache_release(page); | |
731 | } | |
732 | ||
733 | static inline void f2fs_put_dnode(struct dnode_of_data *dn) | |
734 | { | |
735 | if (dn->node_page) | |
736 | f2fs_put_page(dn->node_page, 1); | |
737 | if (dn->inode_page && dn->node_page != dn->inode_page) | |
738 | f2fs_put_page(dn->inode_page, 0); | |
739 | dn->node_page = NULL; | |
740 | dn->inode_page = NULL; | |
741 | } | |
742 | ||
743 | static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, | |
744 | size_t size, void (*ctor)(void *)) | |
745 | { | |
746 | return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor); | |
747 | } | |
748 | ||
749 | #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) | |
750 | ||
751 | static inline bool IS_INODE(struct page *page) | |
752 | { | |
753 | struct f2fs_node *p = (struct f2fs_node *)page_address(page); | |
754 | return RAW_IS_INODE(p); | |
755 | } | |
756 | ||
757 | static inline __le32 *blkaddr_in_node(struct f2fs_node *node) | |
758 | { | |
759 | return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr; | |
760 | } | |
761 | ||
762 | static inline block_t datablock_addr(struct page *node_page, | |
763 | unsigned int offset) | |
764 | { | |
765 | struct f2fs_node *raw_node; | |
766 | __le32 *addr_array; | |
767 | raw_node = (struct f2fs_node *)page_address(node_page); | |
768 | addr_array = blkaddr_in_node(raw_node); | |
769 | return le32_to_cpu(addr_array[offset]); | |
770 | } | |
771 | ||
772 | static inline int f2fs_test_bit(unsigned int nr, char *addr) | |
773 | { | |
774 | int mask; | |
775 | ||
776 | addr += (nr >> 3); | |
777 | mask = 1 << (7 - (nr & 0x07)); | |
778 | return mask & *addr; | |
779 | } | |
780 | ||
781 | static inline int f2fs_set_bit(unsigned int nr, char *addr) | |
782 | { | |
783 | int mask; | |
784 | int ret; | |
785 | ||
786 | addr += (nr >> 3); | |
787 | mask = 1 << (7 - (nr & 0x07)); | |
788 | ret = mask & *addr; | |
789 | *addr |= mask; | |
790 | return ret; | |
791 | } | |
792 | ||
793 | static inline int f2fs_clear_bit(unsigned int nr, char *addr) | |
794 | { | |
795 | int mask; | |
796 | int ret; | |
797 | ||
798 | addr += (nr >> 3); | |
799 | mask = 1 << (7 - (nr & 0x07)); | |
800 | ret = mask & *addr; | |
801 | *addr &= ~mask; | |
802 | return ret; | |
803 | } | |
804 | ||
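Unlike the kernel's generic test_bit()/set_bit(), these helpers number bits from the most significant bit of each byte, presumably to match the on-disk bitmap layout. A compilable user-space check of that ordering (standalone, not f2fs code):

```c
#include <assert.h>
#include <string.h>

/* Same layout as f2fs_test_bit(): bit 0 is the MSB of byte 0. */
static int test_bit_msb(unsigned int nr, const char *addr)
{
	return addr[nr >> 3] & (1 << (7 - (nr & 0x07)));
}

int main(void)
{
	char map[2];

	memset(map, 0, sizeof(map));
	map[0] = (char)0x80;		/* sets bit 0 in this ordering, not bit 7 */
	assert(test_bit_msb(0, map));
	assert(!test_bit_msb(7, map));
	map[1] = 0x01;			/* bit 15 in this ordering */
	assert(test_bit_msb(15, map));
	return 0;
}
```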
805 | /* used for f2fs_inode_info->flags */ | |
806 | enum { | |
807 | FI_NEW_INODE, /* indicate newly allocated inode */ | |
808 | FI_NEED_CP, /* need to do checkpoint during fsync */ | |
809 | FI_INC_LINK, /* need to increment i_nlink */ | |
810 | FI_ACL_MODE, /* indicate acl mode */ | |
811 | FI_NO_ALLOC, /* should not allocate any blocks */ | |
812 | }; | |
813 | ||
814 | static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag) | |
815 | { | |
816 | set_bit(flag, &fi->flags); | |
817 | } | |
818 | ||
819 | static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag) | |
820 | { | |
821 | return test_bit(flag, &fi->flags); | |
822 | } | |
823 | ||
824 | static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag) | |
825 | { | |
826 | clear_bit(flag, &fi->flags); | |
827 | } | |
828 | ||
829 | static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode) | |
830 | { | |
831 | fi->i_acl_mode = mode; | |
832 | set_inode_flag(fi, FI_ACL_MODE); | |
833 | } | |
834 | ||
835 | static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag) | |
836 | { | |
837 | if (is_inode_flag_set(fi, FI_ACL_MODE)) { | |
838 | clear_inode_flag(fi, FI_ACL_MODE); | |
839 | return 1; | |
840 | } | |
841 | return 0; | |
842 | } | |
843 | ||
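set_acl_inode() and cond_clear_inode_flag() exist to stash the intended file mode while an ACL update is in flight and then report whether the stashed mode still needs to be applied. A sketch of that pattern (control flow and the wrapper name are illustrative, not the actual f2fs call sites):

```c
/* Illustrative: remember the new mode during an ACL change, then commit it. */
static void example_apply_acl_mode(struct inode *inode, umode_t mode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	set_acl_inode(fi, mode);		/* sets i_acl_mode and FI_ACL_MODE */

	/* ... the ACL xattr would be written here ... */

	if (cond_clear_inode_flag(fi, FI_ACL_MODE))
		inode->i_mode = fi->i_acl_mode;	/* flag was still set: apply the mode */
}
```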
844 | /* | |
845 | * file.c | |
846 | */ | |
847 | int f2fs_sync_file(struct file *, loff_t, loff_t, int); | |
848 | void truncate_data_blocks(struct dnode_of_data *); | |
849 | void f2fs_truncate(struct inode *); | |
850 | int f2fs_setattr(struct dentry *, struct iattr *); | |
851 | int truncate_hole(struct inode *, pgoff_t, pgoff_t); | |
852 | long f2fs_ioctl(struct file *, unsigned int, unsigned long); | |
853 | ||
854 | /* | |
855 | * inode.c | |
856 | */ | |
857 | void f2fs_set_inode_flags(struct inode *); | |
39a53e0c JK | 858 | struct inode *f2fs_iget(struct super_block *, unsigned long); |
859 | void update_inode(struct inode *, struct page *); | |
860 | int f2fs_write_inode(struct inode *, struct writeback_control *); | |
861 | void f2fs_evict_inode(struct inode *); | |
862 | ||
863 | /* | |
864 | * namei.c | |
865 | */ | |
866 | struct dentry *f2fs_get_parent(struct dentry *child); | |
867 | ||
868 | /* | |
869 | * dir.c | |
870 | */ | |
871 | struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *, | |
872 | struct page **); | |
873 | struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **); | |
874 | ino_t f2fs_inode_by_name(struct inode *, struct qstr *); | |
875 | void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, | |
876 | struct page *, struct inode *); | |
877 | void init_dent_inode(struct dentry *, struct page *); | |
878 | int f2fs_add_link(struct dentry *, struct inode *); | |
879 | void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *); | |
880 | int f2fs_make_empty(struct inode *, struct inode *); | |
881 | bool f2fs_empty_dir(struct inode *); | |
882 | ||
883 | /* | |
884 | * super.c | |
885 | */ | |
886 | int f2fs_sync_fs(struct super_block *, int); | |
a07ef784 NJ | 887 | extern __printf(3, 4) |
888 | void f2fs_msg(struct super_block *, const char *, const char *, ...); | |
39a53e0c JK | 889 | |
890 | /* | |
891 | * hash.c | |
892 | */ | |
9836b8b9 | 893 | f2fs_hash_t f2fs_dentry_hash(const char *, size_t); |
39a53e0c JK | 894 | |
895 | /* | |
896 | * node.c | |
897 | */ | |
898 | struct dnode_of_data; | |
899 | struct node_info; | |
900 | ||
901 | int is_checkpointed_node(struct f2fs_sb_info *, nid_t); | |
902 | void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *); | |
903 | int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int); | |
904 | int truncate_inode_blocks(struct inode *, pgoff_t); | |
905 | int remove_inode_page(struct inode *); | |
906 | int new_inode_page(struct inode *, struct dentry *); | |
907 | struct page *new_node_page(struct dnode_of_data *, unsigned int); | |
908 | void ra_node_page(struct f2fs_sb_info *, nid_t); | |
909 | struct page *get_node_page(struct f2fs_sb_info *, pgoff_t); | |
910 | struct page *get_node_page_ra(struct page *, int); | |
911 | void sync_inode_page(struct dnode_of_data *); | |
912 | int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *); | |
913 | bool alloc_nid(struct f2fs_sb_info *, nid_t *); | |
914 | void alloc_nid_done(struct f2fs_sb_info *, nid_t); | |
915 | void alloc_nid_failed(struct f2fs_sb_info *, nid_t); | |
916 | void recover_node_page(struct f2fs_sb_info *, struct page *, | |
917 | struct f2fs_summary *, struct node_info *, block_t); | |
918 | int recover_inode_page(struct f2fs_sb_info *, struct page *); | |
919 | int restore_node_summary(struct f2fs_sb_info *, unsigned int, | |
920 | struct f2fs_summary_block *); | |
921 | void flush_nat_entries(struct f2fs_sb_info *); | |
922 | int build_node_manager(struct f2fs_sb_info *); | |
923 | void destroy_node_manager(struct f2fs_sb_info *); | |
6e6093a8 | 924 | int __init create_node_manager_caches(void); |
39a53e0c JK | 925 | void destroy_node_manager_caches(void); |
926 | ||
927 | /* | |
928 | * segment.c | |
929 | */ | |
930 | void f2fs_balance_fs(struct f2fs_sb_info *); | |
931 | void invalidate_blocks(struct f2fs_sb_info *, block_t); | |
932 | void locate_dirty_segment(struct f2fs_sb_info *, unsigned int); | |
933 | void clear_prefree_segments(struct f2fs_sb_info *); | |
934 | int npages_for_summary_flush(struct f2fs_sb_info *); | |
935 | void allocate_new_segments(struct f2fs_sb_info *); | |
936 | struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); | |
3cd8a239 | 937 | struct bio *f2fs_bio_alloc(struct block_device *, int); |
39a53e0c | 938 | void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync); |
577e3495 | 939 | void write_meta_page(struct f2fs_sb_info *, struct page *); |
39a53e0c JK | 940 | void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int, |
941 | block_t, block_t *); | |
942 | void write_data_page(struct inode *, struct page *, struct dnode_of_data*, | |
943 | block_t, block_t *); | |
944 | void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t); | |
945 | void recover_data_page(struct f2fs_sb_info *, struct page *, | |
946 | struct f2fs_summary *, block_t, block_t); | |
947 | void rewrite_node_page(struct f2fs_sb_info *, struct page *, | |
948 | struct f2fs_summary *, block_t, block_t); | |
949 | void write_data_summaries(struct f2fs_sb_info *, block_t); | |
950 | void write_node_summaries(struct f2fs_sb_info *, block_t); | |
951 | int lookup_journal_in_cursum(struct f2fs_summary_block *, | |
952 | int, unsigned int, int); | |
953 | void flush_sit_entries(struct f2fs_sb_info *); | |
954 | int build_segment_manager(struct f2fs_sb_info *); | |
955 | void reset_victim_segmap(struct f2fs_sb_info *); | |
956 | void destroy_segment_manager(struct f2fs_sb_info *); | |
957 | ||
958 | /* | |
959 | * checkpoint.c | |
960 | */ | |
961 | struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t); | |
962 | struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t); | |
963 | long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); | |
964 | int check_orphan_space(struct f2fs_sb_info *); | |
965 | void add_orphan_inode(struct f2fs_sb_info *, nid_t); | |
966 | void remove_orphan_inode(struct f2fs_sb_info *, nid_t); | |
967 | int recover_orphan_inodes(struct f2fs_sb_info *); | |
968 | int get_valid_checkpoint(struct f2fs_sb_info *); | |
969 | void set_dirty_dir_page(struct inode *, struct page *); | |
970 | void remove_dirty_dir_inode(struct inode *); | |
971 | void sync_dirty_dir_inodes(struct f2fs_sb_info *); | |
43727527 | 972 | void write_checkpoint(struct f2fs_sb_info *, bool); |
39a53e0c | 973 | void init_orphan_info(struct f2fs_sb_info *); |
6e6093a8 | 974 | int __init create_checkpoint_caches(void); |
39a53e0c JK | 975 | void destroy_checkpoint_caches(void); |
976 | ||
977 | /* | |
978 | * data.c | |
979 | */ | |
980 | int reserve_new_block(struct dnode_of_data *); | |
981 | void update_extent_cache(block_t, struct dnode_of_data *); | |
982 | struct page *find_data_page(struct inode *, pgoff_t); | |
983 | struct page *get_lock_data_page(struct inode *, pgoff_t); | |
984 | struct page *get_new_data_page(struct inode *, pgoff_t, bool); | |
985 | int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int); | |
986 | int do_write_data_page(struct page *); | |
987 | ||
988 | /* | |
989 | * gc.c | |
990 | */ | |
991 | int start_gc_thread(struct f2fs_sb_info *); | |
992 | void stop_gc_thread(struct f2fs_sb_info *); | |
993 | block_t start_bidx_of_node(unsigned int); | |
408e9375 | 994 | int f2fs_gc(struct f2fs_sb_info *); |
39a53e0c | 995 | void build_gc_manager(struct f2fs_sb_info *); |
6e6093a8 | 996 | int __init create_gc_caches(void); |
39a53e0c JK | 997 | void destroy_gc_caches(void); |
998 | ||
999 | /* | |
1000 | * recovery.c | |
1001 | */ | |
1002 | void recover_fsync_data(struct f2fs_sb_info *); | |
1003 | bool space_for_roll_forward(struct f2fs_sb_info *); | |
1004 | ||
1005 | /* | |
1006 | * debug.c | |
1007 | */ | |
1008 | #ifdef CONFIG_F2FS_STAT_FS | |
1009 | struct f2fs_stat_info { | |
1010 | struct list_head stat_list; | |
1011 | struct f2fs_sb_info *sbi; | |
1012 | struct mutex stat_lock; | |
1013 | int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; | |
1014 | int main_area_segs, main_area_sections, main_area_zones; | |
1015 | int hit_ext, total_ext; | |
1016 | int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta; | |
1017 | int nats, sits, fnids; | |
1018 | int total_count, utilization; | |
1019 | int bg_gc; | |
1020 | unsigned int valid_count, valid_node_count, valid_inode_count; | |
1021 | unsigned int bimodal, avg_vblocks; | |
1022 | int util_free, util_valid, util_invalid; | |
1023 | int rsvd_segs, overp_segs; | |
1024 | int dirty_count, node_pages, meta_pages; | |
1025 | int prefree_count, call_count; | |
1026 | int tot_segs, node_segs, data_segs, free_segs, free_secs; | |
1027 | int tot_blks, data_blks, node_blks; | |
1028 | int curseg[NR_CURSEG_TYPE]; | |
1029 | int cursec[NR_CURSEG_TYPE]; | |
1030 | int curzone[NR_CURSEG_TYPE]; | |
1031 | ||
1032 | unsigned int segment_count[2]; | |
1033 | unsigned int block_count[2]; | |
1034 | unsigned base_mem, cache_mem; | |
1035 | }; | |
1036 | ||
1037 | #define stat_inc_call_count(si) ((si)->call_count++) | |
1038 | ||
1039 | #define stat_inc_seg_count(sbi, type) \ | |
1040 | do { \ | |
1041 | struct f2fs_stat_info *si = sbi->stat_info; \ | |
1042 | (si)->tot_segs++; \ | |
1043 | if (type == SUM_TYPE_DATA) \ | |
1044 | si->data_segs++; \ | |
1045 | else \ | |
1046 | si->node_segs++; \ | |
1047 | } while (0) | |
1048 | ||
1049 | #define stat_inc_tot_blk_count(si, blks) \ | |
1050 | (si->tot_blks += (blks)) | |
1051 | ||
1052 | #define stat_inc_data_blk_count(sbi, blks) \ | |
1053 | do { \ | |
1054 | struct f2fs_stat_info *si = sbi->stat_info; \ | |
1055 | stat_inc_tot_blk_count(si, blks); \ | |
1056 | si->data_blks += (blks); \ | |
1057 | } while (0) | |
1058 | ||
1059 | #define stat_inc_node_blk_count(sbi, blks) \ | |
1060 | do { \ | |
1061 | struct f2fs_stat_info *si = sbi->stat_info; \ | |
1062 | stat_inc_tot_blk_count(si, blks); \ | |
1063 | si->node_blks += (blks); \ | |
1064 | } while (0) | |
1065 | ||
1066 | int f2fs_build_stats(struct f2fs_sb_info *); | |
1067 | void f2fs_destroy_stats(struct f2fs_sb_info *); | |
6e6093a8 | 1068 | void __init f2fs_create_root_stats(void); |
4589d25d | 1069 | void f2fs_destroy_root_stats(void); |
39a53e0c JK | 1070 | #else |
1071 | #define stat_inc_call_count(si) | |
1072 | #define stat_inc_seg_count(si, type) | |
1073 | #define stat_inc_tot_blk_count(si, blks) | |
1074 | #define stat_inc_data_blk_count(si, blks) | |
1075 | #define stat_inc_node_blk_count(sbi, blks) | |
1076 | ||
1077 | static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } | |
1078 | static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } | |
6e6093a8 | 1079 | static inline void __init f2fs_create_root_stats(void) { } |
4589d25d | 1080 | static inline void f2fs_destroy_root_stats(void) { } |
39a53e0c JK | 1081 | #endif |
1082 | ||
1083 | extern const struct file_operations f2fs_dir_operations; | |
1084 | extern const struct file_operations f2fs_file_operations; | |
1085 | extern const struct inode_operations f2fs_file_inode_operations; | |
1086 | extern const struct address_space_operations f2fs_dblock_aops; | |
1087 | extern const struct address_space_operations f2fs_node_aops; | |
1088 | extern const struct address_space_operations f2fs_meta_aops; | |
1089 | extern const struct inode_operations f2fs_dir_inode_operations; | |
1090 | extern const struct inode_operations f2fs_symlink_inode_operations; | |
1091 | extern const struct inode_operations f2fs_special_inode_operations; | |
1092 | #endif |