1 /*
2 * linux/fs/ext4/namei.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/fs/minix/namei.c
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 *
15 * Big-endian to little-endian byte-swapping/bitmaps by
16 * David S. Miller (davem@caip.rutgers.edu), 1995
17 * Directory entry file type support and forward compatibility hooks
18 * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
19 * Hash Tree Directory indexing (c)
20 * Daniel Phillips, 2001
21 * Hash Tree Directory indexing porting
22 * Christopher Li, 2002
23 * Hash Tree Directory indexing cleanup
24 * Theodore Ts'o, 2002
25 */
26
27 #include <linux/fs.h>
28 #include <linux/pagemap.h>
29 #include <linux/jbd2.h>
30 #include <linux/time.h>
31 #include <linux/fcntl.h>
32 #include <linux/stat.h>
33 #include <linux/string.h>
34 #include <linux/quotaops.h>
35 #include <linux/buffer_head.h>
36 #include <linux/bio.h>
37 #include "ext4.h"
38 #include "ext4_jbd2.h"
39
40 #include "xattr.h"
41 #include "acl.h"
42
43 #include <trace/events/ext4.h>
44 /*
45 * define how far ahead to read directories while searching them.
46 */
47 #define NAMEI_RA_CHUNKS 2
48 #define NAMEI_RA_BLOCKS 4
49 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
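/*
 * With the values above, ext4_find_entry() keeps a readahead window of
 * 2 * 4 = 8 directory blocks (the size of its bh_use[] array).
 */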
50
51 static struct buffer_head *ext4_append(handle_t *handle,
52 struct inode *inode,
53 ext4_lblk_t *block)
54 {
55 struct buffer_head *bh;
56 int err;
57
58 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
59 ((inode->i_size >> 10) >=
60 EXT4_SB(inode->i_sb)->s_max_dir_size_kb)))
61 return ERR_PTR(-ENOSPC);
62
63 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
64
65 bh = ext4_bread(handle, inode, *block, 1);
66 if (IS_ERR(bh))
67 return bh;
68 inode->i_size += inode->i_sb->s_blocksize;
69 EXT4_I(inode)->i_disksize = inode->i_size;
70 BUFFER_TRACE(bh, "get_write_access");
71 err = ext4_journal_get_write_access(handle, bh);
72 if (err) {
73 brelse(bh);
74 ext4_std_error(inode->i_sb, err);
75 return ERR_PTR(err);
76 }
77 return bh;
78 }
79
80 static int ext4_dx_csum_verify(struct inode *inode,
81 struct ext4_dir_entry *dirent);
82
83 typedef enum {
84 EITHER, INDEX, DIRENT
85 } dirblock_type_t;
86
87 #define ext4_read_dirblock(inode, block, type) \
88 __ext4_read_dirblock((inode), (block), (type), __LINE__)
89
90 static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
91 ext4_lblk_t block,
92 dirblock_type_t type,
93 unsigned int line)
94 {
95 struct buffer_head *bh;
96 struct ext4_dir_entry *dirent;
97 int is_dx_block = 0;
98
99 bh = ext4_bread(NULL, inode, block, 0);
100 if (IS_ERR(bh)) {
101 __ext4_warning(inode->i_sb, __func__, line,
102 "error %ld reading directory block "
103 "(ino %lu, block %lu)", PTR_ERR(bh), inode->i_ino,
104 (unsigned long) block);
105
106 return bh;
107 }
108 if (!bh) {
109 ext4_error_inode(inode, __func__, line, block, "Directory hole found");
110 return ERR_PTR(-EIO);
111 }
112 dirent = (struct ext4_dir_entry *) bh->b_data;
113 /* Determine whether or not we have an index block */
114 if (is_dx(inode)) {
115 if (block == 0)
116 is_dx_block = 1;
117 else if (ext4_rec_len_from_disk(dirent->rec_len,
118 inode->i_sb->s_blocksize) ==
119 inode->i_sb->s_blocksize)
120 is_dx_block = 1;
121 }
122 if (!is_dx_block && type == INDEX) {
123 ext4_error_inode(inode, __func__, line, block,
124 "directory leaf block found instead of index block");
125 return ERR_PTR(-EIO);
126 }
127 if (!ext4_has_metadata_csum(inode->i_sb) ||
128 buffer_verified(bh))
129 return bh;
130
131 /*
132 * An empty leaf block can get mistaken for an index block; for
133 * this reason, we can only check the index checksum when the
134 * caller is sure it should be an index block.
135 */
136 if (is_dx_block && type == INDEX) {
137 if (ext4_dx_csum_verify(inode, dirent))
138 set_buffer_verified(bh);
139 else {
140 ext4_error_inode(inode, __func__, line, block,
141 "Directory index failed checksum");
142 brelse(bh);
143 return ERR_PTR(-EIO);
144 }
145 }
146 if (!is_dx_block) {
147 if (ext4_dirent_csum_verify(inode, dirent))
148 set_buffer_verified(bh);
149 else {
150 ext4_error_inode(inode, __func__, line, block,
151 "Directory block failed checksum");
152 brelse(bh);
153 return ERR_PTR(-EIO);
154 }
155 }
156 return bh;
157 }
158
159 #ifndef assert
160 #define assert(test) J_ASSERT(test)
161 #endif
162
163 #ifdef DX_DEBUG
164 #define dxtrace(command) command
165 #else
166 #define dxtrace(command)
167 #endif
168
169 struct fake_dirent
170 {
171 __le32 inode;
172 __le16 rec_len;
173 u8 name_len;
174 u8 file_type;
175 };
176
177 struct dx_countlimit
178 {
179 __le16 limit;
180 __le16 count;
181 };
182
183 struct dx_entry
184 {
185 __le32 hash;
186 __le32 block;
187 };
188
189 /*
190 * dx_root_info is laid out so that if it should somehow get overlaid by a
191 * dirent the two low bits of the hash version will be zero. Therefore, the
192 * hash version mod 4 should never be 0. Sincerely, the paranoia department.
193 */
194
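/*
 * On disk, dx_root occupies the start of the directory's block 0: a 12-byte
 * fake dirent for ".", a 12-byte fake dirent for "..", then the 8-byte
 * dx_root_info, so the dx_entry array (whose first slot doubles as the
 * dx_countlimit header) begins at byte offset 32.
 */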
195 struct dx_root
196 {
197 struct fake_dirent dot;
198 char dot_name[4];
199 struct fake_dirent dotdot;
200 char dotdot_name[4];
201 struct dx_root_info
202 {
203 __le32 reserved_zero;
204 u8 hash_version;
205 u8 info_length; /* 8 */
206 u8 indirect_levels;
207 u8 unused_flags;
208 }
209 info;
210 struct dx_entry entries[0];
211 };
212
213 struct dx_node
214 {
215 struct fake_dirent fake;
216 struct dx_entry entries[0];
217 };
218
219
220 struct dx_frame
221 {
222 struct buffer_head *bh;
223 struct dx_entry *entries;
224 struct dx_entry *at;
225 };
226
227 struct dx_map_entry
228 {
229 u32 hash;
230 u16 offs;
231 u16 size;
232 };
233
234 /*
235 * This goes at the end of each htree block.
236 */
237 struct dx_tail {
238 u32 dt_reserved;
239 __le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */
240 };
241
242 static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
243 static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
244 static inline unsigned dx_get_hash(struct dx_entry *entry);
245 static void dx_set_hash(struct dx_entry *entry, unsigned value);
246 static unsigned dx_get_count(struct dx_entry *entries);
247 static unsigned dx_get_limit(struct dx_entry *entries);
248 static void dx_set_count(struct dx_entry *entries, unsigned value);
249 static void dx_set_limit(struct dx_entry *entries, unsigned value);
250 static unsigned dx_root_limit(struct inode *dir, unsigned infosize);
251 static unsigned dx_node_limit(struct inode *dir);
252 static struct dx_frame *dx_probe(const struct qstr *d_name,
253 struct inode *dir,
254 struct dx_hash_info *hinfo,
255 struct dx_frame *frame);
256 static void dx_release(struct dx_frame *frames);
257 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
258 struct dx_hash_info *hinfo, struct dx_map_entry map[]);
259 static void dx_sort_map(struct dx_map_entry *map, unsigned count);
260 static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
261 struct dx_map_entry *offsets, int count, unsigned blocksize);
262 static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize);
263 static void dx_insert_block(struct dx_frame *frame,
264 u32 hash, ext4_lblk_t block);
265 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
266 struct dx_frame *frame,
267 struct dx_frame *frames,
268 __u32 *start_hash);
269 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
270 const struct qstr *d_name,
271 struct ext4_dir_entry_2 **res_dir);
272 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
273 struct inode *inode);
274
275 /* checksumming functions */
276 void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
277 unsigned int blocksize)
278 {
279 memset(t, 0, sizeof(struct ext4_dir_entry_tail));
280 t->det_rec_len = ext4_rec_len_to_disk(
281 sizeof(struct ext4_dir_entry_tail), blocksize);
282 t->det_reserved_ft = EXT4_FT_DIR_CSUM;
283 }
284
285 /* Walk through a dirent block to find a checksum "dirent" at the tail */
286 static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
287 struct ext4_dir_entry *de)
288 {
289 struct ext4_dir_entry_tail *t;
290
291 #ifdef PARANOID
292 struct ext4_dir_entry *d, *top;
293
294 d = de;
295 top = (struct ext4_dir_entry *)(((void *)de) +
296 (EXT4_BLOCK_SIZE(inode->i_sb) -
297 sizeof(struct ext4_dir_entry_tail)));
298 while (d < top && d->rec_len)
299 d = (struct ext4_dir_entry *)(((void *)d) +
300 le16_to_cpu(d->rec_len));
301
302 if (d != top)
303 return NULL;
304
305 t = (struct ext4_dir_entry_tail *)d;
306 #else
307 t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
308 #endif
309
310 if (t->det_reserved_zero1 ||
311 le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
312 t->det_reserved_zero2 ||
313 t->det_reserved_ft != EXT4_FT_DIR_CSUM)
314 return NULL;
315
316 return t;
317 }
318
319 static __le32 ext4_dirent_csum(struct inode *inode,
320 struct ext4_dir_entry *dirent, int size)
321 {
322 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
323 struct ext4_inode_info *ei = EXT4_I(inode);
324 __u32 csum;
325
326 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
327 return cpu_to_le32(csum);
328 }
329
330 static void warn_no_space_for_csum(struct inode *inode)
331 {
332 ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for "
333 "checksum. Please run e2fsck -D.", inode->i_ino);
334 }
335
336 int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
337 {
338 struct ext4_dir_entry_tail *t;
339
340 if (!ext4_has_metadata_csum(inode->i_sb))
341 return 1;
342
343 t = get_dirent_tail(inode, dirent);
344 if (!t) {
345 warn_no_space_for_csum(inode);
346 return 0;
347 }
348
349 if (t->det_checksum != ext4_dirent_csum(inode, dirent,
350 (void *)t - (void *)dirent))
351 return 0;
352
353 return 1;
354 }
355
356 static void ext4_dirent_csum_set(struct inode *inode,
357 struct ext4_dir_entry *dirent)
358 {
359 struct ext4_dir_entry_tail *t;
360
361 if (!ext4_has_metadata_csum(inode->i_sb))
362 return;
363
364 t = get_dirent_tail(inode, dirent);
365 if (!t) {
366 warn_no_space_for_csum(inode);
367 return;
368 }
369
370 t->det_checksum = ext4_dirent_csum(inode, dirent,
371 (void *)t - (void *)dirent);
372 }
373
374 int ext4_handle_dirty_dirent_node(handle_t *handle,
375 struct inode *inode,
376 struct buffer_head *bh)
377 {
378 ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
379 return ext4_handle_dirty_metadata(handle, inode, bh);
380 }
381
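/*
 * Locate the dx_countlimit header within an htree block: an interior node is
 * covered by a single fake dirent whose rec_len equals the block size, so the
 * header follows at offset 8; in the root block the "." and ".." fake dirents
 * take 24 bytes and dx_root_info another 8, putting the header at offset 32.
 */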
382 static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
383 struct ext4_dir_entry *dirent,
384 int *offset)
385 {
386 struct ext4_dir_entry *dp;
387 struct dx_root_info *root;
388 int count_offset;
389
390 if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
391 count_offset = 8;
392 else if (le16_to_cpu(dirent->rec_len) == 12) {
393 dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
394 if (le16_to_cpu(dp->rec_len) !=
395 EXT4_BLOCK_SIZE(inode->i_sb) - 12)
396 return NULL;
397 root = (struct dx_root_info *)(((void *)dp + 12));
398 if (root->reserved_zero ||
399 root->info_length != sizeof(struct dx_root_info))
400 return NULL;
401 count_offset = 32;
402 } else
403 return NULL;
404
405 if (offset)
406 *offset = count_offset;
407 return (struct dx_countlimit *)(((void *)dirent) + count_offset);
408 }
409
410 static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
411 int count_offset, int count, struct dx_tail *t)
412 {
413 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
414 struct ext4_inode_info *ei = EXT4_I(inode);
415 __u32 csum;
416 __le32 save_csum;
417 int size;
418
419 size = count_offset + (count * sizeof(struct dx_entry));
420 save_csum = t->dt_checksum;
421 t->dt_checksum = 0;
422 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
423 csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
424 t->dt_checksum = save_csum;
425
426 return cpu_to_le32(csum);
427 }
428
429 static int ext4_dx_csum_verify(struct inode *inode,
430 struct ext4_dir_entry *dirent)
431 {
432 struct dx_countlimit *c;
433 struct dx_tail *t;
434 int count_offset, limit, count;
435
436 if (!ext4_has_metadata_csum(inode->i_sb))
437 return 1;
438
439 c = get_dx_countlimit(inode, dirent, &count_offset);
440 if (!c) {
441 EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
442 return 1;
443 }
444 limit = le16_to_cpu(c->limit);
445 count = le16_to_cpu(c->count);
446 if (count_offset + (limit * sizeof(struct dx_entry)) >
447 EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
448 warn_no_space_for_csum(inode);
449 return 1;
450 }
451 t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
452
453 if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
454 count, t))
455 return 0;
456 return 1;
457 }
458
459 static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
460 {
461 struct dx_countlimit *c;
462 struct dx_tail *t;
463 int count_offset, limit, count;
464
465 if (!ext4_has_metadata_csum(inode->i_sb))
466 return;
467
468 c = get_dx_countlimit(inode, dirent, &count_offset);
469 if (!c) {
470 EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
471 return;
472 }
473 limit = le16_to_cpu(c->limit);
474 count = le16_to_cpu(c->count);
475 if (count_offset + (limit * sizeof(struct dx_entry)) >
476 EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
477 warn_no_space_for_csum(inode);
478 return;
479 }
480 t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
481
482 t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
483 }
484
485 static inline int ext4_handle_dirty_dx_node(handle_t *handle,
486 struct inode *inode,
487 struct buffer_head *bh)
488 {
489 ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
490 return ext4_handle_dirty_metadata(handle, inode, bh);
491 }
492
493 /*
494 * p is at least 6 bytes before the end of page
495 */
496 static inline struct ext4_dir_entry_2 *
497 ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
498 {
499 return (struct ext4_dir_entry_2 *)((char *)p +
500 ext4_rec_len_from_disk(p->rec_len, blocksize));
501 }
502
503 /*
504 * Future: use high four bits of block for coalesce-on-delete flags
505 * Mask them off for now.
506 */
507
508 static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
509 {
510 return le32_to_cpu(entry->block) & 0x00ffffff;
511 }
512
513 static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
514 {
515 entry->block = cpu_to_le32(value);
516 }
517
518 static inline unsigned dx_get_hash(struct dx_entry *entry)
519 {
520 return le32_to_cpu(entry->hash);
521 }
522
523 static inline void dx_set_hash(struct dx_entry *entry, unsigned value)
524 {
525 entry->hash = cpu_to_le32(value);
526 }
527
528 static inline unsigned dx_get_count(struct dx_entry *entries)
529 {
530 return le16_to_cpu(((struct dx_countlimit *) entries)->count);
531 }
532
533 static inline unsigned dx_get_limit(struct dx_entry *entries)
534 {
535 return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
536 }
537
538 static inline void dx_set_count(struct dx_entry *entries, unsigned value)
539 {
540 ((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
541 }
542
543 static inline void dx_set_limit(struct dx_entry *entries, unsigned value)
544 {
545 ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
546 }
547
548 static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
549 {
550 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
551 EXT4_DIR_REC_LEN(2) - infosize;
552
553 if (ext4_has_metadata_csum(dir->i_sb))
554 entry_space -= sizeof(struct dx_tail);
555 return entry_space / sizeof(struct dx_entry);
556 }
557
558 static inline unsigned dx_node_limit(struct inode *dir)
559 {
560 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
561
562 if (ext4_has_metadata_csum(dir->i_sb))
563 entry_space -= sizeof(struct dx_tail);
564 return entry_space / sizeof(struct dx_entry);
565 }
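/*
 * For a 4 KiB block size without metadata checksums these work out to a
 * dx_entry limit of 508 in the root block and 511 per interior node; enabling
 * checksums costs one slot in each, since the 8-byte dx_tail is the same size
 * as a dx_entry.
 */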
566
567 /*
568 * Debug
569 */
570 #ifdef DX_DEBUG
571 static void dx_show_index(char * label, struct dx_entry *entries)
572 {
573 int i, n = dx_get_count (entries);
574 printk(KERN_DEBUG "%s index ", label);
575 for (i = 0; i < n; i++) {
576 printk("%x->%lu ", i ? dx_get_hash(entries + i) :
577 0, (unsigned long)dx_get_block(entries + i));
578 }
579 printk("\n");
580 }
581
582 struct stats
583 {
584 unsigned names;
585 unsigned space;
586 unsigned bcount;
587 };
588
589 static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
590 int size, int show_names)
591 {
592 unsigned names = 0, space = 0;
593 char *base = (char *) de;
594 struct dx_hash_info h = *hinfo;
595
596 printk("names: ");
597 while ((char *) de < base + size)
598 {
599 if (de->inode)
600 {
601 if (show_names)
602 {
603 int len = de->name_len;
604 char *name = de->name;
605 while (len--) printk("%c", *name++);
606 ext4fs_dirhash(de->name, de->name_len, &h);
607 printk(":%x.%u ", h.hash,
608 (unsigned) ((char *) de - base));
609 }
610 space += EXT4_DIR_REC_LEN(de->name_len);
611 names++;
612 }
613 de = ext4_next_entry(de, size);
614 }
615 printk("(%i)\n", names);
616 return (struct stats) { names, space, 1 };
617 }
618
619 struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
620 struct dx_entry *entries, int levels)
621 {
622 unsigned blocksize = dir->i_sb->s_blocksize;
623 unsigned count = dx_get_count(entries), names = 0, space = 0, i;
624 unsigned bcount = 0;
625 struct buffer_head *bh;
626 int err;
627 printk("%i indexed blocks...\n", count);
628 for (i = 0; i < count; i++, entries++)
629 {
630 ext4_lblk_t block = dx_get_block(entries);
631 ext4_lblk_t hash = i ? dx_get_hash(entries): 0;
632 u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
633 struct stats stats;
634 printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
635 bh = ext4_bread(NULL,dir, block, 0);
636 if (!bh || IS_ERR(bh))
637 continue;
638 stats = levels?
639 dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
640 dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
641 names += stats.names;
642 space += stats.space;
643 bcount += stats.bcount;
644 brelse(bh);
645 }
646 if (bcount)
647 printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n",
648 levels ? "" : " ", names, space/bcount,
649 (space/bcount)*100/blocksize);
650 return (struct stats) { names, space, bcount};
651 }
652 #endif /* DX_DEBUG */
653
654 /*
655 * Probe for a directory leaf block to search.
656 *
657 * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
658 * error in the directory index, and the caller should fall back to
659 * searching the directory normally. The callers of dx_probe **MUST**
660 * check for this error code, and make sure it never gets reflected
661 * back to userspace.
662 */
663 static struct dx_frame *
664 dx_probe(const struct qstr *d_name, struct inode *dir,
665 struct dx_hash_info *hinfo, struct dx_frame *frame_in)
666 {
667 unsigned count, indirect;
668 struct dx_entry *at, *entries, *p, *q, *m;
669 struct dx_root *root;
670 struct dx_frame *frame = frame_in;
671 struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
672 u32 hash;
673
674 frame->bh = ext4_read_dirblock(dir, 0, INDEX);
675 if (IS_ERR(frame->bh))
676 return (struct dx_frame *) frame->bh;
677
678 root = (struct dx_root *) frame->bh->b_data;
679 if (root->info.hash_version != DX_HASH_TEA &&
680 root->info.hash_version != DX_HASH_HALF_MD4 &&
681 root->info.hash_version != DX_HASH_LEGACY) {
682 ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
683 root->info.hash_version);
684 goto fail;
685 }
686 hinfo->hash_version = root->info.hash_version;
687 if (hinfo->hash_version <= DX_HASH_TEA)
688 hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
689 hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
690 if (d_name)
691 ext4fs_dirhash(d_name->name, d_name->len, hinfo);
692 hash = hinfo->hash;
693
694 if (root->info.unused_flags & 1) {
695 ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
696 root->info.unused_flags);
697 goto fail;
698 }
699
700 if ((indirect = root->info.indirect_levels) > 1) {
701 ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
702 root->info.indirect_levels);
703 goto fail;
704 }
705
706 entries = (struct dx_entry *) (((char *)&root->info) +
707 root->info.info_length);
708
709 if (dx_get_limit(entries) != dx_root_limit(dir,
710 root->info.info_length)) {
711 ext4_warning(dir->i_sb, "dx entry: limit != root limit");
712 goto fail;
713 }
714
715 dxtrace(printk("Look up %x", hash));
716 while (1) {
717 count = dx_get_count(entries);
718 if (!count || count > dx_get_limit(entries)) {
719 ext4_warning(dir->i_sb,
720 "dx entry: no count or count > limit");
721 goto fail;
722 }
723
724 p = entries + 1;
725 q = entries + count - 1;
726 while (p <= q) {
727 m = p + (q - p)/2;
728 dxtrace(printk("."));
729 if (dx_get_hash(m) > hash)
730 q = m - 1;
731 else
732 p = m + 1;
733 }
734
735 if (0) { // linear search cross check
736 unsigned n = count - 1;
737 at = entries;
738 while (n--)
739 {
740 dxtrace(printk(","));
741 if (dx_get_hash(++at) > hash)
742 {
743 at--;
744 break;
745 }
746 }
747 assert (at == p - 1);
748 }
749
750 at = p - 1;
751 dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
752 frame->entries = entries;
753 frame->at = at;
754 if (!indirect--)
755 return frame;
756 frame++;
757 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
758 if (IS_ERR(frame->bh)) {
759 ret_err = (struct dx_frame *) frame->bh;
760 frame->bh = NULL;
761 goto fail;
762 }
763 entries = ((struct dx_node *) frame->bh->b_data)->entries;
764
765 if (dx_get_limit(entries) != dx_node_limit (dir)) {
766 ext4_warning(dir->i_sb,
767 "dx entry: limit != node limit");
768 goto fail;
769 }
770 }
771 fail:
772 while (frame >= frame_in) {
773 brelse(frame->bh);
774 frame--;
775 }
776 if (ret_err == ERR_PTR(ERR_BAD_DX_DIR))
777 ext4_warning(dir->i_sb,
778 "Corrupt dir inode %lu, running e2fsck is "
779 "recommended.", dir->i_ino);
780 return ret_err;
781 }
782
783 static void dx_release (struct dx_frame *frames)
784 {
785 if (frames[0].bh == NULL)
786 return;
787
788 if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
789 brelse(frames[1].bh);
790 brelse(frames[0].bh);
791 }
792
793 /*
794 * This function increments the frame pointer to search the next leaf
795 * block, and reads in the intervening index nodes if the search needs
796 * to continue. Whether or not the search continues is
797 * controlled by the hash parameter. If the hash value is even, then
798 * the search is only continued if the next block starts with that
799 * hash value. This is used if we are searching for a specific file.
800 *
801 * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
802 *
803 * This function returns 1 if the caller should continue to search,
804 * or 0 if it should not. If there is an error reading one of the
805 * index blocks, it will return a negative error code.
806 *
807 * If start_hash is non-null, it will be filled in with the starting
808 * hash of the next page.
809 */
810 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
811 struct dx_frame *frame,
812 struct dx_frame *frames,
813 __u32 *start_hash)
814 {
815 struct dx_frame *p;
816 struct buffer_head *bh;
817 int num_frames = 0;
818 __u32 bhash;
819
820 p = frame;
821 /*
822 * Find the next leaf page by incrementing the frame pointer.
823 * If we run out of entries in the interior node, loop around and
824 * increment pointer in the parent node. When we break out of
825 * this loop, num_frames indicates the number of interior
826 * nodes that need to be read.
827 */
828 while (1) {
829 if (++(p->at) < p->entries + dx_get_count(p->entries))
830 break;
831 if (p == frames)
832 return 0;
833 num_frames++;
834 p--;
835 }
836
837 /*
838 * If the hash is 1, then continue only if the next page has a
839 * continuation hash of any value. This is used for readdir
840 * handling. Otherwise, check to see if the hash matches the
841 * desired continuation hash. If it doesn't, return since
842 * there's no point in reading the successive index pages.
843 */
844 bhash = dx_get_hash(p->at);
845 if (start_hash)
846 *start_hash = bhash;
847 if ((hash & 1) == 0) {
848 if ((bhash & ~1) != hash)
849 return 0;
850 }
851 /*
852 * If the hash is HASH_NB_ALWAYS, we always go to the next
853 * block so no check is necessary
854 */
855 while (num_frames--) {
856 bh = ext4_read_dirblock(dir, dx_get_block(p->at), INDEX);
857 if (IS_ERR(bh))
858 return PTR_ERR(bh);
859 p++;
860 brelse(p->bh);
861 p->bh = bh;
862 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
863 }
864 return 1;
865 }
866
867
868 /*
869 * This function fills a red-black tree with information from a
870 * directory block. It returns the number of directory entries loaded
871 * into the tree, or a negative error code on failure.
872 */
873 static int htree_dirblock_to_tree(struct file *dir_file,
874 struct inode *dir, ext4_lblk_t block,
875 struct dx_hash_info *hinfo,
876 __u32 start_hash, __u32 start_minor_hash)
877 {
878 struct buffer_head *bh;
879 struct ext4_dir_entry_2 *de, *top;
880 int err = 0, count = 0;
881
882 dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
883 (unsigned long)block));
884 bh = ext4_read_dirblock(dir, block, DIRENT);
885 if (IS_ERR(bh))
886 return PTR_ERR(bh);
887
888 de = (struct ext4_dir_entry_2 *) bh->b_data;
889 top = (struct ext4_dir_entry_2 *) ((char *) de +
890 dir->i_sb->s_blocksize -
891 EXT4_DIR_REC_LEN(0));
892 for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
893 if (ext4_check_dir_entry(dir, NULL, de, bh,
894 bh->b_data, bh->b_size,
895 (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
896 + ((char *)de - bh->b_data))) {
897 /* silently ignore the rest of the block */
898 break;
899 }
900 ext4fs_dirhash(de->name, de->name_len, hinfo);
901 if ((hinfo->hash < start_hash) ||
902 ((hinfo->hash == start_hash) &&
903 (hinfo->minor_hash < start_minor_hash)))
904 continue;
905 if (de->inode == 0)
906 continue;
907 if ((err = ext4_htree_store_dirent(dir_file,
908 hinfo->hash, hinfo->minor_hash, de)) != 0) {
909 brelse(bh);
910 return err;
911 }
912 count++;
913 }
914 brelse(bh);
915 return count;
916 }
917
918
919 /*
920 * This function fills a red-black tree with information from a
921 * directory. We start scanning the directory in hash order, starting
922 * at start_hash and start_minor_hash.
923 *
924 * This function returns the number of entries inserted into the tree,
925 * or a negative error code.
926 */
927 int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
928 __u32 start_minor_hash, __u32 *next_hash)
929 {
930 struct dx_hash_info hinfo;
931 struct ext4_dir_entry_2 *de;
932 struct dx_frame frames[2], *frame;
933 struct inode *dir;
934 ext4_lblk_t block;
935 int count = 0;
936 int ret, err;
937 __u32 hashval;
938
939 dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
940 start_hash, start_minor_hash));
941 dir = file_inode(dir_file);
942 if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
943 hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
944 if (hinfo.hash_version <= DX_HASH_TEA)
945 hinfo.hash_version +=
946 EXT4_SB(dir->i_sb)->s_hash_unsigned;
947 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
948 if (ext4_has_inline_data(dir)) {
949 int has_inline_data = 1;
950 count = htree_inlinedir_to_tree(dir_file, dir, 0,
951 &hinfo, start_hash,
952 start_minor_hash,
953 &has_inline_data);
954 if (has_inline_data) {
955 *next_hash = ~0;
956 return count;
957 }
958 }
959 count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
960 start_hash, start_minor_hash);
961 *next_hash = ~0;
962 return count;
963 }
964 hinfo.hash = start_hash;
965 hinfo.minor_hash = 0;
966 frame = dx_probe(NULL, dir, &hinfo, frames);
967 if (IS_ERR(frame))
968 return PTR_ERR(frame);
969
970 /* Add '.' and '..' from the htree header */
971 if (!start_hash && !start_minor_hash) {
972 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
973 if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
974 goto errout;
975 count++;
976 }
977 if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
978 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
979 de = ext4_next_entry(de, dir->i_sb->s_blocksize);
980 if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
981 goto errout;
982 count++;
983 }
984
985 while (1) {
986 block = dx_get_block(frame->at);
987 ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
988 start_hash, start_minor_hash);
989 if (ret < 0) {
990 err = ret;
991 goto errout;
992 }
993 count += ret;
994 hashval = ~0;
995 ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
996 frame, frames, &hashval);
997 *next_hash = hashval;
998 if (ret < 0) {
999 err = ret;
1000 goto errout;
1001 }
1002 /*
1003 * Stop if: (a) there are no more entries, or
1004 * (b) we have inserted at least one entry and the
1005 * next hash value is not a continuation
1006 */
1007 if ((ret == 0) ||
1008 (count && ((hashval & 1) == 0)))
1009 break;
1010 }
1011 dx_release(frames);
1012 dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, "
1013 "next hash: %x\n", count, *next_hash));
1014 return count;
1015 errout:
1016 dx_release(frames);
1017 return (err);
1018 }
1019
1020 static inline int search_dirblock(struct buffer_head *bh,
1021 struct inode *dir,
1022 const struct qstr *d_name,
1023 unsigned int offset,
1024 struct ext4_dir_entry_2 **res_dir)
1025 {
1026 return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
1027 d_name, offset, res_dir);
1028 }
1029
1030 /*
1031 * Directory block splitting, compacting
1032 */
1033
1034 /*
1035 * Create map of hash values, offsets, and sizes, stored at end of block.
1036 * Returns number of entries mapped.
1037 */
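/*
 * Dirent offsets within the block are always 4-byte aligned, so they are
 * stored shifted right by two bits to fit the 16-bit dx_map_entry.offs field;
 * dx_move_dirents() shifts them back with offs << 2.
 */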
1038 static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1039 struct dx_hash_info *hinfo,
1040 struct dx_map_entry *map_tail)
1041 {
1042 int count = 0;
1043 char *base = (char *) de;
1044 struct dx_hash_info h = *hinfo;
1045
1046 while ((char *) de < base + blocksize) {
1047 if (de->name_len && de->inode) {
1048 ext4fs_dirhash(de->name, de->name_len, &h);
1049 map_tail--;
1050 map_tail->hash = h.hash;
1051 map_tail->offs = ((char *) de - base)>>2;
1052 map_tail->size = le16_to_cpu(de->rec_len);
1053 count++;
1054 cond_resched();
1055 }
1056 /* XXX: do we need to check rec_len == 0 case? -Chris */
1057 de = ext4_next_entry(de, blocksize);
1058 }
1059 return count;
1060 }
1061
1062 /* Sort map by hash value */
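/*
 * Comb sort with the usual gap shrink factor of 13/10 (~1.3); gaps of 9 and
 * 10 are bumped to 11, and a final bubble-sort pass finishes off the
 * nearly-sorted map.
 */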
1063 static void dx_sort_map (struct dx_map_entry *map, unsigned count)
1064 {
1065 struct dx_map_entry *p, *q, *top = map + count - 1;
1066 int more;
1067 /* Combsort until bubble sort doesn't suck */
1068 while (count > 2) {
1069 count = count*10/13;
1070 if (count - 9 < 2) /* 9, 10 -> 11 */
1071 count = 11;
1072 for (p = top, q = p - count; q >= map; p--, q--)
1073 if (p->hash < q->hash)
1074 swap(*p, *q);
1075 }
1076 /* Garden variety bubble sort */
1077 do {
1078 more = 0;
1079 q = top;
1080 while (q-- > map) {
1081 if (q[1].hash >= q[0].hash)
1082 continue;
1083 swap(*(q+1), *q);
1084 more = 1;
1085 }
1086 } while(more);
1087 }
1088
1089 static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
1090 {
1091 struct dx_entry *entries = frame->entries;
1092 struct dx_entry *old = frame->at, *new = old + 1;
1093 int count = dx_get_count(entries);
1094
1095 assert(count < dx_get_limit(entries));
1096 assert(old < entries + count);
1097 memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
1098 dx_set_hash(new, hash);
1099 dx_set_block(new, block);
1100 dx_set_count(entries, count + 1);
1101 }
1102
1103 /*
1104 * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
1105 *
1106 * `len <= EXT4_NAME_LEN' is guaranteed by caller.
1107 * `de != NULL' is guaranteed by caller.
1108 */
1109 static inline int ext4_match (int len, const char * const name,
1110 struct ext4_dir_entry_2 * de)
1111 {
1112 if (len != de->name_len)
1113 return 0;
1114 if (!de->inode)
1115 return 0;
1116 return !memcmp(name, de->name, len);
1117 }
1118
1119 /*
1120 * Returns 0 if not found, -1 on failure, and 1 on success
1121 */
1122 int search_dir(struct buffer_head *bh,
1123 char *search_buf,
1124 int buf_size,
1125 struct inode *dir,
1126 const struct qstr *d_name,
1127 unsigned int offset,
1128 struct ext4_dir_entry_2 **res_dir)
1129 {
1130 struct ext4_dir_entry_2 * de;
1131 char * dlimit;
1132 int de_len;
1133 const char *name = d_name->name;
1134 int namelen = d_name->len;
1135
1136 de = (struct ext4_dir_entry_2 *)search_buf;
1137 dlimit = search_buf + buf_size;
1138 while ((char *) de < dlimit) {
1139 /* this code is executed quadratically often */
1140 /* do minimal checking `by hand' */
1141
1142 if ((char *) de + namelen <= dlimit &&
1143 ext4_match (namelen, name, de)) {
1144 /* found a match - just to be sure, do a full check */
1145 if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
1146 bh->b_size, offset))
1147 return -1;
1148 *res_dir = de;
1149 return 1;
1150 }
1151 /* prevent looping on a bad block */
1152 de_len = ext4_rec_len_from_disk(de->rec_len,
1153 dir->i_sb->s_blocksize);
1154 if (de_len <= 0)
1155 return -1;
1156 offset += de_len;
1157 de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
1158 }
1159 return 0;
1160 }
1161
1162 static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
1163 struct ext4_dir_entry *de)
1164 {
1165 struct super_block *sb = dir->i_sb;
1166
1167 if (!is_dx(dir))
1168 return 0;
1169 if (block == 0)
1170 return 1;
1171 if (de->inode == 0 &&
1172 ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) ==
1173 sb->s_blocksize)
1174 return 1;
1175 return 0;
1176 }
1177
1178 /*
1179 * ext4_find_entry()
1180 *
1181 * finds an entry in the specified directory with the wanted name. It
1182 * returns the cache buffer in which the entry was found, and the entry
1183 * itself (as a parameter - res_dir). It does NOT read the inode of the
1184 * entry - you'll have to do that yourself if you want to.
1185 *
1186 * The returned buffer_head has ->b_count elevated. The caller is expected
1187 * to brelse() it when appropriate.
1188 */
1189 static struct buffer_head * ext4_find_entry (struct inode *dir,
1190 const struct qstr *d_name,
1191 struct ext4_dir_entry_2 **res_dir,
1192 int *inlined)
1193 {
1194 struct super_block *sb;
1195 struct buffer_head *bh_use[NAMEI_RA_SIZE];
1196 struct buffer_head *bh, *ret = NULL;
1197 ext4_lblk_t start, block, b;
1198 const u8 *name = d_name->name;
1199 int ra_max = 0; /* Number of bh's in the readahead
1200 buffer, bh_use[] */
1201 int ra_ptr = 0; /* Current index into readahead
1202 buffer */
1203 int num = 0;
1204 ext4_lblk_t nblocks;
1205 int i, namelen;
1206
1207 *res_dir = NULL;
1208 sb = dir->i_sb;
1209 namelen = d_name->len;
1210 if (namelen > EXT4_NAME_LEN)
1211 return NULL;
1212
1213 if (ext4_has_inline_data(dir)) {
1214 int has_inline_data = 1;
1215 ret = ext4_find_inline_entry(dir, d_name, res_dir,
1216 &has_inline_data);
1217 if (has_inline_data) {
1218 if (inlined)
1219 *inlined = 1;
1220 return ret;
1221 }
1222 }
1223
1224 if ((namelen <= 2) && (name[0] == '.') &&
1225 (name[1] == '.' || name[1] == '\0')) {
1226 /*
1227 * "." or ".." will only be in the first block
1228 * NFS may look up ".."; "." should be handled by the VFS
1229 */
1230 block = start = 0;
1231 nblocks = 1;
1232 goto restart;
1233 }
1234 if (is_dx(dir)) {
1235 bh = ext4_dx_find_entry(dir, d_name, res_dir);
1236 /*
1237 * On success, or if the error was file not found,
1238 * return. Otherwise, fall back to doing a search the
1239 * old fashioned way.
1240 */
1241 if (!IS_ERR(bh) || PTR_ERR(bh) != ERR_BAD_DX_DIR)
1242 return bh;
1243 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1244 "falling back\n"));
1245 }
1246 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1247 start = EXT4_I(dir)->i_dir_start_lookup;
1248 if (start >= nblocks)
1249 start = 0;
1250 block = start;
1251 restart:
1252 do {
1253 /*
1254 * We deal with the read-ahead logic here.
1255 */
1256 if (ra_ptr >= ra_max) {
1257 /* Refill the readahead buffer */
1258 ra_ptr = 0;
1259 b = block;
1260 for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
1261 /*
1262 * Terminate if we reach the end of the
1263 * directory and must wrap, or if our
1264 * search has finished at this block.
1265 */
1266 if (b >= nblocks || (num && block == start)) {
1267 bh_use[ra_max] = NULL;
1268 break;
1269 }
1270 num++;
1271 bh = ext4_getblk(NULL, dir, b++, 0);
1272 if (unlikely(IS_ERR(bh))) {
1273 if (ra_max == 0)
1274 return bh;
1275 break;
1276 }
1277 bh_use[ra_max] = bh;
1278 if (bh)
1279 ll_rw_block(READ | REQ_META | REQ_PRIO,
1280 1, &bh);
1281 }
1282 }
1283 if ((bh = bh_use[ra_ptr++]) == NULL)
1284 goto next;
1285 wait_on_buffer(bh);
1286 if (!buffer_uptodate(bh)) {
1287 /* read error, skip block & hope for the best */
1288 EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
1289 (unsigned long) block);
1290 brelse(bh);
1291 goto next;
1292 }
1293 if (!buffer_verified(bh) &&
1294 !is_dx_internal_node(dir, block,
1295 (struct ext4_dir_entry *)bh->b_data) &&
1296 !ext4_dirent_csum_verify(dir,
1297 (struct ext4_dir_entry *)bh->b_data)) {
1298 EXT4_ERROR_INODE(dir, "checksumming directory "
1299 "block %lu", (unsigned long)block);
1300 brelse(bh);
1301 goto next;
1302 }
1303 set_buffer_verified(bh);
1304 i = search_dirblock(bh, dir, d_name,
1305 block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
1306 if (i == 1) {
1307 EXT4_I(dir)->i_dir_start_lookup = block;
1308 ret = bh;
1309 goto cleanup_and_exit;
1310 } else {
1311 brelse(bh);
1312 if (i < 0)
1313 goto cleanup_and_exit;
1314 }
1315 next:
1316 if (++block >= nblocks)
1317 block = 0;
1318 } while (block != start);
1319
1320 /*
1321 * If the directory has grown while we were searching, then
1322 * search the last part of the directory before giving up.
1323 */
1324 block = nblocks;
1325 nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
1326 if (block < nblocks) {
1327 start = 0;
1328 goto restart;
1329 }
1330
1331 cleanup_and_exit:
1332 /* Clean up the read-ahead blocks */
1333 for (; ra_ptr < ra_max; ra_ptr++)
1334 brelse(bh_use[ra_ptr]);
1335 return ret;
1336 }
1337
1338 static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1339 struct ext4_dir_entry_2 **res_dir)
1340 {
1341 struct super_block * sb = dir->i_sb;
1342 struct dx_hash_info hinfo;
1343 struct dx_frame frames[2], *frame;
1344 struct buffer_head *bh;
1345 ext4_lblk_t block;
1346 int retval;
1347
1348 frame = dx_probe(d_name, dir, &hinfo, frames);
1349 if (IS_ERR(frame))
1350 return (struct buffer_head *) frame;
1351 do {
1352 block = dx_get_block(frame->at);
1353 bh = ext4_read_dirblock(dir, block, DIRENT);
1354 if (IS_ERR(bh))
1355 goto errout;
1356
1357 retval = search_dirblock(bh, dir, d_name,
1358 block << EXT4_BLOCK_SIZE_BITS(sb),
1359 res_dir);
1360 if (retval == 1)
1361 goto success;
1362 brelse(bh);
1363 if (retval == -1) {
1364 bh = ERR_PTR(ERR_BAD_DX_DIR);
1365 goto errout;
1366 }
1367
1368 /* Check to see if we should continue to search */
1369 retval = ext4_htree_next_block(dir, hinfo.hash, frame,
1370 frames, NULL);
1371 if (retval < 0) {
1372 ext4_warning(sb,
1373 "error %d reading index page in directory #%lu",
1374 retval, dir->i_ino);
1375 bh = ERR_PTR(retval);
1376 goto errout;
1377 }
1378 } while (retval == 1);
1379
1380 bh = NULL;
1381 errout:
1382 dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
1383 success:
1384 dx_release(frames);
1385 return bh;
1386 }
1387
1388 static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
1389 {
1390 struct inode *inode;
1391 struct ext4_dir_entry_2 *de;
1392 struct buffer_head *bh;
1393
1394 if (dentry->d_name.len > EXT4_NAME_LEN)
1395 return ERR_PTR(-ENAMETOOLONG);
1396
1397 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
1398 if (IS_ERR(bh))
1399 return (struct dentry *) bh;
1400 inode = NULL;
1401 if (bh) {
1402 __u32 ino = le32_to_cpu(de->inode);
1403 brelse(bh);
1404 if (!ext4_valid_inum(dir->i_sb, ino)) {
1405 EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
1406 return ERR_PTR(-EIO);
1407 }
1408 if (unlikely(ino == dir->i_ino)) {
1409 EXT4_ERROR_INODE(dir, "'%pd' linked to parent dir",
1410 dentry);
1411 return ERR_PTR(-EIO);
1412 }
1413 inode = ext4_iget_normal(dir->i_sb, ino);
1414 if (inode == ERR_PTR(-ESTALE)) {
1415 EXT4_ERROR_INODE(dir,
1416 "deleted inode referenced: %u",
1417 ino);
1418 return ERR_PTR(-EIO);
1419 }
1420 }
1421 return d_splice_alias(inode, dentry);
1422 }
1423
1424
1425 struct dentry *ext4_get_parent(struct dentry *child)
1426 {
1427 __u32 ino;
1428 static const struct qstr dotdot = QSTR_INIT("..", 2);
1429 struct ext4_dir_entry_2 * de;
1430 struct buffer_head *bh;
1431
1432 bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
1433 if (IS_ERR(bh))
1434 return (struct dentry *) bh;
1435 if (!bh)
1436 return ERR_PTR(-ENOENT);
1437 ino = le32_to_cpu(de->inode);
1438 brelse(bh);
1439
1440 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
1441 EXT4_ERROR_INODE(child->d_inode,
1442 "bad parent inode number: %u", ino);
1443 return ERR_PTR(-EIO);
1444 }
1445
1446 return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
1447 }
1448
1449 /*
1450 * Move count entries from end of map between two memory locations.
1451 * Returns pointer to last entry moved.
1452 */
1453 static struct ext4_dir_entry_2 *
1454 dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
1455 unsigned blocksize)
1456 {
1457 unsigned rec_len = 0;
1458
1459 while (count--) {
1460 struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
1461 (from + (map->offs<<2));
1462 rec_len = EXT4_DIR_REC_LEN(de->name_len);
1463 memcpy (to, de, rec_len);
1464 ((struct ext4_dir_entry_2 *) to)->rec_len =
1465 ext4_rec_len_to_disk(rec_len, blocksize);
1466 de->inode = 0;
1467 map++;
1468 to += rec_len;
1469 }
1470 return (struct ext4_dir_entry_2 *) (to - rec_len);
1471 }
1472
1473 /*
1474 * Compact each dir entry in the range to the minimal rec_len.
1475 * Returns pointer to last entry in range.
1476 */
1477 static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
1478 {
1479 struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
1480 unsigned rec_len = 0;
1481
1482 prev = to = de;
1483 while ((char*)de < base + blocksize) {
1484 next = ext4_next_entry(de, blocksize);
1485 if (de->inode && de->name_len) {
1486 rec_len = EXT4_DIR_REC_LEN(de->name_len);
1487 if (de > to)
1488 memmove(to, de, rec_len);
1489 to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
1490 prev = to;
1491 to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
1492 }
1493 de = next;
1494 }
1495 return prev;
1496 }
1497
1498 /*
1499 * Split a full leaf block to make room for a new dir entry.
1500 * Allocate a new block, and move entries so that they are approx. equally full.
1501 * Returns pointer to de in block into which the new entry will be inserted.
1502 */
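/*
 * The split point is chosen by walking the hash-sorted map from the highest
 * hash downward, moving entries into the new block until roughly half of the
 * block's bytes have moved; "continued" records a hash collision straddling
 * the boundary, so the new index entry is inserted as hash2 + 1 (continuation
 * bit set) in that case.
 */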
1503 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1504 struct buffer_head **bh,struct dx_frame *frame,
1505 struct dx_hash_info *hinfo)
1506 {
1507 unsigned blocksize = dir->i_sb->s_blocksize;
1508 unsigned count, continued;
1509 struct buffer_head *bh2;
1510 ext4_lblk_t newblock;
1511 u32 hash2;
1512 struct dx_map_entry *map;
1513 char *data1 = (*bh)->b_data, *data2;
1514 unsigned split, move, size;
1515 struct ext4_dir_entry_2 *de = NULL, *de2;
1516 struct ext4_dir_entry_tail *t;
1517 int csum_size = 0;
1518 int err = 0, i;
1519
1520 if (ext4_has_metadata_csum(dir->i_sb))
1521 csum_size = sizeof(struct ext4_dir_entry_tail);
1522
1523 bh2 = ext4_append(handle, dir, &newblock);
1524 if (IS_ERR(bh2)) {
1525 brelse(*bh);
1526 *bh = NULL;
1527 return (struct ext4_dir_entry_2 *) bh2;
1528 }
1529
1530 BUFFER_TRACE(*bh, "get_write_access");
1531 err = ext4_journal_get_write_access(handle, *bh);
1532 if (err)
1533 goto journal_error;
1534
1535 BUFFER_TRACE(frame->bh, "get_write_access");
1536 err = ext4_journal_get_write_access(handle, frame->bh);
1537 if (err)
1538 goto journal_error;
1539
1540 data2 = bh2->b_data;
1541
1542 /* create map in the end of data2 block */
1543 map = (struct dx_map_entry *) (data2 + blocksize);
1544 count = dx_make_map((struct ext4_dir_entry_2 *) data1,
1545 blocksize, hinfo, map);
1546 map -= count;
1547 dx_sort_map(map, count);
1548 /* Split the existing block in the middle, size-wise */
1549 size = 0;
1550 move = 0;
1551 for (i = count-1; i >= 0; i--) {
1552 /* is more than half of this entry in 2nd half of the block? */
1553 if (size + map[i].size/2 > blocksize/2)
1554 break;
1555 size += map[i].size;
1556 move++;
1557 }
1558 /* map index at which we will split */
1559 split = count - move;
1560 hash2 = map[split].hash;
1561 continued = hash2 == map[split - 1].hash;
1562 dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
1563 (unsigned long)dx_get_block(frame->at),
1564 hash2, split, count-split));
1565
1566 /* Fancy dance to stay within two buffers */
1567 de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1568 de = dx_pack_dirents(data1, blocksize);
1569 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1570 (char *) de,
1571 blocksize);
1572 de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
1573 (char *) de2,
1574 blocksize);
1575 if (csum_size) {
1576 t = EXT4_DIRENT_TAIL(data2, blocksize);
1577 initialize_dirent_tail(t, blocksize);
1578
1579 t = EXT4_DIRENT_TAIL(data1, blocksize);
1580 initialize_dirent_tail(t, blocksize);
1581 }
1582
1583 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1584 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1585
1586 /* Which block gets the new entry? */
1587 if (hinfo->hash >= hash2) {
1588 swap(*bh, bh2);
1589 de = de2;
1590 }
1591 dx_insert_block(frame, hash2 + continued, newblock);
1592 err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1593 if (err)
1594 goto journal_error;
1595 err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1596 if (err)
1597 goto journal_error;
1598 brelse(bh2);
1599 dxtrace(dx_show_index("frame", frame->entries));
1600 return de;
1601
1602 journal_error:
1603 brelse(*bh);
1604 brelse(bh2);
1605 *bh = NULL;
1606 ext4_std_error(dir->i_sb, err);
1607 return ERR_PTR(err);
1608 }
1609
1610 int ext4_find_dest_de(struct inode *dir, struct inode *inode,
1611 struct buffer_head *bh,
1612 void *buf, int buf_size,
1613 const char *name, int namelen,
1614 struct ext4_dir_entry_2 **dest_de)
1615 {
1616 struct ext4_dir_entry_2 *de;
1617 unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
1618 int nlen, rlen;
1619 unsigned int offset = 0;
1620 char *top;
1621
1622 de = (struct ext4_dir_entry_2 *)buf;
1623 top = buf + buf_size - reclen;
1624 while ((char *) de <= top) {
1625 if (ext4_check_dir_entry(dir, NULL, de, bh,
1626 buf, buf_size, offset))
1627 return -EIO;
1628 if (ext4_match(namelen, name, de))
1629 return -EEXIST;
1630 nlen = EXT4_DIR_REC_LEN(de->name_len);
1631 rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
1632 if ((de->inode ? rlen - nlen : rlen) >= reclen)
1633 break;
1634 de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
1635 offset += rlen;
1636 }
1637 if ((char *) de > top)
1638 return -ENOSPC;
1639
1640 *dest_de = de;
1641 return 0;
1642 }
1643
1644 void ext4_insert_dentry(struct inode *inode,
1645 struct ext4_dir_entry_2 *de,
1646 int buf_size,
1647 const char *name, int namelen)
1648 {
1649
1650 int nlen, rlen;
1651
1652 nlen = EXT4_DIR_REC_LEN(de->name_len);
1653 rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
1654 if (de->inode) {
1655 struct ext4_dir_entry_2 *de1 =
1656 (struct ext4_dir_entry_2 *)((char *)de + nlen);
1657 de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
1658 de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
1659 de = de1;
1660 }
1661 de->file_type = EXT4_FT_UNKNOWN;
1662 de->inode = cpu_to_le32(inode->i_ino);
1663 ext4_set_de_type(inode->i_sb, de, inode->i_mode);
1664 de->name_len = namelen;
1665 memcpy(de->name, name, namelen);
1666 }
1667 /*
1668 * Add a new entry into a directory (leaf) block. If de is non-NULL,
1669 * it points to a directory entry which is guaranteed to be large
1670 * enough for the new directory entry. If de is NULL, then
1671 * add_dirent_to_buf will attempt to search the directory block for
1672 * space. It will return -ENOSPC if no space is available, -EIO if the
1673 * block is corrupted, and -EEXIST if the directory entry already exists.
1674 */
1675 static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1676 struct inode *inode, struct ext4_dir_entry_2 *de,
1677 struct buffer_head *bh)
1678 {
1679 struct inode *dir = dentry->d_parent->d_inode;
1680 const char *name = dentry->d_name.name;
1681 int namelen = dentry->d_name.len;
1682 unsigned int blocksize = dir->i_sb->s_blocksize;
1683 int csum_size = 0;
1684 int err;
1685
1686 if (ext4_has_metadata_csum(inode->i_sb))
1687 csum_size = sizeof(struct ext4_dir_entry_tail);
1688
1689 if (!de) {
1690 err = ext4_find_dest_de(dir, inode,
1691 bh, bh->b_data, blocksize - csum_size,
1692 name, namelen, &de);
1693 if (err)
1694 return err;
1695 }
1696 BUFFER_TRACE(bh, "get_write_access");
1697 err = ext4_journal_get_write_access(handle, bh);
1698 if (err) {
1699 ext4_std_error(dir->i_sb, err);
1700 return err;
1701 }
1702
1703 /* By now the buffer is marked for journaling */
1704 ext4_insert_dentry(inode, de, blocksize, name, namelen);
1705
1706 /*
1707 * XXX shouldn't update any times until successful
1708 * completion of syscall, but too many callers depend
1709 * on this.
1710 *
1711 * XXX similarly, too many callers depend on
1712 * ext4_new_inode() setting the times, but error
1713 * recovery deletes the inode, so the worst that can
1714 * happen is that the times are slightly out of date
1715 * and/or different from the directory change time.
1716 */
1717 dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
1718 ext4_update_dx_flag(dir);
1719 dir->i_version++;
1720 ext4_mark_inode_dirty(handle, dir);
1721 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1722 err = ext4_handle_dirty_dirent_node(handle, dir, bh);
1723 if (err)
1724 ext4_std_error(dir->i_sb, err);
1725 return 0;
1726 }
1727
1728 /*
1729 * This converts a one block unindexed directory to a 3 block indexed
1730 * directory, and adds the dentry to the indexed directory.
1731 */
1732 static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1733 struct inode *inode, struct buffer_head *bh)
1734 {
1735 struct inode *dir = dentry->d_parent->d_inode;
1736 const char *name = dentry->d_name.name;
1737 int namelen = dentry->d_name.len;
1738 struct buffer_head *bh2;
1739 struct dx_root *root;
1740 struct dx_frame frames[2], *frame;
1741 struct dx_entry *entries;
1742 struct ext4_dir_entry_2 *de, *de2;
1743 struct ext4_dir_entry_tail *t;
1744 char *data1, *top;
1745 unsigned len;
1746 int retval;
1747 unsigned blocksize;
1748 struct dx_hash_info hinfo;
1749 ext4_lblk_t block;
1750 struct fake_dirent *fde;
1751 int csum_size = 0;
1752
1753 if (ext4_has_metadata_csum(inode->i_sb))
1754 csum_size = sizeof(struct ext4_dir_entry_tail);
1755
1756 blocksize = dir->i_sb->s_blocksize;
1757 dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
1758 BUFFER_TRACE(bh, "get_write_access");
1759 retval = ext4_journal_get_write_access(handle, bh);
1760 if (retval) {
1761 ext4_std_error(dir->i_sb, retval);
1762 brelse(bh);
1763 return retval;
1764 }
1765 root = (struct dx_root *) bh->b_data;
1766
1767 /* The 0th block becomes the root, move the dirents out */
1768 fde = &root->dotdot;
1769 de = (struct ext4_dir_entry_2 *)((char *)fde +
1770 ext4_rec_len_from_disk(fde->rec_len, blocksize));
1771 if ((char *) de >= (((char *) root) + blocksize)) {
1772 EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
1773 brelse(bh);
1774 return -EIO;
1775 }
1776 len = ((char *) root) + (blocksize - csum_size) - (char *) de;
1777
1778 /* Allocate new block for the 0th block's dirents */
1779 bh2 = ext4_append(handle, dir, &block);
1780 if (IS_ERR(bh2)) {
1781 brelse(bh);
1782 return PTR_ERR(bh2);
1783 }
1784 ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
1785 data1 = bh2->b_data;
1786
1787 memcpy (data1, de, len);
1788 de = (struct ext4_dir_entry_2 *) data1;
1789 top = data1 + len;
1790 while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
1791 de = de2;
1792 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1793 (char *) de,
1794 blocksize);
1795
1796 if (csum_size) {
1797 t = EXT4_DIRENT_TAIL(data1, blocksize);
1798 initialize_dirent_tail(t, blocksize);
1799 }
1800
1801 /* Initialize the root; the dot dirents already exist */
1802 de = (struct ext4_dir_entry_2 *) (&root->dotdot);
1803 de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
1804 blocksize);
1805 memset (&root->info, 0, sizeof(root->info));
1806 root->info.info_length = sizeof(root->info);
1807 root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
1808 entries = root->entries;
1809 dx_set_block(entries, 1);
1810 dx_set_count(entries, 1);
1811 dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
1812
1813 /* Initialize as for dx_probe */
1814 hinfo.hash_version = root->info.hash_version;
1815 if (hinfo.hash_version <= DX_HASH_TEA)
1816 hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
1817 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
1818 ext4fs_dirhash(name, namelen, &hinfo);
1819 memset(frames, 0, sizeof(frames));
1820 frame = frames;
1821 frame->entries = entries;
1822 frame->at = entries;
1823 frame->bh = bh;
1824 bh = bh2;
1825
1826 retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1827 if (retval)
1828 goto out_frames;
1829 retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
1830 if (retval)
1831 goto out_frames;
1832
1833 de = do_split(handle,dir, &bh, frame, &hinfo);
1834 if (IS_ERR(de)) {
1835 retval = PTR_ERR(de);
1836 goto out_frames;
1837 }
1838 dx_release(frames);
1839
1840 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
1841 brelse(bh);
1842 return retval;
1843 out_frames:
1844 /*
1845 * Even if the block split failed, we have to properly write
1846 * out all the changes we did so far. Otherwise we can end up
1847 * with a corrupted filesystem.
1848 */
1849 ext4_mark_inode_dirty(handle, dir);
1850 dx_release(frames);
1851 return retval;
1852 }
1853
1854 /*
1855 * ext4_add_entry()
1856 *
1857 * adds a file entry to the specified directory, using the same
1858 * semantics as ext4_find_entry(). It returns 0 on success or a negative errno.
1859 *
1860 * NOTE!! The inode part of 'de' is left at 0 - which means you
1861 * may not sleep between calling this and putting something into
1862 * the entry, as someone else might have used it while you slept.
1863 */
1864 static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1865 struct inode *inode)
1866 {
1867 struct inode *dir = dentry->d_parent->d_inode;
1868 struct buffer_head *bh;
1869 struct ext4_dir_entry_2 *de;
1870 struct ext4_dir_entry_tail *t;
1871 struct super_block *sb;
1872 int retval;
1873 int dx_fallback=0;
1874 unsigned blocksize;
1875 ext4_lblk_t block, blocks;
1876 int csum_size = 0;
1877
1878 if (ext4_has_metadata_csum(inode->i_sb))
1879 csum_size = sizeof(struct ext4_dir_entry_tail);
1880
1881 sb = dir->i_sb;
1882 blocksize = sb->s_blocksize;
1883 if (!dentry->d_name.len)
1884 return -EINVAL;
1885
1886 if (ext4_has_inline_data(dir)) {
1887 retval = ext4_try_add_inline_entry(handle, dentry, inode);
1888 if (retval < 0)
1889 return retval;
1890 if (retval == 1) {
1891 retval = 0;
1892 return retval;
1893 }
1894 }
1895
1896 if (is_dx(dir)) {
1897 retval = ext4_dx_add_entry(handle, dentry, inode);
1898 if (!retval || (retval != ERR_BAD_DX_DIR))
1899 return retval;
1900 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1901 dx_fallback++;
1902 ext4_mark_inode_dirty(handle, dir);
1903 }
1904 blocks = dir->i_size >> sb->s_blocksize_bits;
1905 for (block = 0; block < blocks; block++) {
1906 bh = ext4_read_dirblock(dir, block, DIRENT);
1907 if (IS_ERR(bh))
1908 return PTR_ERR(bh);
1909
1910 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
1911 if (retval != -ENOSPC) {
1912 brelse(bh);
1913 return retval;
1914 }
1915
1916 if (blocks == 1 && !dx_fallback &&
1917 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
1918 return make_indexed_dir(handle, dentry, inode, bh);
1919 brelse(bh);
1920 }
1921 bh = ext4_append(handle, dir, &block);
1922 if (IS_ERR(bh))
1923 return PTR_ERR(bh);
1924 de = (struct ext4_dir_entry_2 *) bh->b_data;
1925 de->inode = 0;
1926 de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
1927
1928 if (csum_size) {
1929 t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
1930 initialize_dirent_tail(t, blocksize);
1931 }
1932
1933 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
1934 brelse(bh);
1935 if (retval == 0)
1936 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1937 return retval;
1938 }
1939
1940 /*
1941 * Returns 0 for success, or a negative error value
1942 */
1943 static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1944 struct inode *inode)
1945 {
1946 struct dx_frame frames[2], *frame;
1947 struct dx_entry *entries, *at;
1948 struct dx_hash_info hinfo;
1949 struct buffer_head *bh;
1950 struct inode *dir = dentry->d_parent->d_inode;
1951 struct super_block *sb = dir->i_sb;
1952 struct ext4_dir_entry_2 *de;
1953 int err;
1954
1955 frame = dx_probe(&dentry->d_name, dir, &hinfo, frames);
1956 if (IS_ERR(frame))
1957 return PTR_ERR(frame);
1958 entries = frame->entries;
1959 at = frame->at;
1960 bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
1961 if (IS_ERR(bh)) {
1962 err = PTR_ERR(bh);
1963 bh = NULL;
1964 goto cleanup;
1965 }
1966
1967 BUFFER_TRACE(bh, "get_write_access");
1968 err = ext4_journal_get_write_access(handle, bh);
1969 if (err)
1970 goto journal_error;
1971
1972 err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
1973 if (err != -ENOSPC)
1974 goto cleanup;
1975
1976 /* Block full, should compress but for now just split */
1977 dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
1978 dx_get_count(entries), dx_get_limit(entries)));
1979 /* Need to split index? */
1980 if (dx_get_count(entries) == dx_get_limit(entries)) {
1981 ext4_lblk_t newblock;
1982 unsigned icount = dx_get_count(entries);
1983 int levels = frame - frames;
1984 struct dx_entry *entries2;
1985 struct dx_node *node2;
1986 struct buffer_head *bh2;
1987
1988 if (levels && (dx_get_count(frames->entries) ==
1989 dx_get_limit(frames->entries))) {
1990 ext4_warning(sb, "Directory index full!");
1991 err = -ENOSPC;
1992 goto cleanup;
1993 }
1994 bh2 = ext4_append(handle, dir, &newblock);
1995 if (IS_ERR(bh2)) {
1996 err = PTR_ERR(bh2);
1997 goto cleanup;
1998 }
1999 node2 = (struct dx_node *)(bh2->b_data);
2000 entries2 = node2->entries;
2001 memset(&node2->fake, 0, sizeof(struct fake_dirent));
2002 node2->fake.rec_len = ext4_rec_len_to_disk(sb->s_blocksize,
2003 sb->s_blocksize);
2004 BUFFER_TRACE(frame->bh, "get_write_access");
2005 err = ext4_journal_get_write_access(handle, frame->bh);
2006 if (err)
2007 goto journal_error;
2008 if (levels) {
2009 unsigned icount1 = icount/2, icount2 = icount - icount1;
2010 unsigned hash2 = dx_get_hash(entries + icount1);
2011 dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
2012 icount1, icount2));
2013
2014 BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
2015 err = ext4_journal_get_write_access(handle,
2016 frames[0].bh);
2017 if (err)
2018 goto journal_error;
2019
2020 memcpy((char *) entries2, (char *) (entries + icount1),
2021 icount2 * sizeof(struct dx_entry));
2022 dx_set_count(entries, icount1);
2023 dx_set_count(entries2, icount2);
2024 dx_set_limit(entries2, dx_node_limit(dir));
2025
2026 /* Which index block gets the new entry? */
2027 if (at - entries >= icount1) {
2028 frame->at = at = at - entries - icount1 + entries2;
2029 frame->entries = entries = entries2;
2030 swap(frame->bh, bh2);
2031 }
2032 dx_insert_block(frames + 0, hash2, newblock);
2033 dxtrace(dx_show_index("node", frames[1].entries));
2034 dxtrace(dx_show_index("node",
2035 ((struct dx_node *) bh2->b_data)->entries));
2036 err = ext4_handle_dirty_dx_node(handle, dir, bh2);
2037 if (err)
2038 goto journal_error;
2039 brelse(bh2);
2040 } else {
2041 dxtrace(printk(KERN_DEBUG
2042 "Creating second level index...\n"));
2043 memcpy((char *) entries2, (char *) entries,
2044 icount * sizeof(struct dx_entry));
2045 dx_set_limit(entries2, dx_node_limit(dir));
2046
2047 /* Set up root */
2048 dx_set_count(entries, 1);
2049 dx_set_block(entries + 0, newblock);
2050 ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
2051
2052 /* Add new access path frame */
2053 frame = frames + 1;
2054 frame->at = at = at - entries + entries2;
2055 frame->entries = entries = entries2;
2056 frame->bh = bh2;
2057 err = ext4_journal_get_write_access(handle,
2058 frame->bh);
2059 if (err)
2060 goto journal_error;
2061 }
2062 err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
2063 if (err) {
2064 ext4_std_error(inode->i_sb, err);
2065 goto cleanup;
2066 }
2067 }
2068 de = do_split(handle, dir, &bh, frame, &hinfo);
2069 if (IS_ERR(de)) {
2070 err = PTR_ERR(de);
2071 goto cleanup;
2072 }
2073 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
2074 goto cleanup;
2075
2076 journal_error:
2077 ext4_std_error(dir->i_sb, err);
2078 cleanup:
2079 brelse(bh);
2080 dx_release(frames);
2081 return err;
2082 }
2083
2084 /*
2085 * ext4_generic_delete_entry deletes a directory entry by merging it
2086 * with the previous entry
2087 */
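/*
 * Roughly: if the entry to delete has a predecessor in the same buffer,
 * the predecessor's rec_len simply grows to swallow the victim, e.g.
 *
 *     before:  [ prev | rec_len=16 ][ victim | rec_len=24 ][ next ... ]
 *     after:   [ prev | rec_len=40                        ][ next ... ]
 *
 * If the victim is the first entry in the buffer, its inode field is
 * cleared instead.
 */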
2088 int ext4_generic_delete_entry(handle_t *handle,
2089 struct inode *dir,
2090 struct ext4_dir_entry_2 *de_del,
2091 struct buffer_head *bh,
2092 void *entry_buf,
2093 int buf_size,
2094 int csum_size)
2095 {
2096 struct ext4_dir_entry_2 *de, *pde;
2097 unsigned int blocksize = dir->i_sb->s_blocksize;
2098 int i;
2099
2100 i = 0;
2101 pde = NULL;
2102 de = (struct ext4_dir_entry_2 *)entry_buf;
2103 while (i < buf_size - csum_size) {
2104 if (ext4_check_dir_entry(dir, NULL, de, bh,
2105 bh->b_data, bh->b_size, i))
2106 return -EIO;
2107 if (de == de_del) {
2108 if (pde)
2109 pde->rec_len = ext4_rec_len_to_disk(
2110 ext4_rec_len_from_disk(pde->rec_len,
2111 blocksize) +
2112 ext4_rec_len_from_disk(de->rec_len,
2113 blocksize),
2114 blocksize);
2115 else
2116 de->inode = 0;
2117 dir->i_version++;
2118 return 0;
2119 }
2120 i += ext4_rec_len_from_disk(de->rec_len, blocksize);
2121 pde = de;
2122 de = ext4_next_entry(de, blocksize);
2123 }
2124 return -ENOENT;
2125 }
2126
2127 static int ext4_delete_entry(handle_t *handle,
2128 struct inode *dir,
2129 struct ext4_dir_entry_2 *de_del,
2130 struct buffer_head *bh)
2131 {
2132 int err, csum_size = 0;
2133
2134 if (ext4_has_inline_data(dir)) {
2135 int has_inline_data = 1;
2136 err = ext4_delete_inline_entry(handle, dir, de_del, bh,
2137 &has_inline_data);
2138 if (has_inline_data)
2139 return err;
2140 }
2141
2142 if (ext4_has_metadata_csum(dir->i_sb))
2143 csum_size = sizeof(struct ext4_dir_entry_tail);
2144
2145 BUFFER_TRACE(bh, "get_write_access");
2146 err = ext4_journal_get_write_access(handle, bh);
2147 if (unlikely(err))
2148 goto out;
2149
2150 err = ext4_generic_delete_entry(handle, dir, de_del,
2151 bh, bh->b_data,
2152 dir->i_sb->s_blocksize, csum_size);
2153 if (err)
2154 goto out;
2155
2156 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
2157 err = ext4_handle_dirty_dirent_node(handle, dir, bh);
2158 if (unlikely(err))
2159 goto out;
2160
2161 return 0;
2162 out:
2163 if (err != -ENOENT)
2164 ext4_std_error(dir->i_sb, err);
2165 return err;
2166 }
2167
2168 /*
2169 * The DIR_NLINK feature is set if 1) nlink reaches EXT4_LINK_MAX or 2) nlink == 2,
2170 * since nlink == 2 after the increment means the count was previously clamped to 1.
2171 */
2172 static void ext4_inc_count(handle_t *handle, struct inode *inode)
2173 {
2174 inc_nlink(inode);
2175 if (is_dx(inode) && inode->i_nlink > 1) {
2176 /* limit is 16-bit i_links_count */
2177 if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
2178 set_nlink(inode, 1);
2179 EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
2180 EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
2181 }
2182 }
2183 }
2184
2185 /*
2186 * If a directory has nlink == 1, we should leave it at 1: this indicates
2187 * that the directory has more than EXT4_LINK_MAX subdirectories.
2188 */
2189 static void ext4_dec_count(handle_t *handle, struct inode *inode)
2190 {
2191 if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
2192 drop_nlink(inode);
2193 }
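/*
 * Taken together, ext4_inc_count()/ext4_dec_count() implement the
 * DIR_NLINK overflow scheme: once an indexed directory's 16-bit link
 * count would pass EXT4_LINK_MAX (typically 65000), the count is
 * pinned at 1, which ext4 treats as "link count not tracked", and
 * ext4_dec_count() refuses to decrement a directory below that mark.
 */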
2194
2195
2196 static int ext4_add_nondir(handle_t *handle,
2197 struct dentry *dentry, struct inode *inode)
2198 {
2199 int err = ext4_add_entry(handle, dentry, inode);
2200 if (!err) {
2201 ext4_mark_inode_dirty(handle, inode);
2202 unlock_new_inode(inode);
2203 d_instantiate(dentry, inode);
2204 return 0;
2205 }
2206 drop_nlink(inode);
2207 unlock_new_inode(inode);
2208 iput(inode);
2209 return err;
2210 }
2211
2212 /*
2213 * By the time this is called, we already have created
2214 * the directory cache entry for the new file, but it
2215 * is so far negative - it has no inode.
2216 *
2217 * If the create succeeds, we fill in the inode information
2218 * with d_instantiate().
2219 */
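/*
 * Like most creation paths below, ext4_create() follows a common
 * pattern: allocate the inode and start the handle together via
 * ext4_new_inode_start_handle(), add the directory entry, and on
 * -ENOSPC retry a limited number of times when
 * ext4_should_retry_alloc() suggests a journal commit may have freed
 * enough space to make progress.
 */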
2220 static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2221 bool excl)
2222 {
2223 handle_t *handle;
2224 struct inode *inode;
2225 int err, credits, retries = 0;
2226
2227 dquot_initialize(dir);
2228
2229 credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2230 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
2231 retry:
2232 inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
2233 NULL, EXT4_HT_DIR, credits);
2234 handle = ext4_journal_current_handle();
2235 err = PTR_ERR(inode);
2236 if (!IS_ERR(inode)) {
2237 inode->i_op = &ext4_file_inode_operations;
2238 if (test_opt(inode->i_sb, DAX))
2239 inode->i_fop = &ext4_dax_file_operations;
2240 else
2241 inode->i_fop = &ext4_file_operations;
2242 ext4_set_aops(inode);
2243 err = ext4_add_nondir(handle, dentry, inode);
2244 if (!err && IS_DIRSYNC(dir))
2245 ext4_handle_sync(handle);
2246 }
2247 if (handle)
2248 ext4_journal_stop(handle);
2249 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2250 goto retry;
2251 return err;
2252 }
2253
2254 static int ext4_mknod(struct inode *dir, struct dentry *dentry,
2255 umode_t mode, dev_t rdev)
2256 {
2257 handle_t *handle;
2258 struct inode *inode;
2259 int err, credits, retries = 0;
2260
2261 if (!new_valid_dev(rdev))
2262 return -EINVAL;
2263
2264 dquot_initialize(dir);
2265
2266 credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2267 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
2268 retry:
2269 inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
2270 NULL, EXT4_HT_DIR, credits);
2271 handle = ext4_journal_current_handle();
2272 err = PTR_ERR(inode);
2273 if (!IS_ERR(inode)) {
2274 init_special_inode(inode, inode->i_mode, rdev);
2275 inode->i_op = &ext4_special_inode_operations;
2276 err = ext4_add_nondir(handle, dentry, inode);
2277 if (!err && IS_DIRSYNC(dir))
2278 ext4_handle_sync(handle);
2279 }
2280 if (handle)
2281 ext4_journal_stop(handle);
2282 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2283 goto retry;
2284 return err;
2285 }
2286
2287 static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2288 {
2289 handle_t *handle;
2290 struct inode *inode;
2291 int err, retries = 0;
2292
2293 dquot_initialize(dir);
2294
2295 retry:
2296 inode = ext4_new_inode_start_handle(dir, mode,
2297 NULL, 0, NULL,
2298 EXT4_HT_DIR,
2299 EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
2300 4 + EXT4_XATTR_TRANS_BLOCKS);
2301 handle = ext4_journal_current_handle();
2302 err = PTR_ERR(inode);
2303 if (!IS_ERR(inode)) {
2304 inode->i_op = &ext4_file_inode_operations;
2305 if (test_opt(inode->i_sb, DAX))
2306 inode->i_fop = &ext4_dax_file_operations;
2307 else
2308 inode->i_fop = &ext4_file_operations;
2309 ext4_set_aops(inode);
2310 d_tmpfile(dentry, inode);
2311 err = ext4_orphan_add(handle, inode);
2312 if (err)
2313 goto err_unlock_inode;
2314 mark_inode_dirty(inode);
2315 unlock_new_inode(inode);
2316 }
2317 if (handle)
2318 ext4_journal_stop(handle);
2319 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2320 goto retry;
2321 return err;
2322 err_unlock_inode:
2323 ext4_journal_stop(handle);
2324 unlock_new_inode(inode);
2325 return err;
2326 }
2327
2328 struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
2329 struct ext4_dir_entry_2 *de,
2330 int blocksize, int csum_size,
2331 unsigned int parent_ino, int dotdot_real_len)
2332 {
2333 de->inode = cpu_to_le32(inode->i_ino);
2334 de->name_len = 1;
2335 de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
2336 blocksize);
2337 strcpy(de->name, ".");
2338 ext4_set_de_type(inode->i_sb, de, S_IFDIR);
2339
2340 de = ext4_next_entry(de, blocksize);
2341 de->inode = cpu_to_le32(parent_ino);
2342 de->name_len = 2;
2343 if (!dotdot_real_len)
2344 de->rec_len = ext4_rec_len_to_disk(blocksize -
2345 (csum_size + EXT4_DIR_REC_LEN(1)),
2346 blocksize);
2347 else
2348 de->rec_len = ext4_rec_len_to_disk(
2349 EXT4_DIR_REC_LEN(de->name_len), blocksize);
2350 strcpy(de->name, "..");
2351 ext4_set_de_type(inode->i_sb, de, S_IFDIR);
2352
2353 return ext4_next_entry(de, blocksize);
2354 }
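/*
 * For a fresh (non-inline) directory block, i.e. dotdot_real_len == 0,
 * this lays the entries out so that "." takes exactly
 * EXT4_DIR_REC_LEN(1) bytes (12 with the usual 4-byte rounding) and
 * ".." stretches over the rest of the block, minus the checksum tail
 * when metadata checksums are enabled:
 *
 *     [ "." EXT4_DIR_REC_LEN(1) ][ ".." rec_len = blocksize - EXT4_DIR_REC_LEN(1) - csum_size ]
 */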
2355
2356 static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
2357 struct inode *inode)
2358 {
2359 struct buffer_head *dir_block = NULL;
2360 struct ext4_dir_entry_2 *de;
2361 struct ext4_dir_entry_tail *t;
2362 ext4_lblk_t block = 0;
2363 unsigned int blocksize = dir->i_sb->s_blocksize;
2364 int csum_size = 0;
2365 int err;
2366
2367 if (ext4_has_metadata_csum(dir->i_sb))
2368 csum_size = sizeof(struct ext4_dir_entry_tail);
2369
2370 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2371 err = ext4_try_create_inline_dir(handle, dir, inode);
2372 if (err < 0 && err != -ENOSPC)
2373 goto out;
2374 if (!err)
2375 goto out;
2376 }
2377
2378 inode->i_size = 0;
2379 dir_block = ext4_append(handle, inode, &block);
2380 if (IS_ERR(dir_block))
2381 return PTR_ERR(dir_block);
2382 de = (struct ext4_dir_entry_2 *)dir_block->b_data;
2383 ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
2384 set_nlink(inode, 2);
2385 if (csum_size) {
2386 t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
2387 initialize_dirent_tail(t, blocksize);
2388 }
2389
2390 BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
2391 err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
2392 if (err)
2393 goto out;
2394 set_buffer_verified(dir_block);
2395 out:
2396 brelse(dir_block);
2397 return err;
2398 }
2399
2400 static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2401 {
2402 handle_t *handle;
2403 struct inode *inode;
2404 int err, credits, retries = 0;
2405
2406 if (EXT4_DIR_LINK_MAX(dir))
2407 return -EMLINK;
2408
2409 dquot_initialize(dir);
2410
2411 credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2412 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
2413 retry:
2414 inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode,
2415 &dentry->d_name,
2416 0, NULL, EXT4_HT_DIR, credits);
2417 handle = ext4_journal_current_handle();
2418 err = PTR_ERR(inode);
2419 if (IS_ERR(inode))
2420 goto out_stop;
2421
2422 inode->i_op = &ext4_dir_inode_operations;
2423 inode->i_fop = &ext4_dir_operations;
2424 err = ext4_init_new_dir(handle, dir, inode);
2425 if (err)
2426 goto out_clear_inode;
2427 err = ext4_mark_inode_dirty(handle, inode);
2428 if (!err)
2429 err = ext4_add_entry(handle, dentry, inode);
2430 if (err) {
2431 out_clear_inode:
2432 clear_nlink(inode);
2433 unlock_new_inode(inode);
2434 ext4_mark_inode_dirty(handle, inode);
2435 iput(inode);
2436 goto out_stop;
2437 }
2438 ext4_inc_count(handle, dir);
2439 ext4_update_dx_flag(dir);
2440 err = ext4_mark_inode_dirty(handle, dir);
2441 if (err)
2442 goto out_clear_inode;
2443 unlock_new_inode(inode);
2444 d_instantiate(dentry, inode);
2445 if (IS_DIRSYNC(dir))
2446 ext4_handle_sync(handle);
2447
2448 out_stop:
2449 if (handle)
2450 ext4_journal_stop(handle);
2451 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2452 goto retry;
2453 return err;
2454 }
2455
2456 /*
2457 * routine to check that the specified directory is empty (for rmdir)
2458 */
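/*
 * A directory counts as empty when, apart from "." and "..", every
 * entry in every block has a zero inode number; the walk below bails
 * out with 0 as soon as it sees a live entry.
 */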
2459 static int empty_dir(struct inode *inode)
2460 {
2461 unsigned int offset;
2462 struct buffer_head *bh;
2463 struct ext4_dir_entry_2 *de, *de1;
2464 struct super_block *sb;
2465 int err = 0;
2466
2467 if (ext4_has_inline_data(inode)) {
2468 int has_inline_data = 1;
2469
2470 err = empty_inline_dir(inode, &has_inline_data);
2471 if (has_inline_data)
2472 return err;
2473 }
2474
2475 sb = inode->i_sb;
2476 if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
2477 EXT4_ERROR_INODE(inode, "invalid size");
2478 return 1;
2479 }
2480 bh = ext4_read_dirblock(inode, 0, EITHER);
2481 if (IS_ERR(bh))
2482 return 1;
2483
2484 de = (struct ext4_dir_entry_2 *) bh->b_data;
2485 de1 = ext4_next_entry(de, sb->s_blocksize);
2486 if (le32_to_cpu(de->inode) != inode->i_ino ||
2487 !le32_to_cpu(de1->inode) ||
2488 strcmp(".", de->name) ||
2489 strcmp("..", de1->name)) {
2490 ext4_warning(inode->i_sb,
2491 "bad directory (dir #%lu) - no `.' or `..'",
2492 inode->i_ino);
2493 brelse(bh);
2494 return 1;
2495 }
2496 offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
2497 ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
2498 de = ext4_next_entry(de1, sb->s_blocksize);
2499 while (offset < inode->i_size) {
2500 if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
2501 unsigned int lblock;
2502 err = 0;
2503 brelse(bh);
2504 lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
2505 bh = ext4_read_dirblock(inode, lblock, EITHER);
2506 if (IS_ERR(bh))
2507 return 1;
2508 de = (struct ext4_dir_entry_2 *) bh->b_data;
2509 }
2510 if (ext4_check_dir_entry(inode, NULL, de, bh,
2511 bh->b_data, bh->b_size, offset)) {
2512 de = (struct ext4_dir_entry_2 *)(bh->b_data +
2513 sb->s_blocksize);
2514 offset = (offset | (sb->s_blocksize - 1)) + 1;
2515 continue;
2516 }
2517 if (le32_to_cpu(de->inode)) {
2518 brelse(bh);
2519 return 0;
2520 }
2521 offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
2522 de = ext4_next_entry(de, sb->s_blocksize);
2523 }
2524 brelse(bh);
2525 return 1;
2526 }
2527
2528 /*
2529 * ext4_orphan_add() links an unlinked or truncated inode into a list of
2530 * such inodes, starting at the superblock, in case we crash before the
2531 * file is closed/deleted, or in case the inode truncate spans multiple
2532 * transactions and the last transaction is not recovered after a crash.
2533 *
2534 * At filesystem recovery time, we walk this list deleting unlinked
2535 * inodes and truncating linked inodes in ext4_orphan_cleanup().
2536 *
2537 * Orphan list manipulation functions must be called under i_mutex unless
2538 * we are just creating the inode or deleting it.
2539 */
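/*
 * The orphan list exists in two forms that are kept in step below: an
 * on-disk singly linked list threaded through the inodes (headed by
 * s_last_orphan in the superblock and chained via NEXT_ORPHAN()), and
 * an in-memory list (EXT4_I(inode)->i_orphan) hanging off the per-sb
 * sbi->s_orphan list under sbi->s_orphan_lock.
 */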
2540 int ext4_orphan_add(handle_t *handle, struct inode *inode)
2541 {
2542 struct super_block *sb = inode->i_sb;
2543 struct ext4_sb_info *sbi = EXT4_SB(sb);
2544 struct ext4_iloc iloc;
2545 int err = 0, rc;
2546 bool dirty = false;
2547
2548 if (!sbi->s_journal || is_bad_inode(inode))
2549 return 0;
2550
2551 WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
2552 !mutex_is_locked(&inode->i_mutex));
2553 /*
2554 * Exit early if inode already is on orphan list. This is a big speedup
2555 * since we don't have to contend on the global s_orphan_lock.
2556 */
2557 if (!list_empty(&EXT4_I(inode)->i_orphan))
2558 return 0;
2559
2560 /*
2561 * Orphan handling is only valid for files with data blocks
2562 * being truncated, or files being unlinked. Note that we either
2563 * hold i_mutex, or the inode cannot be referenced from outside,
2564 * so i_nlink should not be bumped due to a race.
2565 */
2566 J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2567 S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
2568
2569 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
2570 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
2571 if (err)
2572 goto out;
2573
2574 err = ext4_reserve_inode_write(handle, inode, &iloc);
2575 if (err)
2576 goto out;
2577
2578 mutex_lock(&sbi->s_orphan_lock);
2579 /*
2580 * Due to previous errors the inode may already be part of the on-disk
2581 * orphan list. If so, skip the on-disk list modification.
2582 */
2583 if (!NEXT_ORPHAN(inode) || NEXT_ORPHAN(inode) >
2584 (le32_to_cpu(sbi->s_es->s_inodes_count))) {
2585 /* Insert this inode at the head of the on-disk orphan list */
2586 NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
2587 sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
2588 dirty = true;
2589 }
2590 list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
2591 mutex_unlock(&sbi->s_orphan_lock);
2592
2593 if (dirty) {
2594 err = ext4_handle_dirty_super(handle, sb);
2595 rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
2596 if (!err)
2597 err = rc;
2598 if (err) {
2599 /*
2600 * We have to remove the inode from the in-memory list if the
2601 * addition to the on-disk orphan list failed. Stray orphan
2602 * list entries can cause panics at unmount time.
2603 */
2604 mutex_lock(&sbi->s_orphan_lock);
2605 list_del(&EXT4_I(inode)->i_orphan);
2606 mutex_unlock(&sbi->s_orphan_lock);
2607 }
2608 }
2609 jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
2610 jbd_debug(4, "orphan inode %lu will point to %d\n",
2611 inode->i_ino, NEXT_ORPHAN(inode));
2612 out:
2613 ext4_std_error(sb, err);
2614 return err;
2615 }
2616
2617 /*
2618 * ext4_orphan_del() removes an unlinked or truncated inode from the list
2619 * of such inodes stored on disk, because it is finally being cleaned up.
2620 */
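/*
 * Removal below has to distinguish whether the inode is at the head of
 * the on-disk chain (then s_last_orphan in the superblock is
 * repointed) or somewhere in the middle (then the previous orphan's
 * NEXT_ORPHAN() is repointed).  The in-memory list_del_init() happens
 * in either case, even on error paths without a valid handle.
 */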
2621 int ext4_orphan_del(handle_t *handle, struct inode *inode)
2622 {
2623 struct list_head *prev;
2624 struct ext4_inode_info *ei = EXT4_I(inode);
2625 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2626 __u32 ino_next;
2627 struct ext4_iloc iloc;
2628 int err = 0;
2629
2630 if (!sbi->s_journal && !(sbi->s_mount_state & EXT4_ORPHAN_FS))
2631 return 0;
2632
2633 WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
2634 !mutex_is_locked(&inode->i_mutex));
2635 /* Do this quick check before taking global s_orphan_lock. */
2636 if (list_empty(&ei->i_orphan))
2637 return 0;
2638
2639 if (handle) {
2640 /* Grab inode buffer early before taking global s_orphan_lock */
2641 err = ext4_reserve_inode_write(handle, inode, &iloc);
2642 }
2643
2644 mutex_lock(&sbi->s_orphan_lock);
2645 jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
2646
2647 prev = ei->i_orphan.prev;
2648 list_del_init(&ei->i_orphan);
2649
2650 /* If we're on an error path, we may not have a valid
2651 * transaction handle with which to update the orphan list on
2652 * disk, but we still need to remove the inode from the linked
2653 * list in memory. */
2654 if (!handle || err) {
2655 mutex_unlock(&sbi->s_orphan_lock);
2656 goto out_err;
2657 }
2658
2659 ino_next = NEXT_ORPHAN(inode);
2660 if (prev == &sbi->s_orphan) {
2661 jbd_debug(4, "superblock will point to %u\n", ino_next);
2662 BUFFER_TRACE(sbi->s_sbh, "get_write_access");
2663 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
2664 if (err) {
2665 mutex_unlock(&sbi->s_orphan_lock);
2666 goto out_brelse;
2667 }
2668 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
2669 mutex_unlock(&sbi->s_orphan_lock);
2670 err = ext4_handle_dirty_super(handle, inode->i_sb);
2671 } else {
2672 struct ext4_iloc iloc2;
2673 struct inode *i_prev =
2674 &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
2675
2676 jbd_debug(4, "orphan inode %lu will point to %u\n",
2677 i_prev->i_ino, ino_next);
2678 err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
2679 if (err) {
2680 mutex_unlock(&sbi->s_orphan_lock);
2681 goto out_brelse;
2682 }
2683 NEXT_ORPHAN(i_prev) = ino_next;
2684 err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
2685 mutex_unlock(&sbi->s_orphan_lock);
2686 }
2687 if (err)
2688 goto out_brelse;
2689 NEXT_ORPHAN(inode) = 0;
2690 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
2691 out_err:
2692 ext4_std_error(inode->i_sb, err);
2693 return err;
2694
2695 out_brelse:
2696 brelse(iloc.bh);
2697 goto out_err;
2698 }
2699
2700 static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
2701 {
2702 int retval;
2703 struct inode *inode;
2704 struct buffer_head *bh;
2705 struct ext4_dir_entry_2 *de;
2706 handle_t *handle = NULL;
2707
2708 /* Initialize quotas early so that any quota writes go in a
2709 * separate transaction */
2710 dquot_initialize(dir);
2711 dquot_initialize(dentry->d_inode);
2712
2713 retval = -ENOENT;
2714 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
2715 if (IS_ERR(bh))
2716 return PTR_ERR(bh);
2717 if (!bh)
2718 goto end_rmdir;
2719
2720 inode = dentry->d_inode;
2721
2722 retval = -EIO;
2723 if (le32_to_cpu(de->inode) != inode->i_ino)
2724 goto end_rmdir;
2725
2726 retval = -ENOTEMPTY;
2727 if (!empty_dir(inode))
2728 goto end_rmdir;
2729
2730 handle = ext4_journal_start(dir, EXT4_HT_DIR,
2731 EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
2732 if (IS_ERR(handle)) {
2733 retval = PTR_ERR(handle);
2734 handle = NULL;
2735 goto end_rmdir;
2736 }
2737
2738 if (IS_DIRSYNC(dir))
2739 ext4_handle_sync(handle);
2740
2741 retval = ext4_delete_entry(handle, dir, de, bh);
2742 if (retval)
2743 goto end_rmdir;
2744 if (!EXT4_DIR_LINK_EMPTY(inode))
2745 ext4_warning(inode->i_sb,
2746 "empty directory has too many links (%d)",
2747 inode->i_nlink);
2748 inode->i_version++;
2749 clear_nlink(inode);
2750 /* There's no need to set i_disksize: the fact that i_nlink is
2751 * zero will ensure that the right thing happens during any
2752 * recovery. */
2753 inode->i_size = 0;
2754 ext4_orphan_add(handle, inode);
2755 inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
2756 ext4_mark_inode_dirty(handle, inode);
2757 ext4_dec_count(handle, dir);
2758 ext4_update_dx_flag(dir);
2759 ext4_mark_inode_dirty(handle, dir);
2760
2761 end_rmdir:
2762 brelse(bh);
2763 if (handle)
2764 ext4_journal_stop(handle);
2765 return retval;
2766 }
2767
2768 static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2769 {
2770 int retval;
2771 struct inode *inode;
2772 struct buffer_head *bh;
2773 struct ext4_dir_entry_2 *de;
2774 handle_t *handle = NULL;
2775
2776 trace_ext4_unlink_enter(dir, dentry);
2777 /* Initialize quotas early so that any quota writes go
2778 * in a separate transaction */
2779 dquot_initialize(dir);
2780 dquot_initialize(dentry->d_inode);
2781
2782 retval = -ENOENT;
2783 bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
2784 if (IS_ERR(bh))
2785 return PTR_ERR(bh);
2786 if (!bh)
2787 goto end_unlink;
2788
2789 inode = dentry->d_inode;
2790
2791 retval = -EIO;
2792 if (le32_to_cpu(de->inode) != inode->i_ino)
2793 goto end_unlink;
2794
2795 handle = ext4_journal_start(dir, EXT4_HT_DIR,
2796 EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
2797 if (IS_ERR(handle)) {
2798 retval = PTR_ERR(handle);
2799 handle = NULL;
2800 goto end_unlink;
2801 }
2802
2803 if (IS_DIRSYNC(dir))
2804 ext4_handle_sync(handle);
2805
2806 if (!inode->i_nlink) {
2807 ext4_warning(inode->i_sb,
2808 "Deleting nonexistent file (%lu), %d",
2809 inode->i_ino, inode->i_nlink);
2810 set_nlink(inode, 1);
2811 }
2812 retval = ext4_delete_entry(handle, dir, de, bh);
2813 if (retval)
2814 goto end_unlink;
2815 dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
2816 ext4_update_dx_flag(dir);
2817 ext4_mark_inode_dirty(handle, dir);
2818 drop_nlink(inode);
2819 if (!inode->i_nlink)
2820 ext4_orphan_add(handle, inode);
2821 inode->i_ctime = ext4_current_time(inode);
2822 ext4_mark_inode_dirty(handle, inode);
2823
2824 end_unlink:
2825 brelse(bh);
2826 if (handle)
2827 ext4_journal_stop(handle);
2828 trace_ext4_unlink_exit(dentry, retval);
2829 return retval;
2830 }
2831
2832 static int ext4_symlink(struct inode *dir,
2833 struct dentry *dentry, const char *symname)
2834 {
2835 handle_t *handle;
2836 struct inode *inode;
2837 int l, err, retries = 0;
2838 int credits;
2839
2840 l = strlen(symname)+1;
2841 if (l > dir->i_sb->s_blocksize)
2842 return -ENAMETOOLONG;
2843
2844 dquot_initialize(dir);
2845
2846 if (l > EXT4_N_BLOCKS * 4) {
2847 /*
2848 * For non-fast symlinks, we just allocate inode and put it on
2849 * orphan list in the first transaction => we need bitmap,
2850 * group descriptor, sb, inode block, quota blocks, and
2851 * possibly selinux xattr blocks.
2852 */
2853 credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
2854 EXT4_XATTR_TRANS_BLOCKS;
2855 } else {
2856 /*
2857 * Fast symlink. We have to add entry to directory
2858 * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
2859 * allocate new inode (bitmap, group descriptor, inode block,
2860 * quota blocks, sb is already counted in previous macros).
2861 */
2862 credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2863 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
2864 }
2865 retry:
2866 inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
2867 &dentry->d_name, 0, NULL,
2868 EXT4_HT_DIR, credits);
2869 handle = ext4_journal_current_handle();
2870 err = PTR_ERR(inode);
2871 if (IS_ERR(inode))
2872 goto out_stop;
2873
2874 if (l > EXT4_N_BLOCKS * 4) {
2875 inode->i_op = &ext4_symlink_inode_operations;
2876 ext4_set_aops(inode);
2877 /*
2878 * We cannot call page_symlink() with the transaction started
2879 * because it calls into ext4_write_begin() which can wait
2880 * for the transaction to commit if we are running out of space,
2881 * and thus we would deadlock. So we have to stop the transaction
2882 * now and restart it once the symlink contents are written.
2883 *
2884 * To keep the fs consistent in case of a crash, we have to put
2885 * the inode on the orphan list in the meantime.
2886 */
2887 drop_nlink(inode);
2888 err = ext4_orphan_add(handle, inode);
2889 ext4_journal_stop(handle);
2890 if (err)
2891 goto err_drop_inode;
2892 err = __page_symlink(inode, symname, l, 1);
2893 if (err)
2894 goto err_drop_inode;
2895 /*
2896 * Now the inode is being linked into the directory (EXT4_DATA_TRANS_BLOCKS
2897 * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), and the inode itself is also modified.
2898 */
2899 handle = ext4_journal_start(dir, EXT4_HT_DIR,
2900 EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2901 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
2902 if (IS_ERR(handle)) {
2903 err = PTR_ERR(handle);
2904 goto err_drop_inode;
2905 }
2906 set_nlink(inode, 1);
2907 err = ext4_orphan_del(handle, inode);
2908 if (err) {
2909 ext4_journal_stop(handle);
2910 clear_nlink(inode);
2911 goto err_drop_inode;
2912 }
2913 } else {
2914 /* clear the extent format for fast symlink */
2915 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
2916 inode->i_op = &ext4_fast_symlink_inode_operations;
2917 memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
2918 inode->i_size = l-1;
2919 }
2920 EXT4_I(inode)->i_disksize = inode->i_size;
2921 err = ext4_add_nondir(handle, dentry, inode);
2922 if (!err && IS_DIRSYNC(dir))
2923 ext4_handle_sync(handle);
2924
2925 out_stop:
2926 if (handle)
2927 ext4_journal_stop(handle);
2928 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2929 goto retry;
2930 return err;
2931 err_drop_inode:
2932 unlock_new_inode(inode);
2933 iput(inode);
2934 return err;
2935 }
2936
2937 static int ext4_link(struct dentry *old_dentry,
2938 struct inode *dir, struct dentry *dentry)
2939 {
2940 handle_t *handle;
2941 struct inode *inode = old_dentry->d_inode;
2942 int err, retries = 0;
2943
2944 if (inode->i_nlink >= EXT4_LINK_MAX)
2945 return -EMLINK;
2946
2947 dquot_initialize(dir);
2948
2949 retry:
2950 handle = ext4_journal_start(dir, EXT4_HT_DIR,
2951 (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2952 EXT4_INDEX_EXTRA_TRANS_BLOCKS) + 1);
2953 if (IS_ERR(handle))
2954 return PTR_ERR(handle);
2955
2956 if (IS_DIRSYNC(dir))
2957 ext4_handle_sync(handle);
2958
2959 inode->i_ctime = ext4_current_time(inode);
2960 ext4_inc_count(handle, inode);
2961 ihold(inode);
2962
2963 err = ext4_add_entry(handle, dentry, inode);
2964 if (!err) {
2965 ext4_mark_inode_dirty(handle, inode);
2966 /* This can happen only for a tmpfile being
2967 * linked for the first time.
2968 */
2969 if (inode->i_nlink == 1)
2970 ext4_orphan_del(handle, inode);
2971 d_instantiate(dentry, inode);
2972 } else {
2973 drop_nlink(inode);
2974 iput(inode);
2975 }
2976 ext4_journal_stop(handle);
2977 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2978 goto retry;
2979 return err;
2980 }
2981
2982
2983 /*
2984 * Try to find the buffer head that contains the parent block.
2985 * It should be the inode block if the directory is inlined, or the
2986 * first block if it is a normal directory.
2987 */
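/*
 * Note the return convention: on failure the function below returns
 * NULL and stores the error in *retval; on success it returns the
 * buffer head with *parent_de pointing at the entry for the parent
 * directory ("..").
 */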
2988 static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
2989 struct inode *inode,
2990 int *retval,
2991 struct ext4_dir_entry_2 **parent_de,
2992 int *inlined)
2993 {
2994 struct buffer_head *bh;
2995
2996 if (!ext4_has_inline_data(inode)) {
2997 bh = ext4_read_dirblock(inode, 0, EITHER);
2998 if (IS_ERR(bh)) {
2999 *retval = PTR_ERR(bh);
3000 return NULL;
3001 }
3002 *parent_de = ext4_next_entry(
3003 (struct ext4_dir_entry_2 *)bh->b_data,
3004 inode->i_sb->s_blocksize);
3005 return bh;
3006 }
3007
3008 *inlined = 1;
3009 return ext4_get_first_inline_block(inode, parent_de, retval);
3010 }
3011
3012 struct ext4_renament {
3013 struct inode *dir;
3014 struct dentry *dentry;
3015 struct inode *inode;
3016 bool is_dir;
3017 int dir_nlink_delta;
3018
3019 /* entry for "dentry" */
3020 struct buffer_head *bh;
3021 struct ext4_dir_entry_2 *de;
3022 int inlined;
3023
3024 /* entry for ".." in inode if it's a directory */
3025 struct buffer_head *dir_bh;
3026 struct ext4_dir_entry_2 *parent_de;
3027 int dir_inlined;
3028 };
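/*
 * One ext4_renament is filled in per side of a rename: "old" for the
 * source and "new" for the target, carrying the found dirent, its
 * buffer, and (for directories) the ".." entry that may need to be
 * repointed to the other parent.
 */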
3029
3030 static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent)
3031 {
3032 int retval;
3033
3034 ent->dir_bh = ext4_get_first_dir_block(handle, ent->inode,
3035 &retval, &ent->parent_de,
3036 &ent->dir_inlined);
3037 if (!ent->dir_bh)
3038 return retval;
3039 if (le32_to_cpu(ent->parent_de->inode) != ent->dir->i_ino)
3040 return -EIO;
3041 BUFFER_TRACE(ent->dir_bh, "get_write_access");
3042 return ext4_journal_get_write_access(handle, ent->dir_bh);
3043 }
3044
3045 static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent,
3046 unsigned dir_ino)
3047 {
3048 int retval;
3049
3050 ent->parent_de->inode = cpu_to_le32(dir_ino);
3051 BUFFER_TRACE(ent->dir_bh, "call ext4_handle_dirty_metadata");
3052 if (!ent->dir_inlined) {
3053 if (is_dx(ent->inode)) {
3054 retval = ext4_handle_dirty_dx_node(handle,
3055 ent->inode,
3056 ent->dir_bh);
3057 } else {
3058 retval = ext4_handle_dirty_dirent_node(handle,
3059 ent->inode,
3060 ent->dir_bh);
3061 }
3062 } else {
3063 retval = ext4_mark_inode_dirty(handle, ent->inode);
3064 }
3065 if (retval) {
3066 ext4_std_error(ent->dir->i_sb, retval);
3067 return retval;
3068 }
3069 return 0;
3070 }
3071
3072 static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
3073 unsigned ino, unsigned file_type)
3074 {
3075 int retval;
3076
3077 BUFFER_TRACE(ent->bh, "get write access");
3078 retval = ext4_journal_get_write_access(handle, ent->bh);
3079 if (retval)
3080 return retval;
3081 ent->de->inode = cpu_to_le32(ino);
3082 if (EXT4_HAS_INCOMPAT_FEATURE(ent->dir->i_sb,
3083 EXT4_FEATURE_INCOMPAT_FILETYPE))
3084 ent->de->file_type = file_type;
3085 ent->dir->i_version++;
3086 ent->dir->i_ctime = ent->dir->i_mtime =
3087 ext4_current_time(ent->dir);
3088 ext4_mark_inode_dirty(handle, ent->dir);
3089 BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
3090 if (!ent->inlined) {
3091 retval = ext4_handle_dirty_dirent_node(handle,
3092 ent->dir, ent->bh);
3093 if (unlikely(retval)) {
3094 ext4_std_error(ent->dir->i_sb, retval);
3095 return retval;
3096 }
3097 }
3098 brelse(ent->bh);
3099 ent->bh = NULL;
3100
3101 return 0;
3102 }
3103
3104 static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
3105 const struct qstr *d_name)
3106 {
3107 int retval = -ENOENT;
3108 struct buffer_head *bh;
3109 struct ext4_dir_entry_2 *de;
3110
3111 bh = ext4_find_entry(dir, d_name, &de, NULL);
3112 if (IS_ERR(bh))
3113 return PTR_ERR(bh);
3114 if (bh) {
3115 retval = ext4_delete_entry(handle, dir, de, bh);
3116 brelse(bh);
3117 }
3118 return retval;
3119 }
3120
3121 static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent,
3122 int force_reread)
3123 {
3124 int retval;
3125 /*
3126 * ent->de could have moved from under us during htree split, so make
3127 * sure that we are deleting the right entry. We might also be pointing
3128 * to a stale entry in the unused part of ent->bh so just checking inum
3129 * and the name isn't enough.
3130 */
3131 if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino ||
3132 ent->de->name_len != ent->dentry->d_name.len ||
3133 strncmp(ent->de->name, ent->dentry->d_name.name,
3134 ent->de->name_len) ||
3135 force_reread) {
3136 retval = ext4_find_delete_entry(handle, ent->dir,
3137 &ent->dentry->d_name);
3138 } else {
3139 retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh);
3140 if (retval == -ENOENT) {
3141 retval = ext4_find_delete_entry(handle, ent->dir,
3142 &ent->dentry->d_name);
3143 }
3144 }
3145
3146 if (retval) {
3147 ext4_warning(ent->dir->i_sb,
3148 "Deleting old file (%lu), %d, error=%d",
3149 ent->dir->i_ino, ent->dir->i_nlink, retval);
3150 }
3151 }
3152
3153 static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
3154 {
3155 if (ent->dir_nlink_delta) {
3156 if (ent->dir_nlink_delta == -1)
3157 ext4_dec_count(handle, ent->dir);
3158 else
3159 ext4_inc_count(handle, ent->dir);
3160 ext4_mark_inode_dirty(handle, ent->dir);
3161 }
3162 }
3163
3164 static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent,
3165 int credits, handle_t **h)
3166 {
3167 struct inode *wh;
3168 handle_t *handle;
3169 int retries = 0;
3170
3171 /*
3172 * for inode block, sb block, group summaries,
3173 * and inode bitmap
3174 */
3175 credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) +
3176 EXT4_XATTR_TRANS_BLOCKS + 4);
3177 retry:
3178 wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE,
3179 &ent->dentry->d_name, 0, NULL,
3180 EXT4_HT_DIR, credits);
3181
3182 handle = ext4_journal_current_handle();
3183 if (IS_ERR(wh)) {
3184 if (handle)
3185 ext4_journal_stop(handle);
3186 if (PTR_ERR(wh) == -ENOSPC &&
3187 ext4_should_retry_alloc(ent->dir->i_sb, &retries))
3188 goto retry;
3189 } else {
3190 *h = handle;
3191 init_special_inode(wh, wh->i_mode, WHITEOUT_DEV);
3192 wh->i_op = &ext4_special_inode_operations;
3193 }
3194 return wh;
3195 }
3196
3197 /*
3198 * Anybody can rename anything with this: the permission checks are left to the
3199 * higher-level routines.
3200 *
3201 * n.b. old_{dentry,inode} refers to the source dentry/inode
3202 * while new_{dentry,inode} refers to the destination dentry/inode.
3203 * This comes from rename(const char *oldpath, const char *newpath).
3204 */
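/*
 * In rough order the function below: looks up the source and (if any)
 * destination entries, starts the handle (allocating a whiteout inode
 * first for RENAME_WHITEOUT), installs old.inode under the new name
 * with ext4_setent() or ext4_add_entry(), then either leaves the old
 * name pointing at the whiteout or removes it via
 * ext4_rename_delete(), and finally fixes up ".." and the parents'
 * link counts when a directory changes parents.
 */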
3205 static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3206 struct inode *new_dir, struct dentry *new_dentry,
3207 unsigned int flags)
3208 {
3209 handle_t *handle = NULL;
3210 struct ext4_renament old = {
3211 .dir = old_dir,
3212 .dentry = old_dentry,
3213 .inode = old_dentry->d_inode,
3214 };
3215 struct ext4_renament new = {
3216 .dir = new_dir,
3217 .dentry = new_dentry,
3218 .inode = new_dentry->d_inode,
3219 };
3220 int force_reread;
3221 int retval;
3222 struct inode *whiteout = NULL;
3223 int credits;
3224 u8 old_file_type;
3225
3226 dquot_initialize(old.dir);
3227 dquot_initialize(new.dir);
3228
3229 /* Initialize quotas early so that any quota writes go
3230 * in a separate transaction */
3231 if (new.inode)
3232 dquot_initialize(new.inode);
3233
3234 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
3235 if (IS_ERR(old.bh))
3236 return PTR_ERR(old.bh);
3237 /*
3238 * The check of the inode number is _not_ due to possible IO errors.
3239 * We might rmdir the source, keep it as pwd of some process
3240 * and merrily kill the link to whatever was created under the
3241 * same name. Goodbye sticky bit ;-<
3242 */
3243 retval = -ENOENT;
3244 if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
3245 goto end_rename;
3246
3247 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
3248 &new.de, &new.inlined);
3249 if (IS_ERR(new.bh)) {
3250 retval = PTR_ERR(new.bh);
3251 new.bh = NULL;
3252 goto end_rename;
3253 }
3254 if (new.bh) {
3255 if (!new.inode) {
3256 brelse(new.bh);
3257 new.bh = NULL;
3258 }
3259 }
3260 if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC))
3261 ext4_alloc_da_blocks(old.inode);
3262
3263 credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
3264 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
3265 if (!(flags & RENAME_WHITEOUT)) {
3266 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
3267 if (IS_ERR(handle))
3268 return PTR_ERR(handle);
3269 } else {
3270 whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
3271 if (IS_ERR(whiteout))
3272 return PTR_ERR(whiteout);
3273 }
3274
3275 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
3276 ext4_handle_sync(handle);
3277
3278 if (S_ISDIR(old.inode->i_mode)) {
3279 if (new.inode) {
3280 retval = -ENOTEMPTY;
3281 if (!empty_dir(new.inode))
3282 goto end_rename;
3283 } else {
3284 retval = -EMLINK;
3285 if (new.dir != old.dir && EXT4_DIR_LINK_MAX(new.dir))
3286 goto end_rename;
3287 }
3288 retval = ext4_rename_dir_prepare(handle, &old);
3289 if (retval)
3290 goto end_rename;
3291 }
3292 /*
3293 * If we're renaming a file within an inline_data dir and adding or
3294 * setting the new dirent causes a conversion from inline_data to
3295 * extents/blockmap, we need to force the dirent delete code to
3296 * re-read the directory, or else we end up trying to delete a dirent
3297 * from what is now the extent tree root (or a block map).
3298 */
3299 force_reread = (new.dir->i_ino == old.dir->i_ino &&
3300 ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
3301
3302 old_file_type = old.de->file_type;
3303 if (whiteout) {
3304 /*
3305 * Do this before adding a new entry, so the old entry is sure
3306 * to be still pointing to the valid old entry.
3307 */
3308 retval = ext4_setent(handle, &old, whiteout->i_ino,
3309 EXT4_FT_CHRDEV);
3310 if (retval)
3311 goto end_rename;
3312 ext4_mark_inode_dirty(handle, whiteout);
3313 }
3314 if (!new.bh) {
3315 retval = ext4_add_entry(handle, new.dentry, old.inode);
3316 if (retval)
3317 goto end_rename;
3318 } else {
3319 retval = ext4_setent(handle, &new,
3320 old.inode->i_ino, old_file_type);
3321 if (retval)
3322 goto end_rename;
3323 }
3324 if (force_reread)
3325 force_reread = !ext4_test_inode_flag(new.dir,
3326 EXT4_INODE_INLINE_DATA);
3327
3328 /*
3329 * Like most other Unix systems, set the ctime for inodes on a
3330 * rename.
3331 */
3332 old.inode->i_ctime = ext4_current_time(old.inode);
3333 ext4_mark_inode_dirty(handle, old.inode);
3334
3335 if (!whiteout) {
3336 /*
3337 * ok, that's it
3338 */
3339 ext4_rename_delete(handle, &old, force_reread);
3340 }
3341
3342 if (new.inode) {
3343 ext4_dec_count(handle, new.inode);
3344 new.inode->i_ctime = ext4_current_time(new.inode);
3345 }
3346 old.dir->i_ctime = old.dir->i_mtime = ext4_current_time(old.dir);
3347 ext4_update_dx_flag(old.dir);
3348 if (old.dir_bh) {
3349 retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
3350 if (retval)
3351 goto end_rename;
3352
3353 ext4_dec_count(handle, old.dir);
3354 if (new.inode) {
3355 /* checked empty_dir above, can't have another parent,
3356 * ext4_dec_count() won't work for many-linked dirs */
3357 clear_nlink(new.inode);
3358 } else {
3359 ext4_inc_count(handle, new.dir);
3360 ext4_update_dx_flag(new.dir);
3361 ext4_mark_inode_dirty(handle, new.dir);
3362 }
3363 }
3364 ext4_mark_inode_dirty(handle, old.dir);
3365 if (new.inode) {
3366 ext4_mark_inode_dirty(handle, new.inode);
3367 if (!new.inode->i_nlink)
3368 ext4_orphan_add(handle, new.inode);
3369 }
3370 retval = 0;
3371
3372 end_rename:
3373 brelse(old.dir_bh);
3374 brelse(old.bh);
3375 brelse(new.bh);
3376 if (whiteout) {
3377 if (retval)
3378 drop_nlink(whiteout);
3379 unlock_new_inode(whiteout);
3380 iput(whiteout);
3381 }
3382 if (handle)
3383 ext4_journal_stop(handle);
3384 return retval;
3385 }
3386
3387 static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
3388 struct inode *new_dir, struct dentry *new_dentry)
3389 {
3390 handle_t *handle = NULL;
3391 struct ext4_renament old = {
3392 .dir = old_dir,
3393 .dentry = old_dentry,
3394 .inode = old_dentry->d_inode,
3395 };
3396 struct ext4_renament new = {
3397 .dir = new_dir,
3398 .dentry = new_dentry,
3399 .inode = new_dentry->d_inode,
3400 };
3401 u8 new_file_type;
3402 int retval;
3403
3404 dquot_initialize(old.dir);
3405 dquot_initialize(new.dir);
3406
3407 old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
3408 &old.de, &old.inlined);
3409 if (IS_ERR(old.bh))
3410 return PTR_ERR(old.bh);
3411 /*
3412 * The check of the inode number is _not_ due to possible IO errors.
3413 * We might rmdir the source, keep it as pwd of some process
3414 * and merrily kill the link to whatever was created under the
3415 * same name. Goodbye sticky bit ;-<
3416 */
3417 retval = -ENOENT;
3418 if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
3419 goto end_rename;
3420
3421 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
3422 &new.de, &new.inlined);
3423 if (IS_ERR(new.bh)) {
3424 retval = PTR_ERR(new.bh);
3425 new.bh = NULL;
3426 goto end_rename;
3427 }
3428
3429 /* RENAME_EXCHANGE case: old *and* new must both exist */
3430 if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
3431 goto end_rename;
3432
3433 handle = ext4_journal_start(old.dir, EXT4_HT_DIR,
3434 (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
3435 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
3436 if (IS_ERR(handle))
3437 return PTR_ERR(handle);
3438
3439 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
3440 ext4_handle_sync(handle);
3441
3442 if (S_ISDIR(old.inode->i_mode)) {
3443 old.is_dir = true;
3444 retval = ext4_rename_dir_prepare(handle, &old);
3445 if (retval)
3446 goto end_rename;
3447 }
3448 if (S_ISDIR(new.inode->i_mode)) {
3449 new.is_dir = true;
3450 retval = ext4_rename_dir_prepare(handle, &new);
3451 if (retval)
3452 goto end_rename;
3453 }
3454
3455 /*
3456 * Other than the special case of overwriting a directory, parents'
3457 * nlink only needs to be modified if this is a cross directory rename.
3458 */
3459 if (old.dir != new.dir && old.is_dir != new.is_dir) {
3460 old.dir_nlink_delta = old.is_dir ? -1 : 1;
3461 new.dir_nlink_delta = -old.dir_nlink_delta;
3462 retval = -EMLINK;
3463 if ((old.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(old.dir)) ||
3464 (new.dir_nlink_delta > 0 && EXT4_DIR_LINK_MAX(new.dir)))
3465 goto end_rename;
3466 }
3467
3468 new_file_type = new.de->file_type;
3469 retval = ext4_setent(handle, &new, old.inode->i_ino, old.de->file_type);
3470 if (retval)
3471 goto end_rename;
3472
3473 retval = ext4_setent(handle, &old, new.inode->i_ino, new_file_type);
3474 if (retval)
3475 goto end_rename;
3476
3477 /*
3478 * Like most other Unix systems, set the ctime for inodes on a
3479 * rename.
3480 */
3481 old.inode->i_ctime = ext4_current_time(old.inode);
3482 new.inode->i_ctime = ext4_current_time(new.inode);
3483 ext4_mark_inode_dirty(handle, old.inode);
3484 ext4_mark_inode_dirty(handle, new.inode);
3485
3486 if (old.dir_bh) {
3487 retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
3488 if (retval)
3489 goto end_rename;
3490 }
3491 if (new.dir_bh) {
3492 retval = ext4_rename_dir_finish(handle, &new, old.dir->i_ino);
3493 if (retval)
3494 goto end_rename;
3495 }
3496 ext4_update_dir_count(handle, &old);
3497 ext4_update_dir_count(handle, &new);
3498 retval = 0;
3499
3500 end_rename:
3501 brelse(old.dir_bh);
3502 brelse(new.dir_bh);
3503 brelse(old.bh);
3504 brelse(new.bh);
3505 if (handle)
3506 ext4_journal_stop(handle);
3507 return retval;
3508 }
3509
3510 static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry,
3511 struct inode *new_dir, struct dentry *new_dentry,
3512 unsigned int flags)
3513 {
3514 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3515 return -EINVAL;
3516
3517 if (flags & RENAME_EXCHANGE) {
3518 return ext4_cross_rename(old_dir, old_dentry,
3519 new_dir, new_dentry);
3520 }
3521
3522 return ext4_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
3523 }
3524
3525 /*
3526 * directories can handle most operations...
3527 */
3528 const struct inode_operations ext4_dir_inode_operations = {
3529 .create = ext4_create,
3530 .lookup = ext4_lookup,
3531 .link = ext4_link,
3532 .unlink = ext4_unlink,
3533 .symlink = ext4_symlink,
3534 .mkdir = ext4_mkdir,
3535 .rmdir = ext4_rmdir,
3536 .mknod = ext4_mknod,
3537 .tmpfile = ext4_tmpfile,
3538 .rename2 = ext4_rename2,
3539 .setattr = ext4_setattr,
3540 .setxattr = generic_setxattr,
3541 .getxattr = generic_getxattr,
3542 .listxattr = ext4_listxattr,
3543 .removexattr = generic_removexattr,
3544 .get_acl = ext4_get_acl,
3545 .set_acl = ext4_set_acl,
3546 .fiemap = ext4_fiemap,
3547 };
3548
3549 const struct inode_operations ext4_special_inode_operations = {
3550 .setattr = ext4_setattr,
3551 .setxattr = generic_setxattr,
3552 .getxattr = generic_getxattr,
3553 .listxattr = ext4_listxattr,
3554 .removexattr = generic_removexattr,
3555 .get_acl = ext4_get_acl,
3556 .set_acl = ext4_set_acl,
3557 };