/*
 * fs/overlayfs/readdir.c
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

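/*
 * Entries of a merged directory are collected in a cache: each name is kept
 * both on a list (in the order it will be returned by readdir) and in an
 * rb-tree keyed by name, so duplicates coming from lower layers can be
 * detected cheaply while the cache is filled.
 */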
struct ovl_cache_entry {
        unsigned int len;
        unsigned int type;
        u64 real_ino;
        u64 ino;
        struct list_head l_node;
        struct rb_node node;
        struct ovl_cache_entry *next_maybe_whiteout;
        bool is_whiteout;
        char name[];
};

struct ovl_dir_cache {
        long refcount;
        u64 version;
        struct list_head entries;
        struct rb_root root;
};

struct ovl_readdir_data {
        struct dir_context ctx;
        struct dentry *dentry;
        bool is_lowest;
        struct rb_root *root;
        struct list_head *list;
        struct list_head middle;
        struct ovl_cache_entry *first_maybe_whiteout;
        int count;
        int err;
        bool is_upper;
        bool d_type_supported;
};

struct ovl_dir_file {
        bool is_real;
        bool is_upper;
        struct ovl_dir_cache *cache;
        struct list_head *cursor;
        struct file *realfile;
        struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
        return rb_entry(n, struct ovl_cache_entry, node);
}

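/*
 * Look up a name in the rb-tree of cache entries.
 * ovl_cache_entry_find_link() also returns the link and parent needed to
 * insert a new node when the name is not found.
 */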
static bool ovl_cache_entry_find_link(const char *name, int len,
                                      struct rb_node ***link,
                                      struct rb_node **parent)
{
        bool found = false;
        struct rb_node **newp = *link;

        while (!found && *newp) {
                int cmp;
                struct ovl_cache_entry *tmp;

                *parent = *newp;
                tmp = ovl_cache_entry_from_node(*newp);
                cmp = strncmp(name, tmp->name, len);
                if (cmp > 0)
                        newp = &tmp->node.rb_right;
                else if (cmp < 0 || len < tmp->len)
                        newp = &tmp->node.rb_left;
                else
                        found = true;
        }
        *link = newp;

        return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
                                                    const char *name, int len)
{
        struct rb_node *node = root->rb_node;
        int cmp;

        while (node) {
                struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

                cmp = strncmp(name, p->name, len);
                if (cmp > 0)
                        node = p->node.rb_right;
                else if (cmp < 0 || len < p->len)
                        node = p->node.rb_left;
                else
                        return p;
        }

        return NULL;
}

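/*
 * Decide whether d_ino for this entry must be recalculated later in
 * ovl_iterate(): only needed for merged iteration (rdd->dentry set), and
 * only for "..", for "." in an upper dir, or for any entry in an impure
 * upper dir.
 */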
static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
                           struct ovl_cache_entry *p)
{
        /* Don't care if not doing ovl_iterate() */
        if (!rdd->dentry)
                return false;

        /* Always recalc d_ino for parent */
        if (strcmp(p->name, "..") == 0)
                return true;

        /* If this is lower, then native d_ino will do */
        if (!rdd->is_upper)
                return false;

        /*
         * Recalc d_ino for '.' and for all entries if dir is impure (contains
         * copied up entries)
         */
        if ((p->name[0] == '.' && p->len == 1) ||
            ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
                return true;

        return false;
}

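/*
 * Allocate and initialize a cache entry for a name. DT_CHR entries may be
 * whiteouts, so they are chained on rdd->first_maybe_whiteout for a later
 * lookup in ovl_check_whiteouts().
 */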
static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
                                                   const char *name, int len,
                                                   u64 ino, unsigned int d_type)
{
        struct ovl_cache_entry *p;
        size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

        p = kmalloc(size, GFP_KERNEL);
        if (!p)
                return NULL;

        memcpy(p->name, name, len);
        p->name[len] = '\0';
        p->len = len;
        p->type = d_type;
        p->real_ino = ino;
        p->ino = ino;
        /* Defer setting d_ino for upper entry to ovl_iterate() */
        if (ovl_calc_d_ino(rdd, p))
                p->ino = 0;
        p->is_whiteout = false;

        if (d_type == DT_CHR) {
                p->next_maybe_whiteout = rdd->first_maybe_whiteout;
                rdd->first_maybe_whiteout = p;
        }
        return p;
}

static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                                  const char *name, int len, u64 ino,
                                  unsigned int d_type)
{
        struct rb_node **newp = &rdd->root->rb_node;
        struct rb_node *parent = NULL;
        struct ovl_cache_entry *p;

        if (ovl_cache_entry_find_link(name, len, &newp, &parent))
                return 0;

        p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
        if (p == NULL) {
                rdd->err = -ENOMEM;
                return -ENOMEM;
        }

        list_add_tail(&p->l_node, rdd->list);
        rb_link_node(&p->node, parent, newp);
        rb_insert_color(&p->node, rdd->root);

        return 0;
}

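/*
 * Actor used while reading the lowest layer: names already seen in an upper
 * layer are moved onto the temporary "middle" list and new names are added
 * there, so entries present in the lowest layer end up before upper-only ones.
 */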
static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
                           const char *name, int namelen,
                           loff_t offset, u64 ino, unsigned int d_type)
{
        struct ovl_cache_entry *p;

        p = ovl_cache_entry_find(rdd->root, name, namelen);
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
                p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
                        list_add_tail(&p->l_node, &rdd->middle);
        }

        return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
        struct ovl_cache_entry *p;
        struct ovl_cache_entry *n;

        list_for_each_entry_safe(p, n, list, l_node)
                kfree(p);

        INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
        struct ovl_dir_cache *cache = ovl_dir_cache(inode);

        if (cache) {
                ovl_cache_free(&cache->entries);
                kfree(cache);
        }
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
        struct ovl_dir_cache *cache = od->cache;

        WARN_ON(cache->refcount <= 0);
        cache->refcount--;
        if (!cache->refcount) {
                if (ovl_dir_cache(d_inode(dentry)) == cache)
                        ovl_set_dir_cache(d_inode(dentry), NULL);

                ovl_cache_free(&cache->entries);
                kfree(cache);
        }
}

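/*
 * dir_context actor for building the merged cache: all layers except the
 * lowest go into the rb-tree via ovl_cache_entry_add_rb(), the lowest layer
 * is handled by ovl_fill_lowest().
 */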
static int ovl_fill_merge(struct dir_context *ctx, const char *name,
                          int namelen, loff_t offset, u64 ino,
                          unsigned int d_type)
{
        struct ovl_readdir_data *rdd =
                container_of(ctx, struct ovl_readdir_data, ctx);

        rdd->count++;
        if (!rdd->is_lowest)
                return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
        else
                return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

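/*
 * Look up each DT_CHR candidate collected by ovl_cache_entry_new() and mark
 * it as a whiteout if it really is one. Runs with overlay credentials and
 * the real directory locked.
 */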
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
        int err;
        struct ovl_cache_entry *p;
        struct dentry *dentry;
        const struct cred *old_cred;

        old_cred = ovl_override_creds(rdd->dentry->d_sb);

        err = down_write_killable(&dir->d_inode->i_rwsem);
        if (!err) {
                while (rdd->first_maybe_whiteout) {
                        p = rdd->first_maybe_whiteout;
                        rdd->first_maybe_whiteout = p->next_maybe_whiteout;
                        dentry = lookup_one_len(p->name, dir, p->len);
                        if (!IS_ERR(dentry)) {
                                p->is_whiteout = ovl_is_whiteout(dentry);
                                dput(dentry);
                        }
                }
                inode_unlock(dir->d_inode);
        }
        revert_creds(old_cred);

        return err;
}

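/*
 * Open one real (upper or lower) directory and feed all of its entries to
 * rdd->ctx.actor, restarting iterate_dir() until a pass returns no entries.
 */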
static inline int ovl_dir_read(struct path *realpath,
                               struct ovl_readdir_data *rdd)
{
        struct file *realfile;
        int err;

        realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);

        rdd->first_maybe_whiteout = NULL;
        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
                rdd->err = 0;
                err = iterate_dir(realfile, &rdd->ctx);
                if (err >= 0)
                        err = rdd->err;
        } while (!err && rdd->count);

        if (!err && rdd->first_maybe_whiteout && rdd->dentry)
                err = ovl_check_whiteouts(realpath->dentry, rdd);

        fput(realfile);

        return err;
}

static void ovl_dir_reset(struct file *file)
{
        struct ovl_dir_file *od = file->private_data;
        struct ovl_dir_cache *cache = od->cache;
        struct dentry *dentry = file->f_path.dentry;
        enum ovl_path_type type = ovl_path_type(dentry);

        if (cache && ovl_dentry_version_get(dentry) != cache->version) {
                ovl_cache_put(od, dentry);
                od->cache = NULL;
                od->cursor = NULL;
        }
        WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
        if (od->is_real && OVL_TYPE_MERGE(type))
                od->is_real = false;
}

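/*
 * Read all layers of a merged directory, from the topmost down, merging the
 * entries into a single list/rb-tree via ovl_fill_merge().
 */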
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
                               struct rb_root *root)
{
        int err;
        struct path realpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .dentry = dentry,
                .list = list,
                .root = root,
                .is_lowest = false,
        };
        int idx, next;

        for (idx = 0; idx != -1; idx = next) {
                next = ovl_path_next(idx, dentry, &realpath);
                rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

                if (next != -1) {
                        err = ovl_dir_read(&realpath, &rdd);
                        if (err)
                                break;
                } else {
                        /*
                         * Insert lowest layer entries before upper ones; this
                         * allows offsets to be reasonably constant
                         */
                        list_add(&rdd.middle, rdd.list);
                        rdd.is_lowest = true;
                        err = ovl_dir_read(&realpath, &rdd);
                        list_del(&rdd.middle);
                }
        }
        return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
        struct list_head *p;
        loff_t off = 0;

        list_for_each(p, &od->cache->entries) {
                if (off >= pos)
                        break;
                off++;
        }
        /* Cursor is safe since the cache is stable */
        od->cursor = p;
}

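/*
 * Get the directory cache for a merged dir, reusing the cached version if the
 * overlay version number still matches, otherwise rebuilding it with
 * ovl_dir_read_merged(). The cache is refcounted and hung off the inode.
 */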
static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
        int res;
        struct ovl_dir_cache *cache;

        cache = ovl_dir_cache(d_inode(dentry));
        if (cache && ovl_dentry_version_get(dentry) == cache->version) {
                WARN_ON(!cache->refcount);
                cache->refcount++;
                return cache;
        }
        ovl_set_dir_cache(d_inode(dentry), NULL);

        cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
        if (!cache)
                return ERR_PTR(-ENOMEM);

        cache->refcount = 1;
        INIT_LIST_HEAD(&cache->entries);
        cache->root = RB_ROOT;

        res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
        if (res) {
                ovl_cache_free(&cache->entries);
                kfree(cache);
                return ERR_PTR(res);
        }

        cache->version = ovl_dentry_version_get(dentry);
        ovl_set_dir_cache(d_inode(dentry), cache);

        return cache;
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report the real ino also for upper.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
        struct dentry *dir = path->dentry;
        struct dentry *this = NULL;
        enum ovl_path_type type;
        u64 ino = p->real_ino;
        int err = 0;

        if (!ovl_same_sb(dir->d_sb))
                goto out;

        if (p->name[0] == '.') {
                if (p->len == 1) {
                        this = dget(dir);
                        goto get;
                }
                if (p->len == 2 && p->name[1] == '.') {
                        /* we shall not be moved */
                        this = dget(dir->d_parent);
                        goto get;
                }
        }
        this = lookup_one_len(p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
                if (IS_ERR(this)) {
                        err = PTR_ERR(this);
                        this = NULL;
                        goto fail;
                }
                goto out;
        }

get:
        type = ovl_path_type(this);
        if (OVL_TYPE_ORIGIN(type)) {
                struct kstat stat;
                struct path statpath = *path;

                statpath.dentry = this;
                err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
                if (err)
                        goto fail;

                WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
                ino = stat.ino;
        }

out:
        p->ino = ino;
        dput(this);
        return err;

fail:
        pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n",
                            p->name, err);
        goto out;
}

static int ovl_fill_plain(struct dir_context *ctx, const char *name,
                          int namelen, loff_t offset, u64 ino,
                          unsigned int d_type)
{
        struct ovl_cache_entry *p;
        struct ovl_readdir_data *rdd =
                container_of(ctx, struct ovl_readdir_data, ctx);

        rdd->count++;
        p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
        if (p == NULL) {
                rdd->err = -ENOMEM;
                return -ENOMEM;
        }
        list_add_tail(&p->l_node, rdd->list);

        return 0;
}

static int ovl_dir_read_impure(struct path *path, struct list_head *list,
                               struct rb_root *root)
{
        int err;
        struct path realpath;
        struct ovl_cache_entry *p, *n;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_plain,
                .list = list,
                .root = root,
        };

        INIT_LIST_HEAD(list);
        *root = RB_ROOT;
        ovl_path_upper(path->dentry, &realpath);

        err = ovl_dir_read(&realpath, &rdd);
        if (err)
                return err;

        list_for_each_entry_safe(p, n, list, l_node) {
                if (strcmp(p->name, ".") != 0 &&
                    strcmp(p->name, "..") != 0) {
                        err = ovl_cache_update_ino(path, p);
                        if (err)
                                return err;
                }
                if (p->ino == p->real_ino) {
                        list_del(&p->l_node);
                        kfree(p);
                } else {
                        struct rb_node **newp = &root->rb_node;
                        struct rb_node *parent = NULL;

                        if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
                                                              &newp, &parent)))
                                return -EIO;

                        rb_link_node(&p->node, parent, newp);
                        rb_insert_color(&p->node, root);
                }
        }
        return 0;
}

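/*
 * Get (or rebuild) the cache of entries in an impure upper dir whose d_ino
 * differs from the real ino; ovl_iterate_real() uses it to translate inode
 * numbers. Returns NULL if the dir turns out not to be impure after all.
 */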
static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
        int res;
        struct dentry *dentry = path->dentry;
        struct ovl_dir_cache *cache;

        cache = ovl_dir_cache(d_inode(dentry));
        if (cache && ovl_dentry_version_get(dentry) == cache->version)
                return cache;

        /* Impure cache is not refcounted, free it here */
        ovl_dir_cache_free(d_inode(dentry));
        ovl_set_dir_cache(d_inode(dentry), NULL);

        cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
        if (!cache)
                return ERR_PTR(-ENOMEM);

        res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
        if (res) {
                ovl_cache_free(&cache->entries);
                kfree(cache);
                return ERR_PTR(res);
        }
        if (list_empty(&cache->entries)) {
                /* Good opportunity to get rid of an unnecessary "impure" flag */
                ovl_do_removexattr(ovl_dentry_upper(dentry), OVL_XATTR_IMPURE);
                ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
                kfree(cache);
                return NULL;
        }

        cache->version = ovl_dentry_version_get(dentry);
        ovl_set_dir_cache(d_inode(dentry), cache);

        return cache;
}

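/*
 * Context used when the real directory can be iterated directly but some
 * inode numbers still need translating: ".." to the merged parent's ino and
 * copied up entries to their origin ino from the impure cache.
 */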
struct ovl_readdir_translate {
        struct dir_context *orig_ctx;
        struct ovl_dir_cache *cache;
        struct dir_context ctx;
        u64 parent_ino;
};

static int ovl_fill_real(struct dir_context *ctx, const char *name,
                         int namelen, loff_t offset, u64 ino,
                         unsigned int d_type)
{
        struct ovl_readdir_translate *rdt =
                container_of(ctx, struct ovl_readdir_translate, ctx);
        struct dir_context *orig_ctx = rdt->orig_ctx;

        if (rdt->parent_ino && strcmp(name, "..") == 0)
                ino = rdt->parent_ino;
        else if (rdt->cache) {
                struct ovl_cache_entry *p;

                p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
                if (p)
                        ino = p->ino;
        }

        return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
        int err;
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dir = file->f_path.dentry;
        struct ovl_readdir_translate rdt = {
                .ctx.actor = ovl_fill_real,
                .orig_ctx = ctx,
        };

        if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
                struct kstat stat;
                struct path statpath = file->f_path;

                statpath.dentry = dir->d_parent;
                err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
                if (err)
                        return err;

                WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
                rdt.parent_ino = stat.ino;
        }

        if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
                rdt.cache = ovl_cache_get_impure(&file->f_path);
                if (IS_ERR(rdt.cache))
                        return PTR_ERR(rdt.cache);
        }

        return iterate_dir(od->realfile, &rdt.ctx);
}

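/*
 * readdir for overlay directories: pure upper/lower dirs are iterated
 * directly (possibly with ino translation), merged dirs are served from
 * the ovl_dir_cache built by ovl_cache_get().
 */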
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
        struct ovl_cache_entry *p;
        int err;

        if (!ctx->pos)
                ovl_dir_reset(file);

        if (od->is_real) {
                /*
                 * If parent is merge, then need to adjust d_ino for '..', if
                 * dir is impure then need to adjust d_ino for copied up
                 * entries.
                 */
                if (ovl_same_sb(dentry->d_sb) &&
                    (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
                     OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
                        return ovl_iterate_real(file, ctx);
                }
                return iterate_dir(od->realfile, ctx);
        }

        if (!od->cache) {
                struct ovl_dir_cache *cache;

                cache = ovl_cache_get(dentry);
                if (IS_ERR(cache))
                        return PTR_ERR(cache);

                od->cache = cache;
                ovl_seek_cursor(od, ctx->pos);
        }

        while (od->cursor != &od->cache->entries) {
                p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
                if (!p->is_whiteout) {
                        if (!p->ino) {
                                err = ovl_cache_update_ino(&file->f_path, p);
                                if (err)
                                        return err;
                        }
                        if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                break;
                }
                od->cursor = p->l_node.next;
                ctx->pos++;
        }
        return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
        loff_t res;
        struct ovl_dir_file *od = file->private_data;

        inode_lock(file_inode(file));
        if (!file->f_pos)
                ovl_dir_reset(file);

        if (od->is_real) {
                res = vfs_llseek(od->realfile, offset, origin);
                file->f_pos = od->realfile->f_pos;
        } else {
                res = -EINVAL;

                switch (origin) {
                case SEEK_CUR:
                        offset += file->f_pos;
                        break;
                case SEEK_SET:
                        break;
                default:
                        goto out_unlock;
                }
                if (offset < 0)
                        goto out_unlock;

                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        if (od->cache)
                                ovl_seek_cursor(od, offset);
                }
                res = offset;
        }
out_unlock:
        inode_unlock(file_inode(file));

        return res;
}

static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
        struct file *realfile = od->realfile;

        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
        if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                struct inode *inode = file_inode(file);

                realfile = lockless_dereference(od->upperfile);
                if (!realfile) {
                        struct path upperpath;

                        ovl_path_upper(dentry, &upperpath);
                        realfile = ovl_path_open(&upperpath, O_RDONLY);

                        inode_lock(inode);
                        if (!od->upperfile) {
                                if (IS_ERR(realfile)) {
                                        inode_unlock(inode);
                                        return PTR_ERR(realfile);
                                }
                                smp_store_release(&od->upperfile, realfile);
                        } else {
                                /* somebody has beaten us to it */
                                if (!IS_ERR(realfile))
                                        fput(realfile);
                                realfile = od->upperfile;
                        }
                        inode_unlock(inode);
                }
        }

        return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
        struct ovl_dir_file *od = file->private_data;

        if (od->cache) {
                inode_lock(inode);
                ovl_cache_put(od, file->f_path.dentry);
                inode_unlock(inode);
        }
        fput(od->realfile);
        if (od->upperfile)
                fput(od->upperfile);
        kfree(od);

        return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
        struct path realpath;
        struct file *realfile;
        struct ovl_dir_file *od;
        enum ovl_path_type type;

        od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        type = ovl_path_real(file->f_path.dentry, &realpath);
        realfile = ovl_path_open(&realpath, file->f_flags);
        if (IS_ERR(realfile)) {
                kfree(od);
                return PTR_ERR(realfile);
        }
        od->realfile = realfile;
        od->is_real = !OVL_TYPE_MERGE(type);
        od->is_upper = OVL_TYPE_UPPER(type);
        file->private_data = od;

        return 0;
}

const struct file_operations ovl_dir_operations = {
        .read = generic_read_dir,
        .open = ovl_dir_open,
        .iterate = ovl_iterate,
        .llseek = ovl_dir_llseek,
        .fsync = ovl_dir_fsync,
        .release = ovl_dir_release,
};

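/*
 * Check whether a merged directory is empty apart from whiteouts and "."/"..".
 * The merged listing is left in *list so callers can reuse it, e.g. for
 * ovl_cleanup_whiteouts().
 */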
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
        int err;
        struct ovl_cache_entry *p;
        struct rb_root root = RB_ROOT;

        err = ovl_dir_read_merged(dentry, list, &root);
        if (err)
                return err;

        err = 0;

        list_for_each_entry(p, list, l_node) {
                if (p->is_whiteout)
                        continue;

                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
                err = -ENOTEMPTY;
                break;
        }

        return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
        struct ovl_cache_entry *p;

        inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
        list_for_each_entry(p, list, l_node) {
                struct dentry *dentry;

                if (!p->is_whiteout)
                        continue;

                dentry = lookup_one_len(p->name, upper, p->len);
                if (IS_ERR(dentry)) {
                        pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
                               upper->d_name.name, p->len, p->name,
                               (int) PTR_ERR(dentry));
                        continue;
                }
                if (dentry->d_inode)
                        ovl_cleanup(upper->d_inode, dentry);
                dput(dentry);
        }
        inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
                            int namelen, loff_t offset, u64 ino,
                            unsigned int d_type)
{
        struct ovl_readdir_data *rdd =
                container_of(ctx, struct ovl_readdir_data, ctx);

        /* Even if d_type is not supported, DT_DIR is returned for . and .. */
        if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
                return 0;

        if (d_type != DT_UNKNOWN)
                rdd->d_type_supported = true;

        return 0;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown, and a
 * negative value if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
        int err;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_check_d_type,
                .d_type_supported = false,
        };

        err = ovl_dir_read(realpath, &rdd);
        if (err)
                return err;

        return rdd.d_type_supported;
}

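/*
 * Recursively delete the contents of a stale work directory: read the real
 * dir and call ovl_workdir_cleanup() on every entry except "." and "..".
 */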
static void ovl_workdir_cleanup_recurse(struct path *path, int level)
{
        int err;
        struct inode *dir = path->dentry->d_inode;
        LIST_HEAD(list);
        struct rb_root root = RB_ROOT;
        struct ovl_cache_entry *p;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .dentry = NULL,
                .list = &list,
                .root = &root,
                .is_lowest = false,
        };

        err = ovl_dir_read(path, &rdd);
        if (err)
                goto out;

        inode_lock_nested(dir, I_MUTEX_PARENT);
        list_for_each_entry(p, &list, l_node) {
                struct dentry *dentry;

                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
                dentry = lookup_one_len(p->name, path->dentry, p->len);
                if (IS_ERR(dentry))
                        continue;
                if (dentry->d_inode)
                        ovl_workdir_cleanup(dir, path->mnt, dentry, level);
                dput(dentry);
        }
        inode_unlock(dir);
out:
        ovl_cache_free(&list);
}

void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
                         struct dentry *dentry, int level)
{
        int err;

        if (!d_is_dir(dentry) || level > 1) {
                ovl_cleanup(dir, dentry);
                return;
        }

        err = ovl_do_rmdir(dir, dentry);
        if (err) {
                struct path path = { .mnt = mnt, .dentry = dentry };

                inode_unlock(dir);
                ovl_workdir_cleanup_recurse(&path, level + 1);
                inode_lock_nested(dir, I_MUTEX_PARENT);
                ovl_cleanup(dir, dentry);
        }
}

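/*
 * Walk the index directory at mount time and remove index entries that no
 * longer pass ovl_verify_index() against the current lower layer stack.
 */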
int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
                         struct path *lowerstack, unsigned int numlower)
{
        int err;
        struct dentry *index = NULL;
        struct inode *dir = dentry->d_inode;
        struct path path = { .mnt = mnt, .dentry = dentry };
        LIST_HEAD(list);
        struct rb_root root = RB_ROOT;
        struct ovl_cache_entry *p;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .dentry = NULL,
                .list = &list,
                .root = &root,
                .is_lowest = false,
        };

        err = ovl_dir_read(&path, &rdd);
        if (err)
                goto out;

        inode_lock_nested(dir, I_MUTEX_PARENT);
        list_for_each_entry(p, &list, l_node) {
                if (p->name[0] == '.') {
                        if (p->len == 1)
                                continue;
                        if (p->len == 2 && p->name[1] == '.')
                                continue;
                }
                index = lookup_one_len(p->name, dentry, p->len);
                if (IS_ERR(index)) {
                        err = PTR_ERR(index);
                        index = NULL;
                        break;
                }
                err = ovl_verify_index(index, lowerstack, numlower);
                if (err) {
                        if (err == -EROFS)
                                break;
                        err = ovl_cleanup(dir, index);
                        if (err)
                                break;
                }
                dput(index);
                index = NULL;
        }
        dput(index);
        inode_unlock(dir);
out:
        ovl_cache_free(&list);
        if (err)
                pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
        return err;
}