/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
        struct jffs2_full_dnode *old_metadata, *new_metadata;
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
        struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
        struct jffs2_raw_inode *ri;
        union jffs2_device_node dev;
        unsigned char *mdata = NULL;
        int mdatalen = 0;
        unsigned int ivalid;
        uint32_t alloclen;
        int ret;
        int alloc_type = ALLOC_NORMAL;

        jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

        /* Special cases - we don't want more than one data node
           for these types on the medium at any time. So setattr
           must read the original data associated with the node
           (i.e. the device numbers or the target name) and write
           it out again with the appropriate data attached */
        if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
                /* For these, we don't actually need to read the old node */
                mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
                mdata = (char *)&dev;
                jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
                          __func__, mdatalen);
        } else if (S_ISLNK(inode->i_mode)) {
                mutex_lock(&f->sem);
                mdatalen = f->metadata->size;
                mdata = kmalloc(f->metadata->size, GFP_USER);
                if (!mdata) {
                        mutex_unlock(&f->sem);
                        return -ENOMEM;
                }
                ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
                if (ret) {
                        mutex_unlock(&f->sem);
                        kfree(mdata);
                        return ret;
                }
                mutex_unlock(&f->sem);
                jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
                          __func__, mdatalen);
        }

        ri = jffs2_alloc_raw_inode();
        if (!ri) {
                if (S_ISLNK(inode->i_mode))
                        kfree(mdata);
                return -ENOMEM;
        }

        ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
                                  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
        if (ret) {
                jffs2_free_raw_inode(ri);
                if (S_ISLNK(inode->i_mode))
                        kfree(mdata);
                return ret;
        }
        mutex_lock(&f->sem);
        ivalid = iattr->ia_valid;

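        /* Fill in the common node header; hdr_crc covers the header
           fields up to, but not including, the CRC word itself. */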
        ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
        ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
        ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
        ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

        ri->ino = cpu_to_je32(inode->i_ino);
        ri->version = cpu_to_je32(++f->highest_version);

        ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
                from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
        ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
                from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

        if (ivalid & ATTR_MODE)
                ri->mode = cpu_to_jemode(iattr->ia_mode);
        else
                ri->mode = cpu_to_jemode(inode->i_mode);


        ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
        ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
        ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
        ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

        ri->offset = cpu_to_je32(0);
        ri->csize = ri->dsize = cpu_to_je32(mdatalen);
        ri->compr = JFFS2_COMPR_NONE;
        if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
                /* It's an extension. Make it a hole node */
                ri->compr = JFFS2_COMPR_ZERO;
                ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
                ri->offset = cpu_to_je32(inode->i_size);
        } else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
                /* For truncate-to-zero, treat it as deletion because
                   it'll always be obsoleting all previous nodes */
                alloc_type = ALLOC_DELETION;
        }
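        /* node_crc covers the raw inode up to, but not including, the two
           trailing CRC words; data_crc covers the attached data, if any. */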
        ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
        if (mdatalen)
                ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
        else
                ri->data_crc = cpu_to_je32(0);

        new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
        if (S_ISLNK(inode->i_mode))
                kfree(mdata);

        if (IS_ERR(new_metadata)) {
                jffs2_complete_reservation(c);
                jffs2_free_raw_inode(ri);
                mutex_unlock(&f->sem);
                return PTR_ERR(new_metadata);
        }
        /* It worked. Update the inode */
        inode->i_atime = ITIME(je32_to_cpu(ri->atime));
        inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
        inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
        inode->i_mode = jemode_to_cpu(ri->mode);
        i_uid_write(inode, je16_to_cpu(ri->uid));
        i_gid_write(inode, je16_to_cpu(ri->gid));


        old_metadata = f->metadata;

        if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
                jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

        if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
                jffs2_add_full_dnode_to_inode(c, f, new_metadata);
                inode->i_size = iattr->ia_size;
                inode->i_blocks = (inode->i_size + 511) >> 9;
                f->metadata = NULL;
        } else {
                f->metadata = new_metadata;
        }
        if (old_metadata) {
                jffs2_mark_node_obsolete(c, old_metadata->raw);
                jffs2_free_full_dnode(old_metadata);
        }
        jffs2_free_raw_inode(ri);

        mutex_unlock(&f->sem);
        jffs2_complete_reservation(c);

        /* We have to do the truncate_setsize() without f->sem held, since
           some pages may be locked and waiting for it in readpage().
           We are protected from a simultaneous write() extending i_size
           back past iattr->ia_size, because do_truncate() holds the
           generic inode semaphore. */
        if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
                truncate_setsize(inode, iattr->ia_size);
                inode->i_blocks = (inode->i_size + 511) >> 9;
        }

        return 0;
}

int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = d_inode(dentry);
        int rc;

        rc = setattr_prepare(dentry, iattr);
        if (rc)
                return rc;

        rc = jffs2_do_setattr(inode, iattr);
        if (!rc && (iattr->ia_valid & ATTR_MODE))
                rc = posix_acl_chmod(inode, inode->i_mode);

        return rc;
}

int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
        unsigned long avail;

        buf->f_type = JFFS2_SUPER_MAGIC;
        buf->f_bsize = 1 << PAGE_SHIFT;
        buf->f_blocks = c->flash_size >> PAGE_SHIFT;
        buf->f_files = 0;
        buf->f_ffree = 0;
        buf->f_namelen = JFFS2_MAX_NAME_LEN;
        buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
        buf->f_fsid.val[1] = c->mtd->index;

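        /* Report only the space not reserved for GC/deletion writes:
           dirty + free space minus the write-reserve blocks. */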
        spin_lock(&c->erase_completion_lock);
        avail = c->dirty_size + c->free_size;
        if (avail > c->sector_size * c->resv_blocks_write)
                avail -= c->sector_size * c->resv_blocks_write;
        else
                avail = 0;
        spin_unlock(&c->erase_completion_lock);

        buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

        return 0;
}


void jffs2_evict_inode (struct inode *inode)
{
        /* We can forget about this inode for now - drop all
         * the nodelists associated with it, etc.
         */
        struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

        jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
                  __func__, inode->i_ino, inode->i_mode);
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        jffs2_do_clear_inode(c, f);
}

struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
        struct jffs2_inode_info *f;
        struct jffs2_sb_info *c;
        struct jffs2_raw_inode latest_node;
        union jffs2_device_node jdev;
        struct inode *inode;
        dev_t rdev = 0;
        int ret;

        jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        f = JFFS2_INODE_INFO(inode);
        c = JFFS2_SB_INFO(inode->i_sb);

        jffs2_init_inode_info(f);
        mutex_lock(&f->sem);

        ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
        if (ret)
                goto error;

        inode->i_mode = jemode_to_cpu(latest_node.mode);
        i_uid_write(inode, je16_to_cpu(latest_node.uid));
        i_gid_write(inode, je16_to_cpu(latest_node.gid));
        inode->i_size = je32_to_cpu(latest_node.isize);
        inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
        inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
        inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

        set_nlink(inode, f->inocache->pino_nlink);
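        /* For non-directories pino_nlink is the link count; for directories
           it holds the parent inode number, and the real count is recomputed
           from the dirent list in the S_IFDIR case below. */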

        inode->i_blocks = (inode->i_size + 511) >> 9;

        switch (inode->i_mode & S_IFMT) {

        case S_IFLNK:
                inode->i_op = &jffs2_symlink_inode_operations;
                inode->i_link = f->target;
                break;

        case S_IFDIR:
        {
                struct jffs2_full_dirent *fd;
                set_nlink(inode, 2); /* parent and '.' */

                for (fd=f->dents; fd; fd = fd->next) {
                        if (fd->type == DT_DIR && fd->ino)
                                inc_nlink(inode);
                }
                /* Root dir gets i_nlink 3 for some reason */
                if (inode->i_ino == 1)
                        inc_nlink(inode);

                inode->i_op = &jffs2_dir_inode_operations;
                inode->i_fop = &jffs2_dir_operations;
                break;
        }
        case S_IFREG:
                inode->i_op = &jffs2_file_inode_operations;
                inode->i_fop = &jffs2_file_operations;
                inode->i_mapping->a_ops = &jffs2_file_address_operations;
                inode->i_mapping->nrpages = 0;
                break;

        case S_IFBLK:
        case S_IFCHR:
                /* Read the device numbers from the media */
                if (f->metadata->size != sizeof(jdev.old_id) &&
                    f->metadata->size != sizeof(jdev.new_id)) {
                        pr_notice("Device node has strange size %d\n",
                                  f->metadata->size);
                        goto error_io;
                }
                jffs2_dbg(1, "Reading device numbers from flash\n");
                ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
                if (ret < 0) {
                        /* Eep */
                        pr_notice("Read device numbers for inode %lu failed\n",
                                  (unsigned long)inode->i_ino);
                        goto error;
                }
                if (f->metadata->size == sizeof(jdev.old_id))
                        rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
                else
                        rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
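                /* fall through - block/char devices share the special-inode
                   setup below, using the rdev decoded above */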

        case S_IFSOCK:
        case S_IFIFO:
                inode->i_op = &jffs2_file_inode_operations;
                init_special_inode(inode, inode->i_mode, rdev);
                break;

        default:
                pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
                        __func__, inode->i_mode, (unsigned long)inode->i_ino);
        }

        mutex_unlock(&f->sem);

        jffs2_dbg(1, "jffs2_read_inode() returning\n");
        unlock_new_inode(inode);
        return inode;

error_io:
        ret = -EIO;
error:
        mutex_unlock(&f->sem);
        jffs2_do_clear_inode(c, f);
        iget_failed(inode);
        return ERR_PTR(ret);
}

void jffs2_dirty_inode(struct inode *inode, int flags)
{
        struct iattr iattr;

        if (!(inode->i_state & I_DIRTY_DATASYNC)) {
                jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
                          __func__, inode->i_ino);
                return;
        }

        jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
                  __func__, inode->i_ino);

        iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
        iattr.ia_mode = inode->i_mode;
        iattr.ia_uid = inode->i_uid;
        iattr.ia_gid = inode->i_gid;
        iattr.ia_atime = inode->i_atime;
        iattr.ia_mtime = inode->i_mtime;
        iattr.ia_ctime = inode->i_ctime;

        jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

        if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb))
                return -EROFS;

        /* We stop if it was running, then restart if it needs to.
           This also catches the case where it was stopped and this
           is just a remount to restart it.
           Flush the writebuffer, if necessary, else we lose it */
        if (!sb_rdonly(sb)) {
                jffs2_stop_garbage_collect_thread(c);
                mutex_lock(&c->alloc_sem);
                jffs2_flush_wbuf_pad(c);
                mutex_unlock(&c->alloc_sem);
        }

        if (!(*flags & SB_RDONLY))
                jffs2_start_garbage_collect_thread(c);

        *flags |= SB_NOATIME;
        return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
        struct inode *inode;
        struct super_block *sb = dir_i->i_sb;
        struct jffs2_sb_info *c;
        struct jffs2_inode_info *f;
        int ret;

        jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
                  __func__, dir_i->i_ino, mode);

        c = JFFS2_SB_INFO(sb);

        inode = new_inode(sb);

        if (!inode)
                return ERR_PTR(-ENOMEM);

        f = JFFS2_INODE_INFO(inode);
        jffs2_init_inode_info(f);
        mutex_lock(&f->sem);

        memset(ri, 0, sizeof(*ri));
        /* Set OS-specific defaults for new inodes */
        ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

        if (dir_i->i_mode & S_ISGID) {
                ri->gid = cpu_to_je16(i_gid_read(dir_i));
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else {
                ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
        }

        /* POSIX ACLs have to be processed now, at least partly.
           The umask is only applied if there's no default ACL */
        ret = jffs2_init_acl_pre(dir_i, inode, &mode);
        if (ret) {
                mutex_unlock(&f->sem);
                make_bad_inode(inode);
                iput(inode);
                return ERR_PTR(ret);
        }
        ret = jffs2_do_new_inode (c, f, mode, ri);
        if (ret) {
                mutex_unlock(&f->sem);
                make_bad_inode(inode);
                iput(inode);
                return ERR_PTR(ret);
        }
        set_nlink(inode, 1);
        inode->i_ino = je32_to_cpu(ri->ino);
        inode->i_mode = jemode_to_cpu(ri->mode);
        i_gid_write(inode, je16_to_cpu(ri->gid));
        i_uid_write(inode, je16_to_cpu(ri->uid));
        inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
        ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

        inode->i_blocks = 0;
        inode->i_size = 0;

        if (insert_inode_locked(inode) < 0) {
                mutex_unlock(&f->sem);
                make_bad_inode(inode);
                iput(inode);
                return ERR_PTR(-EINVAL);
        }

        return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
        /*
         * Pick an inocache hash size based on the size of the medium.
         * Count how many megabytes we're dealing with, apply a hashsize twice
         * that size, but rounding down to the usual big powers of 2. And keep
         * to sensible bounds.
         */

        int size_mb = flash_size / 1024 / 1024;
        int hashsize = (size_mb * 2) & ~0x3f;
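        /* e.g. a 40MiB medium: 40 * 2 = 80, rounded down (low six bits
           cleared) to 64 hash buckets. */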

        if (hashsize < INOCACHE_HASHSIZE_MIN)
                return INOCACHE_HASHSIZE_MIN;
        if (hashsize > INOCACHE_HASHSIZE_MAX)
                return INOCACHE_HASHSIZE_MAX;

        return hashsize;
}

int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
        struct jffs2_sb_info *c;
        struct inode *root_i;
        int ret;
        size_t blocks;

        c = JFFS2_SB_INFO(sb);

        /* Do not support MLC NAND */
        if (c->mtd->type == MTD_MLCNANDFLASH)
                return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
        if (c->mtd->type == MTD_NANDFLASH) {
                pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
                return -EINVAL;
        }
        if (c->mtd->type == MTD_DATAFLASH) {
                pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
                return -EINVAL;
        }
#endif

        c->flash_size = c->mtd->size;
        c->sector_size = c->mtd->erasesize;
        blocks = c->flash_size / c->sector_size;

        /*
         * Size alignment check
         */
        if ((c->sector_size * blocks) != c->flash_size) {
                c->flash_size = c->sector_size * blocks;
                pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
                        c->flash_size / 1024);
        }

        if (c->flash_size < 5*c->sector_size) {
                pr_err("Too few erase blocks (%d)\n",
                       c->flash_size / c->sector_size);
                return -EINVAL;
        }

        c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

        /* NAND (or other bizarre) flash... do setup accordingly */
        ret = jffs2_flash_setup(c);
        if (ret)
                return ret;

        c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
        c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
        if (!c->inocache_list) {
                ret = -ENOMEM;
                goto out_wbuf;
        }

        jffs2_init_xattr_subsystem(c);

        if ((ret = jffs2_do_mount_fs(c)))
                goto out_inohash;

        jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
        root_i = jffs2_iget(sb, 1);
        if (IS_ERR(root_i)) {
                jffs2_dbg(1, "get root inode failed\n");
                ret = PTR_ERR(root_i);
                goto out_root;
        }

        ret = -ENOMEM;

        jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
        sb->s_root = d_make_root(root_i);
        if (!sb->s_root)
                goto out_root;

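        /* The on-medium isize field is 32 bits wide, so individual file
           sizes are capped at 4GiB - 1. */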
        sb->s_maxbytes = 0xFFFFFFFF;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = JFFS2_SUPER_MAGIC;
        if (!sb_rdonly(sb))
                jffs2_start_garbage_collect_thread(c);
        return 0;

out_root:
        jffs2_free_ino_caches(c);
        jffs2_free_raw_node_refs(c);
        kvfree(c->blocks);
out_inohash:
        jffs2_clear_xattr_subsystem(c);
        kfree(c->inocache_list);
out_wbuf:
        jffs2_flash_cleanup(c);

        return ret;
}

void jffs2_gc_release_inode(struct jffs2_sb_info *c,
                            struct jffs2_inode_info *f)
{
        iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
                                              int inum, int unlinked)
{
        struct inode *inode;
        struct jffs2_inode_cache *ic;

        if (unlinked) {
                /* The inode has zero nlink but its nodes weren't yet marked
                   obsolete. This has to be because we're still waiting for
                   the final (close() and) iput() to happen.

                   There's a possibility that the final iput() could have
                   happened while we were contemplating. In order to ensure
                   that we don't cause a new read_inode() (which would fail)
                   for the inode in question, we use ilookup() in this case
                   instead of iget().

                   The nlink can't _become_ zero at this point because we're
                   holding the alloc_sem, and jffs2_do_unlink() would also
                   need that while decrementing nlink on any inode.
                */
                inode = ilookup(OFNI_BS_2SFFJ(c), inum);
                if (!inode) {
                        jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
                                  inum);

                        spin_lock(&c->inocache_lock);
                        ic = jffs2_get_ino_cache(c, inum);
                        if (!ic) {
                                jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
                                          inum);
                                spin_unlock(&c->inocache_lock);
                                return NULL;
                        }
                        if (ic->state != INO_STATE_CHECKEDABSENT) {
                                /* Wait for progress. Don't just loop */
                                jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
                                          ic->ino, ic->state);
                                sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
                        } else {
                                spin_unlock(&c->inocache_lock);
                        }

                        return NULL;
                }
        } else {
                /* Inode has links to it still; they're not going away because
                   jffs2_do_unlink() would need the alloc_sem and we have it.
                   Just iget() it, and if read_inode() is necessary that's OK.
                */
                inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
                if (IS_ERR(inode))
                        return ERR_CAST(inode);
        }
        if (is_bad_inode(inode)) {
                pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
                          inum, unlinked);
                /* NB. This will happen again. We need to do something appropriate here. */
                iput(inode);
                return ERR_PTR(-EIO);
        }

        return JFFS2_INODE_INFO(inode);
}

unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
                                   struct jffs2_inode_info *f,
                                   unsigned long offset,
                                   unsigned long *priv)
{
        struct inode *inode = OFNI_EDONI_2SFFJ(f);
        struct page *pg;

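        /* Pull the page into the page cache using jffs2_do_readpage_unlock()
           as the filler (it returns the page unlocked), then hand the GC code
           a kernel mapping; jffs2_gc_release_page() below undoes the kmap()
           and drops the page reference. */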
        pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
                             (void *)jffs2_do_readpage_unlock, inode);
        if (IS_ERR(pg))
                return (void *)pg;

        *priv = (unsigned long)pg;
        return kmap(pg);
}

void jffs2_gc_release_page(struct jffs2_sb_info *c,
                           unsigned char *ptr,
                           unsigned long *priv)
{
        struct page *pg = (void *)*priv;

        kunmap(pg);
        put_page(pg);
}

static int jffs2_flash_setup(struct jffs2_sb_info *c) {
        int ret = 0;

        if (jffs2_cleanmarker_oob(c)) {
                /* NAND flash... do setup accordingly */
                ret = jffs2_nand_flash_setup(c);
                if (ret)
                        return ret;
        }

        /* and Dataflash */
        if (jffs2_dataflash(c)) {
                ret = jffs2_dataflash_setup(c);
                if (ret)
                        return ret;
        }

        /* and Intel "Sibley" flash */
        if (jffs2_nor_wbuf_flash(c)) {
                ret = jffs2_nor_wbuf_flash_setup(c);
                if (ret)
                        return ret;
        }

        /* and a UBI volume */
        if (jffs2_ubivol(c)) {
                ret = jffs2_ubivol_setup(c);
                if (ret)
                        return ret;
        }

        return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

        if (jffs2_cleanmarker_oob(c)) {
                jffs2_nand_flash_cleanup(c);
        }

        /* and DataFlash */
        if (jffs2_dataflash(c)) {
                jffs2_dataflash_cleanup(c);
        }

        /* and Intel "Sibley" flash */
        if (jffs2_nor_wbuf_flash(c)) {
                jffs2_nor_wbuf_flash_cleanup(c);
        }

        /* and a UBI volume */
        if (jffs2_ubivol(c)) {
                jffs2_ubivol_cleanup(c);
        }
}