/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.125 2005/09/07 08:34:54 havasi Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @ofs: Returned value of node offset
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 * @sumsize: Summary size requested for this node, or JFFS2_SUMMARY_NOSUM_SIZE
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 * or other error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by jffs2_complete_reservation()
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */
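
/*
 * A minimal caller sketch (illustrative only, not taken from this file):
 * a writer reserves space, writes its node at the returned offset, then
 * releases the reservation. 'write_len' is a hypothetical name here;
 * JFFS2_SUMMARY_NOSUM_SIZE requests that no summary be collected.
 *
 *	uint32_t ofs, len;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, write_len, &ofs, &len,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_NOSUM_SIZE);
 *	if (ret)
 *		return ret;
 *	// ... write at most 'len' bytes of node data at flash offset 'ofs' ...
 *	jffs2_complete_reservation(c);
 */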

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
					uint32_t *ofs, uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret; /* shadows the outer 'ret' deliberately; the outer loop keys off -EAGAIN */
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and pick eventually a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
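			/* Worked example (hypothetical numbers, assuming 64KiB sectors):
			 * with dirty_size = 0x30000, two blocks mid-erase (erasing_size
			 * = 0x20000, nr_erasing_blocks = 2) and unchecked_size = 0x1000,
			 * dirty = 0x30000 + 0x20000 - 2*0x10000 + 0x1000 = 0x31000.
			 * The in-flight erases cancel out, leaving only genuinely
			 * reclaimable dirty space plus optimistically counted
			 * unchecked space.
			 */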
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know, if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC, if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem, which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}
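
			/* Worked example (hypothetical numbers): with 64KiB sectors
			 * and blocksneeded = 5, a filesystem with free_size = 0x20000,
			 * dirty_size = 0x18000, erasing_size = 0x10000 and
			 * unchecked_size = 0 has avail = 0x48000, i.e. 4 whole
			 * sectors even in the best case; 4 <= 5, so we give up with
			 * -ENOSPC instead of letting GC churn forever on an
			 * effectively full medium.
			 */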

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}

/* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	/* Check, if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}
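
/* Note on the thresholds above (a sketch of the definitions, which live in
 * nodelist.h rather than here): ISDIRTY() is believed to test whether the
 * dirty space exceeds the size of a minimal data node, roughly
 *
 *	#define ISDIRTY(size) ((size) > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
 *
 * and VERYDIRTY() whether at least half the eraseblock is dirty. Anything
 * below the ISDIRTY threshold is mere padding, accounted as wasted space.
 */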

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_del(&ejeb->list);
			list_add_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
			       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			JFFS2_DBG_SUMMARY("minsize=%d , jeb->free=%d ,"
					  "summary->size=%d , sumsize=%d\n",
					  minsize, jeb->free_size,
					  c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			JFFS2_DBG_SUMMARY("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; disable summary for this jeb and free the
				   collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			c->wasted_size += jeb->free_size;
			c->free_size -= jeb->free_size;
			jeb->wasted_size += jeb->free_size;
			jeb->free_size = 0;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size - reserved_size;
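	/* Worked example (hypothetical numbers): with 64KiB sectors, a
	 * nextblock at offset 0x80000 that already holds 0x1200 bytes has
	 * free_size = 0xEE00, so the caller is handed
	 * ofs = 0x80000 + (0x10000 - 0xEE00) = 0x81200, i.e. the first
	 * unused byte of the block, and len = free_size minus whatever was
	 * reserved above for the summary node at the end of the block.
	 */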

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @new: new node reference to add
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

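/*
 * A minimal caller sketch (illustrative; 'ofs' and 'totlen' are hypothetical
 * names). After writing a node into space obtained from jffs2_reserve_space(),
 * the writer allocates a raw node ref describing what it wrote and reports it
 * here so the block accounting stays correct:
 *
 *	struct jffs2_raw_node_ref *raw = jffs2_alloc_raw_node_ref();
 *	if (!raw)
 *		return -ENOMEM;
 *	raw->flash_offset = ofs | REF_NORMAL;	// offset plus a REF_* state flag
 *	raw->__totlen = totlen;			// total (padded) length written
 *	raw->next_phys = NULL;
 *	ret = jffs2_add_physical_node_ref(c, raw);
 */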
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new))
	    && (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
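
	/* The sanity checks below verify the space accounting. A sketch of
	 * the invariant they are believed to enforce (the real checks live
	 * in debug.c): for each eraseblock,
	 *
	 *	free_size + dirty_size + used_size + wasted_size + unchecked_size
	 *		== c->sector_size
	 *
	 * and the corresponding sums over all blocks (plus erasing_size and
	 * bad_size) must equal c->flash_size.
	 */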
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return 0;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if (!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk(KERN_DEBUG "Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk(KERN_DEBUG "Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time (127 times out of 128), we just
				   erase it immediately. Otherwise we'd spend ages
				   scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}

	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n) jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

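/*
 * A sketch of how the predicate below is consumed (illustrative; the real
 * loop lives in the background GC thread in background.c, and its exact
 * shape may differ): the thread sleeps until woken, asks
 * jffs2_thread_should_wake() whether there is work, and if so runs
 * jffs2_garbage_collect_pass().
 *
 *	for (;;) {
 *		// ... sleep / wait to be woken ...
 *		if (!jffs2_thread_should_wake(c))
 *			continue;
 *		if (jffs2_garbage_collect_pass(c))
 *			break;
 *	}
 */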
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and pick eventually a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}