/*
 * Copyright (C) International Business Machines Corp., 2000-2005
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

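/*
 * A metapage covers PSIZE bytes.  When the page cache page is larger
 * than a metapage, several metapages share one page: a meta_anchor,
 * hung off page_private(), tracks the metapages within the page and
 * the number of I/Os in flight on it.  When the page and metapage are
 * the same size, page_private() points at the lone metapage directly.
 */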
#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

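/*
 * Slab constructor for the metapage cache: give a newly created object
 * sane initial values, mark it free, and set up its wait queue.
 */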
static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct metapage *mp = (struct metapage *)foo;

	mp->lid = 0;
	mp->lsn = 0;
	mp->flag = 0;
	mp->data = NULL;
	mp->clsn = 0;
	mp->log = NULL;
	set_bit(META_free, &mp->flag);
	init_waitqueue_head(&mp->wait);
}

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
	mp->flag = 0;
	set_bit(META_free, &mp->flag);

	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, init_once, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

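/*
 * Translate a block offset in a metadata inode into a disk address and
 * trim *len to the length of the mapped extent.  Inodes with a real
 * inode number are resolved through xtLookup(); i_ino == 0 means the
 * blocks are used 1:1 (no mapping).  Returns 0 for an unmapped block
 * or one past end of file.
 */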
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    unsigned int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done,
				int err)
{
	struct page *page = bio->bi_private;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);

	return 0;
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
/*
 * This can race. Recheck that log hasn't been set to null, and after
 * acquiring logsync lock, recheck lsn
 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static int metapage_write_end_io(struct bio *bio, unsigned int bytes_done,
				 int err)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_size)
		return 1;

	if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
	return 0;
}

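/*
 * Write back every dirty metapage in the page.  Metapages that are
 * contiguous both within the page and on disk are coalesced into a
 * single bio.  A metapage held nohomeok (and not being forced out) is
 * skipped and the page redirtied; if the journal isn't already doing
 * I/O it is kicked so the hold can eventually be released.
 */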
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	unsigned int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	unsigned int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	unsigned int len;
	unsigned int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	unsigned int offset;

	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				set_bit(META_io, &mp->flag);
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_size)
				goto dump_bio;
			submit_bio(WRITE, bio);
			bio = NULL;
		} else {
			set_page_writeback(page);
			inc_io(page);
		}
		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			/* Need better error handling */
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			dec_io(page, last_write_complete);
			continue;
		}
		set_bit(META_io, &mp->flag);
		len = min(xlen, (uint) JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_size)
			goto dump_bio;

		submit_bio(WRITE, bio);
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	dump_mem("bio", bio, sizeof(*bio));
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);

	return -EIO;
}

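/*
 * Read the blocks backing a page of metapages.  Each mapped extent
 * gets its own bio; unmapped blocks are simply skipped.  The page is
 * unlocked (and marked up to date) by last_read_complete() once the
 * final bio finishes.
 */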
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	unsigned int block_offset;
	unsigned int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	unsigned int xlen;
	unsigned int len;
	unsigned int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(READ, bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_sector = pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(READ, bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

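/*
 * Allow the VM to reclaim the page only if none of its metapages is
 * still referenced, held nohomeok, or dirty; detach and free the rest.
 */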
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
	BUG_ON(offset);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.sync_page	= block_sync_page,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

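/*
 * Look up (or create) the metapage covering @lblock of @inode and
 * return it with META_locked held and its reference count raised.
 * @absolute routes the lookup through the block device's direct inode;
 * with @new the contents are zeroed rather than taken from disk.
 */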
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_CACHE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "__get_metapage: mp->logical_size != size");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "__get_metapage: using a "
					  "discarded metapage");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		mp->page = page;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	page_cache_get(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	page_cache_get(page);
	lock_page(page);
	set_page_dirty(page);
	write_one_page(page, 1);
	clear_bit(META_forcewrite, &mp->flag);
	page_cache_release(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	page_cache_get(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

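/*
 * Drop a reference on a metapage.  When the last reference goes away,
 * a dirty metapage leaves its page marked dirty for writeback (or is
 * written immediately if META_sync is set), and the metapage itself is
 * freed if nothing else pins it.
 */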
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		page_cache_release(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			write_one_page(page, 1);
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	page_cache_release(page);
}

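/*
 * Mark every metapage backing the block range [addr, addr + len) of
 * the block device mapping as discarded: its dirty bit is cleared and
 * it is dropped from the log sync list, so it is released without ever
 * being written back.
 */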
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard. They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
		    int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif