fs/gfs2/meta_io.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

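/**
 * gfs2_aspace_writepage - Write a metadata page and its dirty buffers to disk
 * @page: The locked page to be written
 * @wbc: The writeback control
 *
 * Walks the buffers attached to @page, locking each dirty, mapped buffer
 * (blocking on the buffer lock only when wbc->sync_mode is not WB_SYNC_NONE),
 * marking it for asynchronous write and submitting it with REQ_META | REQ_PRIO.
 * If a buffer cannot be locked, the page is redirtied for a later pass.
 *
 * Returns: 0
 */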
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh, *head;
        int nr_underway = 0;
        int write_flags = REQ_META | REQ_PRIO |
                          (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);

        BUG_ON(!PageLocked(page));
        BUG_ON(!page_has_buffers(page));

        head = page_buffers(page);
        bh = head;

        do {
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
                 * potentially cause a busy-wait loop from flusher thread and kswapd
                 * activity, but those code paths have their own higher-level
                 * throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
                        mark_buffer_async_write(bh);
                } else {
                        unlock_buffer(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        /*
         * The page and its buffers are protected by PageWriteback(), so we can
         * drop the bh refcounts early.
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(REQ_OP_WRITE, write_flags, bh);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        unlock_page(page);

        if (nr_underway == 0)
                end_page_writeback(page);

        return 0;
}

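/*
 * Address space operations for GFS2 metadata pages; both tables share the
 * same writepage and releasepage hooks.
 */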
const struct address_space_operations gfs2_meta_aops = {
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
        .writepage = gfs2_aspace_writepage,
        .releasepage = gfs2_releasepage,
};

/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
        struct address_space *mapping = gfs2_glock2aspace(gl);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct page *page;
        struct buffer_head *bh;
        unsigned int shift;
        unsigned long index;
        unsigned int bufnum;

        if (mapping == NULL)
                mapping = &sdp->sd_aspace;

        shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
        index = blkno >> shift;             /* convert block to page */
        bufnum = blkno - (index << shift);  /* block buf index within page */

        if (create) {
                for (;;) {
                        page = grab_cache_page(mapping, index);
                        if (page)
                                break;
                        yield();
                }
        } else {
                page = find_get_page_flags(mapping, index,
                                           FGP_LOCK|FGP_ACCESSED);
                if (!page)
                        return NULL;
        }

        if (!page_has_buffers(page))
                create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

        /* Locate header for our buffer within our page */
        for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
                /* Do nothing */;
        get_bh(bh);

        if (!buffer_mapped(bh))
                map_bh(bh, sdp->sd_vfs, blkno);

        unlock_page(page);
        put_page(page);

        return bh;
}

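/**
 * meta_prep_new - Prepare a freshly allocated buffer as a new metadata block
 * @bh: The buffer to initialise
 *
 * Marks the buffer clean and uptodate and stamps the GFS2 magic number into
 * the metadata header at the start of the block.
 */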
static void meta_prep_new(struct buffer_head *bh)
{
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
        struct buffer_head *bh;
        bh = gfs2_getbuf(gl, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
}

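/*
 * gfs2_meta_read_endio - bio completion handler used by gfs2_submit_bhs()
 *
 * For each segment of the completed bio, find the buffer heads it covers and
 * call their b_end_io handlers with the I/O status, then release the bio.
 */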
static void gfs2_meta_read_endio(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct buffer_head *bh = page_buffers(page);
                unsigned int len = bvec->bv_len;

                while (bh_offset(bh) < bvec->bv_offset)
                        bh = bh->b_this_page;
                do {
                        struct buffer_head *next = bh->b_this_page;
                        len -= bh->b_size;
                        bh->b_end_io(bh, !bio->bi_error);
                        bh = next;
                } while (bh && len);
        }
        bio_put(bio);
}

/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
                            int num)
{
        struct buffer_head *bh = bhs[0];
        struct bio *bio;
        int i;

        if (!num)
                return;

        bio = bio_alloc(GFP_NOIO, num);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        for (i = 0; i < num; i++) {
                bh = bhs[i];
                bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
        }
        bio->bi_end_io = gfs2_meta_read_endio;
        bio_set_op_attrs(bio, op, op_flags);
        submit_bio(bio);
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: DIO_WAIT if the caller should wait for the read to complete
 * @rahead: If non-zero, also start a read of the following block
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   int rahead, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct buffer_head *bh, *bhs[2];
        int num = 0;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                *bhp = NULL;
                return -EIO;
        }

        *bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                flags &= ~DIO_WAIT;
        } else {
                bh->b_end_io = end_buffer_read_sync;
                get_bh(bh);
                bhs[num++] = bh;
        }

        if (rahead) {
                bh = gfs2_getbuf(gl, blkno + 1, CREATE);

                lock_buffer(bh);
                if (buffer_uptodate(bh)) {
                        unlock_buffer(bh);
                        brelse(bh);
                } else {
                        bh->b_end_io = end_buffer_read_sync;
                        bhs[num++] = bh;
                }
        }

        gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
        if (!(flags & DIO_WAIT))
                return 0;

        bh = *bhp;
        wait_on_buffer(bh);
        if (unlikely(!buffer_uptodate(bh))) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && tr->tr_touched)
                        gfs2_io_error_bh(sdp, bh);
                brelse(bh);
                *bhp = NULL;
                return -EIO;
        }

        return 0;
}

/**
 * gfs2_meta_wait - Wait for a previously submitted block read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        wait_on_buffer(bh);

        if (!buffer_uptodate(bh)) {
                struct gfs2_trans *tr = current->journal_info;
                if (tr && tr->tr_touched)
                        gfs2_io_error_bh(sdp, bh);
                return -EIO;
        }
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return 0;
}

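/**
 * gfs2_remove_from_journal - Forget a buffer as far as the journal is concerned
 * @bh: The buffer to remove
 * @meta: REMOVE_META for a metadata block, anything else for journaled data
 *
 * If the buffer is pinned, it is unpinned, dropped from the log list and the
 * removal is accounted to the current transaction.  A revoke is queued if the
 * buffer's bufdata still belongs to a transaction; otherwise, if the buffer
 * had been pinned, the bufdata is freed.  The buffer ends up neither dirty
 * nor uptodate.
 */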
void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
        struct address_space *mapping = bh->b_page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct gfs2_bufdata *bd = bh->b_private;
        struct gfs2_trans *tr = current->journal_info;
        int was_pinned = 0;

        if (test_clear_buffer_pinned(bh)) {
                trace_gfs2_pin(bd, 0);
                atomic_dec(&sdp->sd_log_pinned);
                list_del_init(&bd->bd_list);
                if (meta == REMOVE_META)
                        tr->tr_num_buf_rm++;
                else
                        tr->tr_num_databuf_rm++;
                tr->tr_touched = 1;
                was_pinned = 1;
                brelse(bh);
        }
        if (bd) {
                spin_lock(&sdp->sd_ail_lock);
                if (bd->bd_tr) {
                        gfs2_trans_add_revoke(sdp, bd);
                } else if (was_pinned) {
                        bh->b_private = NULL;
                        kmem_cache_free(gfs2_bufdata_cachep, bd);
                }
                spin_unlock(&sdp->sd_ail_lock);
        }
        clear_buffer_dirty(bh);
        clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first block in the run
 * @blen: the number of blocks in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *bh;

        while (blen) {
                bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
                if (bh) {
                        lock_buffer(bh);
                        gfs2_log_lock(sdp);
                        gfs2_remove_from_journal(bh, REMOVE_META);
                        gfs2_log_unlock(sdp);
                        unlock_buffer(bh);
                        brelse(bh);
                }

                bstart++;
                blen--;
        }
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
                              struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_glock *gl = ip->i_gl;
        struct buffer_head *bh;
        int ret = 0;
        u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
        int rahead = 0;

        if (num == ip->i_no_addr)
                rahead = ip->i_rahead;

        ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
        if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
                brelse(bh);
                ret = -EIO;
        }
        *bhp = bh;
        return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                     sdp->sd_sb.sb_bsize_shift;

        BUG_ON(!extlen);

        if (max_ra < 1)
                max_ra = 1;
        if (extlen > max_ra)
                extlen = max_ra;

        first_bh = gfs2_getbuf(gl, dblock, CREATE);

        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
                ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);

        dblock++;
        extlen--;

        while (extlen) {
                bh = gfs2_getbuf(gl, dblock, CREATE);

                if (!buffer_uptodate(bh) && !buffer_locked(bh))
                        ll_rw_block(REQ_OP_READ, REQ_RAHEAD | REQ_META, 1, &bh);
                brelse(bh);
                dblock++;
                extlen--;
                if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
                        goto out;
        }

        wait_on_buffer(first_bh);
out:
        return first_bh;
}