fs/gfs2/lops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

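/*
 * Caller-side sketch (illustrative, not a definitive call site): buffers
 * normally arrive here from gfs2_trans_add_meta()/gfs2_trans_add_data()
 * in trans.c, which take the log lock around the pin, roughly:
 *
 *	gfs2_log_lock(sdp);
 *	if (list_empty(&bd->bd_list)) {
 *		gfs2_pin(sdp, bd->bd_bh);
 *		list_add_tail(&bd->bd_list, &tr->tr_buf);
 *	}
 *	gfs2_log_unlock(sdp);
 *
 * The real call sites carry more bookkeeping; see trans.c.
 */
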
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

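/*
 * AIL lifecycle (informational sketch): gfs2_unpin() above places the
 * buffer on its transaction's ail1 list while the in-place write is
 * outstanding. Once that write completes, the housekeeping in log.c
 * (gfs2_ail1_empty()) moves it to ail2, and ail2 entries are released as
 * the log tail advances past the transaction. If the buffer is redirtied
 * in a new transaction first, gfs2_pin() moves it from the old
 * transaction's list to ail2 so that transaction no longer waits on it.
 */
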
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}

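/*
 * Worked example for gfs2_log_bmap(): given a journal extent
 * { .lblock = 8, .blocks = 16, .dblock = 4096 }, flush-head block 10
 * falls inside the extent (8 <= 10 < 24) and maps to device block
 * 4096 + 10 - 8 = 4098. A return of -1 (as a u64) means the logical
 * block was not covered by any extent, which should never happen for a
 * correctly mapped journal.
 */
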
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them, setting the
 * error flag according to the status of the i/o request. It is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  int error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			set_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (bio->bi_error) {
		sdp->sd_log_error = bio->bi_error;
		fs_err(sdp, "Error %d writing to log\n", bio->bi_error);
	}

	bio_for_each_segment_all(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @op: REQ_OP
 * @op_flags: req_flag_bits
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
		submit_bio(sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

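/*
 * Example (sketch): ordinary log writes in this file submit with
 *
 *	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
 *
 * while a caller wanting barrier semantics could pass request flags in
 * op_flags, e.g. REQ_PREFLUSH | REQ_FUA (illustrative only; the actual
 * flag policy is decided by the callers in log.c).
 */
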
/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

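/*
 * Sector arithmetic above, worked through: with 4KiB filesystem blocks,
 * sb->s_blocksize >> 9 == 8 sectors per block, so fs block 100 starts at
 * 512-byte sector 800. BIO_MAX_PAGES merely caps the vector count; the
 * block layer may still split the bio if the device requires it.
 */
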
/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}

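/*
 * The sequential-write check in gfs2_log_get_bio(), worked through: for
 * 4KiB blocks sd_fsb2bb_shift == 3, so a cached bio ending at sector 808
 * converts to fs block 808 >> 3 == 101; a request for blkno 101 can then
 * be appended, while anything else forces a flush and a fresh bio.
 */
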
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
			   unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

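/*
 * Why escaping matters (informational): GFS2_MAGIC (0x01161970) marks the
 * start of every metadata block. If a journaled *data* block happens to
 * begin with that value, log replay could misread it as metadata, so such
 * buffers are flagged "escaped": the copy written to the log has its
 * first word zeroed (see gfs2_before_commit() below) and
 * databuf_lo_scan_elements() writes the magic number back on replay.
 */
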
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

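/*
 * On-disk layout produced by one pass of the loop above (sketch):
 *
 *	+------------------------------------------------------------+
 *	| log descriptor block: header, then num block numbers       |
 *	| (for jdata, each number is followed by an escape flag)     |
 *	+------------------------------------------------------------+
 *	| block 1 contents | ... | block num contents                |
 *	+------------------------------------------------------------+
 *
 * hence the "num + 1" passed as ld_length when building the descriptor.
 */
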
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

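/*
 * Sizing note (informational): gfs2_struct2blk() computes how many log
 * blocks the revoke list needs. The first block loses
 * sizeof(struct gfs2_log_descriptor) bytes to the descriptor; each
 * continuation block starts with a GFS2_METATYPE_LB header instead,
 * which is exactly the layout the loop above maintains via "offset".
 */
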
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The transaction being committed
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};

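/*
 * Dispatch sketch (illustrative): the lops_*() helpers in lops.h walk
 * this NULL-terminated table, along the lines of:
 *
 *	int x;
 *	for (x = 0; gfs2_log_ops[x]; x++)
 *		if (gfs2_log_ops[x]->lo_before_commit)
 *			gfs2_log_ops[x]->lo_before_commit(sdp, tr);
 *
 * so adding a new log-element type only requires a new table entry here.
 */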