/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

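/**
 * buffer_is_rgrp - Check whether a buffer belongs to a resource group
 * @bd: The buffer data
 *
 * The glock under which a buffer was journaled identifies what the
 * buffer covers, so an rgrp bitmap block is recognised by its glock
 * being of type LM_TYPE_RGRP.
 */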
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

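/**
 * maybe_release_space - Make freed-but-pinned rgrp space allocatable again
 * @bd: The buffer data of a pinned rgrp bitmap block
 *
 * If the bitmap has a clone (a copy that hides blocks freed during the
 * current log cycle from the allocator), copy the now-committed real
 * bitmap over the clone, issue discards for the freed blocks when the
 * filesystem is mounted with the discard option, and reset the clone
 * accounting so the space can be reused.
 */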
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The transaction to whose AIL list the buffer will be attached
 *
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

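/**
 * gfs2_log_incr_head - Advance the log flush head by one block
 * @sdp: The superblock
 *
 * The flush head wraps back to the start of the journal once it
 * reaches the end. The BUG_ON() catches the flush head catching up
 * with the log tail while a flush is still in progress.
 */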
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

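/**
 * gfs2_log_bmap - Map the current log flush head to a device block
 * @sdp: The superblock
 *
 * The journal is described by a list of extents; this walks that list
 * to translate the logical journal block at the flush head into a
 * physical block number, then advances the flush head. As an
 * illustrative example (not taken from a real journal), an extent with
 * lblock = 16, dblock = 10000 and blocks = 32 would map logical block
 * 20 to device block 10004.
 *
 * Returns: The device block number, or -1 (as an unsigned value) if
 * the flush head lies outside every extent
 */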
static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio (the i/o status is carried in bio->bi_status)
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (bio->bi_status) {
		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
		       bio->bi_status, sdp->sd_jdesc->jd_jid);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @op: The REQ_OP operation (e.g. REQ_OP_WRITE)
 * @op_flags: Any additional REQ_* request flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
		submit_bio(sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is a cached bio in the
 * super block. When it returns, there will be a cached bio in the
 * super block which will have as many bio_vecs as the device is
 * happy to handle.
 *
 * Returns: Newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is not a cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
			   unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

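/**
 * gfs2_get_log_desc - Allocate and initialise a log descriptor block
 * @sdp: The superblock
 * @ld_type: The GFS2_LOG_DESC_* type (metadata, jdata or revoke)
 * @ld_length: The number of log blocks this descriptor covers,
 *             including the descriptor block itself
 * @ld_data1: Type-dependent payload (block count or revoke count)
 *
 * The backing page comes from gfs2_page_pool; ownership passes to the
 * log layer when the page is later handed to gfs2_log_write_page().
 */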
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

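/**
 * gfs2_check_magic - Decide whether a journaled data block needs escaping
 * @bh: The buffer to check
 *
 * A data block that happens to start with GFS2_MAGIC could be mistaken
 * for metadata during journal replay. Such blocks are flagged as
 * "escaped": they are logged with their first word zeroed, and replay
 * puts the magic number back (see the unescape step in
 * databuf_lo_scan_elements()).
 */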
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

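/*
 * list_sort() comparison function: order pinned buffers by their
 * in-place disk block number. Presumably this is done so that the
 * descriptor entries and the later in-place writeback tend towards
 * sequential I/O.
 */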
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

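/**
 * gfs2_before_commit - Write a list of pinned buffers to the log
 * @sdp: The superblock
 * @limit: The maximum number of blocks one descriptor can describe
 * @total: The total number of blocks to be written
 * @blist: The list of gfs2_bufdata to write out
 * @is_databuf: true for journaled data, false for metadata
 *
 * The blocks are written in chunks of up to @limit. Each chunk is a
 * descriptor block listing the in-place block numbers (plus a
 * per-block escape flag for data), followed by the block contents:
 *
 *   [ LD | blkno 0, blkno 1, ... ] [ block 0 ] [ block 1 ] ...
 *
 * Escaped data blocks are copied into a pool page with the leading
 * GFS2_MAGIC zeroed out instead of being written straight from the
 * page cache.
 */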
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

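/*
 * Once the transaction has been committed to the log, every metadata
 * buffer it pinned can be unpinned again; gfs2_unpin() moves each one
 * onto the transaction's AIL list for eventual in-place writeback.
 */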
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

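/**
 * buf_lo_scan_elements - Replay logged metadata blocks
 * @jd: The journal descriptor
 * @start: The journal block of the log descriptor
 * @ld: The log descriptor
 * @ptr: The array of in-place block numbers that follows @ld
 * @pass: The recovery pass
 *
 * In pass 1, for each logged block that has not been revoked, read the
 * journal copy, copy it over a fresh in-place buffer, verify the
 * metadata header and mark the buffer dirty so that recovery writes it
 * back to its proper location.
 */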
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

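/**
 * revoke_lo_before_commit - Write any pending revokes to the log
 * @sdp: The superblock
 * @tr: The current transaction (unused here)
 *
 * A revoke records that a previously journaled block has since been
 * freed or reused, so replay must not copy a stale log image over it.
 * Revokes are packed as big-endian 64-bit block numbers; the first
 * log block begins with a log descriptor and any continuation blocks
 * with a GFS2_METATYPE_LB header:
 *
 *   [ LD | blkno ... ] [ LB | blkno ... ] ...
 */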
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

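/**
 * revoke_lo_scan_elements - Collect revoke tags from the journal
 * @jd: The journal descriptor
 * @start: The journal block of the log descriptor
 * @ld: The log descriptor
 * @ptr: Unused here; revokes are read from the log blocks themselves
 * @pass: The recovery pass
 *
 * Runs in pass 0, before any blocks are replayed, so that the revoke
 * table consulted by gfs2_revoke_check() is complete by pass 1.
 */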
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The superblock
 * @tr: The current transaction
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

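/**
 * databuf_lo_scan_elements - Replay logged (journaled) data blocks
 * @jd: The journal descriptor
 * @start: The journal block of the log descriptor
 * @ld: The log descriptor
 * @ptr: Pairs of (block number, escape flag) that follow @ld
 * @pass: The recovery pass
 *
 * Like buf_lo_scan_elements(), except that each entry carries an
 * escape flag: if it is set, the block was escaped when it was logged
 * and its leading GFS2_MAGIC has to be restored during replay.
 */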
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};