/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
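
/*
 * A note on the locking in gfs2_ail_empty_gl(): the log lock is
 * dropped around gfs2_trans_add_revoke(), presumably because adding a
 * revoke can block (it allocates, and the log operations may take the
 * log lock themselves).  The walk re-reads the list head on every
 * iteration, so dropping the lock mid-walk is safe.
 */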

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;

	/* Only regular files have PTEs to invalidate; check ip before
	   touching the inode embedded inside it */
	if (!ip || !S_ISREG(ip->i_inode.i_mode))
		return;
	inode = &ip->i_inode;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_page_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;

	if (!ip || !S_ISREG(ip->i_inode.i_mode))
		return;
	inode = &ip->i_inode;

	truncate_inode_pages(inode->i_mapping, 0);
	gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
	clear_bit(GIF_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_wait - Wait for writeback of data
 * @gl: the glock
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */

static void gfs2_page_wait(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	int error;

	if (!S_ISREG(inode->i_mode))
		return;

	error = filemap_fdatawait(mapping);

	/* Put back any errors cleared by filemap_fdatawait() so they
	   can be caught by someone who can pass them up to user space. */

	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else if (error)
		set_bit(AS_EIO, &mapping->flags);
}
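
/*
 * Re-arming AS_ENOSPC/AS_EIO above means a later filemap_fdatawait()
 * call on the same mapping (e.g. on the fsync path) will still see
 * and return the error, rather than having it silently consumed here.
 */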

static void gfs2_page_writeback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;

	if (!S_ISREG(inode->i_mode))
		return;

	filemap_fdatawrite(mapping);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 * @flags: DIO_*
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must
 * not return to the caller to demote/unlock the glock until the I/O
 * is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_*
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	gl->gl_vn++;
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
		/* Pre-read the inode block so it is cached once the
		   lock is held, unless the holder asked to skip it */
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Called when another node needs the lock in EXCLUSIVE mode, or when
 * this lock has gone unused for too long and is being purged from our
 * node's glock cache; either way, we are dropping the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	gfs2_glock_drop_th(gl);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 * @flags: DIO_*
 *
 */

static void inode_go_sync(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (meta && data) {
			/* Start data writeback, flush the log, sync the
			   metadata, then wait for the data I/O to drain */
			gfs2_page_writeback(gl);
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl);
			gfs2_page_wait(gl);
			clear_bit(GLF_DIRTY, &gl->gl_flags);
		} else if (meta) {
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl);
		} else if (data) {
			gfs2_page_writeback(gl);
			gfs2_page_wait(gl);
		}
		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_*
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (meta) {
		struct gfs2_inode *ip = gl->gl_object;
		gfs2_meta_inval(gl);
		set_bit(GIF_INVALID, &ip->i_flags);
	}
	if (data)
		gfs2_page_inval(gl);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_flags & GL_LOCAL_EXCL))
		error = gfs2_truncatei_resume(ip);

	return error;
}
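
/*
 * The GFS2_DIF_TRUNC_IN_PROG check above lets the first locally
 * exclusive holder finish a truncate that was interrupted (e.g. by a
 * crash) via gfs2_truncatei_resume() before the inode is used again.
 */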

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by
 *		     a process
 * @gh: the glock holder
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * inode_greedy - adjust the greedy quantum for an inode glock
 * @gl: the glock
 *
 */

static void inode_greedy(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
	unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
	unsigned int new_time;

	spin_lock(&ip->i_spin);

	if (time_after(ip->i_last_pfault + quantum, jiffies)) {
		new_time = ip->i_greedy + quantum;
		if (new_time > max)
			new_time = max;
	} else {
		new_time = ip->i_greedy - quantum;
		/* new_time wraps on underflow, so treat zero or
		   anything above max as the minimum */
		if (!new_time || new_time > max)
			new_time = 1;
	}

	ip->i_greedy = new_time;

	spin_unlock(&ip->i_spin);

	iput(&ip->i_inode);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *		  the first holder on this node.
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *		    the last holder on this node.
 * @gh: the glock holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(ip);
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the log from the recovered journal head */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_drop_th(gl);
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}
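
/*
 * Each table below binds the callbacks above to one lock type.  The
 * glock core calls them indirectly through gl->gl_ops; roughly (a
 * sketch, not verbatim glock.c code):
 *
 *	const struct gfs2_glock_operations *glops = gl->gl_ops;
 *
 *	if (glops->go_lock)
 *		error = glops->go_lock(gh);
 *
 * A type that omits an operation simply has nothing to do at that
 * point.  gfs2_glock_xmote_th() and gfs2_glock_drop_th() are the
 * generic state-change and drop paths, used directly when no
 * type-specific wrapper is needed.
 */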

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_greedy = inode_greedy,
	.go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_sync = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_JOURNAL,
};