/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
9 | ||
10 | #include <linux/sched.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/spinlock.h> | |
13 | #include <linux/completion.h> | |
14 | #include <linux/buffer_head.h> | |
5c676f6d | 15 | #include <linux/gfs2_ondisk.h> |
7d308590 | 16 | #include <linux/lm_interface.h> |
b3b94faa DT |
17 | |
18 | #include "gfs2.h" | |
5c676f6d | 19 | #include "incore.h" |
b3b94faa DT |
20 | #include "bmap.h" |
21 | #include "glock.h" | |
22 | #include "glops.h" | |
23 | #include "inode.h" | |
24 | #include "log.h" | |
25 | #include "meta_io.h" | |
b3b94faa DT |
26 | #include "recovery.h" |
27 | #include "rgrp.h" | |
5c676f6d | 28 | #include "util.h" |
b3b94faa | 29 | |
ba7f7290 SW |
30 | |
31 | /** | |
32 | * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock | |
33 | * @gl: the glock | |
34 | * | |
35 | */ | |
36 | ||
37 | static void gfs2_pte_inval(struct gfs2_glock *gl) | |
38 | { | |
39 | struct gfs2_inode *ip; | |
40 | struct inode *inode; | |
41 | ||
42 | ip = gl->gl_object; | |
43 | inode = &ip->i_inode; | |
44 | if (!ip || !S_ISREG(ip->i_di.di_mode)) | |
45 | return; | |
46 | ||
47 | if (!test_bit(GIF_PAGED, &ip->i_flags)) | |
48 | return; | |
49 | ||
50 | unmap_shared_mapping_range(inode->i_mapping, 0, 0); | |
51 | ||
52 | if (test_bit(GIF_SW_PAGED, &ip->i_flags)) | |
53 | set_bit(GLF_DIRTY, &gl->gl_flags); | |
54 | ||
55 | clear_bit(GIF_SW_PAGED, &ip->i_flags); | |
56 | } | |
57 | ||
58 | /** | |
59 | * gfs2_page_inval - Invalidate all pages associated with a glock | |
60 | * @gl: the glock | |
61 | * | |
62 | */ | |
63 | ||
64 | static void gfs2_page_inval(struct gfs2_glock *gl) | |
65 | { | |
66 | struct gfs2_inode *ip; | |
67 | struct inode *inode; | |
68 | ||
69 | ip = gl->gl_object; | |
70 | inode = &ip->i_inode; | |
71 | if (!ip || !S_ISREG(ip->i_di.di_mode)) | |
72 | return; | |
73 | ||
74 | truncate_inode_pages(inode->i_mapping, 0); | |
75 | gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages); | |
76 | clear_bit(GIF_PAGED, &ip->i_flags); | |
77 | } | |
78 | ||
79 | /** | |
7276b3b0 | 80 | * gfs2_page_wait - Wait for writeback of data |
ba7f7290 | 81 | * @gl: the glock |
ba7f7290 SW |
82 | * |
83 | * Syncs data (not metadata) for a regular file. | |
84 | * No-op for all other types. | |
85 | */ | |
86 | ||
7276b3b0 | 87 | static void gfs2_page_wait(struct gfs2_glock *gl) |
ba7f7290 | 88 | { |
7276b3b0 SW |
89 | struct gfs2_inode *ip = gl->gl_object; |
90 | struct inode *inode = &ip->i_inode; | |
91 | struct address_space *mapping = inode->i_mapping; | |
92 | int error; | |
ba7f7290 | 93 | |
7276b3b0 | 94 | if (!S_ISREG(ip->i_di.di_mode)) |
ba7f7290 SW |
95 | return; |
96 | ||
7276b3b0 | 97 | error = filemap_fdatawait(mapping); |
ba7f7290 SW |
98 | |
99 | /* Put back any errors cleared by filemap_fdatawait() | |
100 | so they can be caught by someone who can pass them | |
101 | up to user space. */ | |
102 | ||
103 | if (error == -ENOSPC) | |
104 | set_bit(AS_ENOSPC, &mapping->flags); | |
105 | else if (error) | |
106 | set_bit(AS_EIO, &mapping->flags); | |
107 | ||
108 | } | |
109 | ||
7276b3b0 SW |
110 | static void gfs2_page_writeback(struct gfs2_glock *gl) |
111 | { | |
112 | struct gfs2_inode *ip = gl->gl_object; | |
113 | struct inode *inode = &ip->i_inode; | |
114 | struct address_space *mapping = inode->i_mapping; | |
115 | ||
116 | if (!S_ISREG(ip->i_di.di_mode)) | |
117 | return; | |
118 | ||
119 | filemap_fdatawrite(mapping); | |
120 | } | |
121 | ||
b3b94faa DT |
122 | /** |
123 | * meta_go_sync - sync out the metadata for this glock | |
124 | * @gl: the glock | |
125 | * @flags: DIO_* | |
126 | * | |
127 | * Called when demoting or unlocking an EX glock. We must flush | |
128 | * to disk all dirty buffers/pages relating to this glock, and must not | |
129 | * not return to caller to demote/unlock the glock until I/O is complete. | |
130 | */ | |
131 | ||
132 | static void meta_go_sync(struct gfs2_glock *gl, int flags) | |
133 | { | |
134 | if (!(flags & DIO_METADATA)) | |
135 | return; | |
136 | ||
137 | if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { | |
b09e593d | 138 | gfs2_log_flush(gl->gl_sbd, gl); |
7276b3b0 | 139 | gfs2_meta_sync(gl); |
b3b94faa DT |
140 | if (flags & DIO_RELEASE) |
141 | gfs2_ail_empty_gl(gl); | |
142 | } | |
143 | ||
b3b94faa DT |
144 | } |
145 | ||
146 | /** | |
147 | * meta_go_inval - invalidate the metadata for this glock | |
148 | * @gl: the glock | |
149 | * @flags: | |
150 | * | |
151 | */ | |
152 | ||
153 | static void meta_go_inval(struct gfs2_glock *gl, int flags) | |
154 | { | |
155 | if (!(flags & DIO_METADATA)) | |
156 | return; | |
157 | ||
158 | gfs2_meta_inval(gl); | |
159 | gl->gl_vn++; | |
160 | } | |
161 | ||
b3b94faa DT |
162 | /** |
163 | * inode_go_xmote_th - promote/demote a glock | |
164 | * @gl: the glock | |
165 | * @state: the requested state | |
166 | * @flags: | |
167 | * | |
168 | */ | |
169 | ||
170 | static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state, | |
171 | int flags) | |
172 | { | |
173 | if (gl->gl_state != LM_ST_UNLOCKED) | |
174 | gfs2_pte_inval(gl); | |
175 | gfs2_glock_xmote_th(gl, state, flags); | |
176 | } | |
177 | ||
178 | /** | |
179 | * inode_go_xmote_bh - After promoting/demoting a glock | |
180 | * @gl: the glock | |
181 | * | |
182 | */ | |
183 | ||
184 | static void inode_go_xmote_bh(struct gfs2_glock *gl) | |
185 | { | |
186 | struct gfs2_holder *gh = gl->gl_req_gh; | |
187 | struct buffer_head *bh; | |
188 | int error; | |
189 | ||
190 | if (gl->gl_state != LM_ST_UNLOCKED && | |
191 | (!gh || !(gh->gh_flags & GL_SKIP))) { | |
7276b3b0 | 192 | error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh); |
b3b94faa DT |
193 | if (!error) |
194 | brelse(bh); | |
195 | } | |
196 | } | |
197 | ||
/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or the lock (unused for
 * too long) is being purged from our node's glock cache; we're dropping it.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	/* Tear down PTEs first so no mapping outlives the lock. */
	gfs2_pte_inval(gl);
	gfs2_glock_drop_th(gl);
}
212 | ||
213 | /** | |
214 | * inode_go_sync - Sync the dirty data and/or metadata for an inode glock | |
215 | * @gl: the glock protecting the inode | |
216 | * @flags: | |
217 | * | |
218 | */ | |
219 | ||
220 | static void inode_go_sync(struct gfs2_glock *gl, int flags) | |
221 | { | |
222 | int meta = (flags & DIO_METADATA); | |
223 | int data = (flags & DIO_DATA); | |
224 | ||
225 | if (test_bit(GLF_DIRTY, &gl->gl_flags)) { | |
226 | if (meta && data) { | |
7276b3b0 | 227 | gfs2_page_writeback(gl); |
b09e593d | 228 | gfs2_log_flush(gl->gl_sbd, gl); |
7276b3b0 SW |
229 | gfs2_meta_sync(gl); |
230 | gfs2_page_wait(gl); | |
b3b94faa DT |
231 | clear_bit(GLF_DIRTY, &gl->gl_flags); |
232 | } else if (meta) { | |
b09e593d | 233 | gfs2_log_flush(gl->gl_sbd, gl); |
7276b3b0 SW |
234 | gfs2_meta_sync(gl); |
235 | } else if (data) { | |
236 | gfs2_page_writeback(gl); | |
237 | gfs2_page_wait(gl); | |
238 | } | |
b3b94faa DT |
239 | if (flags & DIO_RELEASE) |
240 | gfs2_ail_empty_gl(gl); | |
241 | } | |
b3b94faa DT |
242 | } |
243 | ||
244 | /** | |
245 | * inode_go_inval - prepare a inode glock to be released | |
246 | * @gl: the glock | |
247 | * @flags: | |
248 | * | |
249 | */ | |
250 | ||
251 | static void inode_go_inval(struct gfs2_glock *gl, int flags) | |
252 | { | |
253 | int meta = (flags & DIO_METADATA); | |
254 | int data = (flags & DIO_DATA); | |
255 | ||
256 | if (meta) { | |
257 | gfs2_meta_inval(gl); | |
258 | gl->gl_vn++; | |
259 | } | |
260 | if (data) | |
261 | gfs2_page_inval(gl); | |
262 | } | |
263 | ||
264 | /** | |
265 | * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock | |
266 | * @gl: the glock | |
267 | * | |
268 | * Returns: 1 if it's ok | |
269 | */ | |
270 | ||
271 | static int inode_go_demote_ok(struct gfs2_glock *gl) | |
272 | { | |
273 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
274 | int demote = 0; | |
275 | ||
5c676f6d | 276 | if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages) |
b3b94faa DT |
277 | demote = 1; |
278 | else if (!sdp->sd_args.ar_localcaching && | |
279 | time_after_eq(jiffies, gl->gl_stamp + | |
280 | gfs2_tune_get(sdp, gt_demote_secs) * HZ)) | |
281 | demote = 1; | |
282 | ||
283 | return demote; | |
284 | } | |
285 | ||
286 | /** | |
287 | * inode_go_lock - operation done after an inode lock is locked by a process | |
288 | * @gl: the glock | |
289 | * @flags: | |
290 | * | |
291 | * Returns: errno | |
292 | */ | |
293 | ||
294 | static int inode_go_lock(struct gfs2_holder *gh) | |
295 | { | |
296 | struct gfs2_glock *gl = gh->gh_gl; | |
5c676f6d | 297 | struct gfs2_inode *ip = gl->gl_object; |
b3b94faa DT |
298 | int error = 0; |
299 | ||
300 | if (!ip) | |
301 | return 0; | |
302 | ||
303 | if (ip->i_vn != gl->gl_vn) { | |
304 | error = gfs2_inode_refresh(ip); | |
305 | if (error) | |
306 | return error; | |
307 | gfs2_inode_attr_in(ip); | |
308 | } | |
309 | ||
310 | if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && | |
311 | (gl->gl_state == LM_ST_EXCLUSIVE) && | |
312 | (gh->gh_flags & GL_LOCAL_EXCL)) | |
313 | error = gfs2_truncatei_resume(ip); | |
314 | ||
315 | return error; | |
316 | } | |
317 | ||
318 | /** | |
319 | * inode_go_unlock - operation done before an inode lock is unlocked by a | |
320 | * process | |
321 | * @gl: the glock | |
322 | * @flags: | |
323 | * | |
324 | */ | |
325 | ||
326 | static void inode_go_unlock(struct gfs2_holder *gh) | |
327 | { | |
328 | struct gfs2_glock *gl = gh->gh_gl; | |
5c676f6d | 329 | struct gfs2_inode *ip = gl->gl_object; |
b3b94faa | 330 | |
75d3b817 SW |
331 | if (ip == NULL) |
332 | return; | |
333 | if (test_bit(GLF_DIRTY, &gl->gl_flags)) | |
334 | gfs2_inode_attr_in(ip); | |
335 | gfs2_meta_cache_flush(ip); | |
b3b94faa DT |
336 | } |
337 | ||
338 | /** | |
339 | * inode_greedy - | |
340 | * @gl: the glock | |
341 | * | |
342 | */ | |
343 | ||
344 | static void inode_greedy(struct gfs2_glock *gl) | |
345 | { | |
346 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
5c676f6d | 347 | struct gfs2_inode *ip = gl->gl_object; |
b3b94faa DT |
348 | unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum); |
349 | unsigned int max = gfs2_tune_get(sdp, gt_greedy_max); | |
350 | unsigned int new_time; | |
351 | ||
352 | spin_lock(&ip->i_spin); | |
353 | ||
354 | if (time_after(ip->i_last_pfault + quantum, jiffies)) { | |
355 | new_time = ip->i_greedy + quantum; | |
356 | if (new_time > max) | |
357 | new_time = max; | |
358 | } else { | |
359 | new_time = ip->i_greedy - quantum; | |
360 | if (!new_time || new_time > max) | |
361 | new_time = 1; | |
362 | } | |
363 | ||
364 | ip->i_greedy = new_time; | |
365 | ||
366 | spin_unlock(&ip->i_spin); | |
367 | ||
feaa7bba | 368 | iput(&ip->i_inode); |
b3b94faa DT |
369 | } |
370 | ||
371 | /** | |
372 | * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock | |
373 | * @gl: the glock | |
374 | * | |
375 | * Returns: 1 if it's ok | |
376 | */ | |
377 | ||
378 | static int rgrp_go_demote_ok(struct gfs2_glock *gl) | |
379 | { | |
380 | return !gl->gl_aspace->i_mapping->nrpages; | |
381 | } | |
382 | ||
383 | /** | |
384 | * rgrp_go_lock - operation done after an rgrp lock is locked by | |
385 | * a first holder on this node. | |
386 | * @gl: the glock | |
387 | * @flags: | |
388 | * | |
389 | * Returns: errno | |
390 | */ | |
391 | ||
392 | static int rgrp_go_lock(struct gfs2_holder *gh) | |
393 | { | |
5c676f6d | 394 | return gfs2_rgrp_bh_get(gh->gh_gl->gl_object); |
b3b94faa DT |
395 | } |
396 | ||
397 | /** | |
398 | * rgrp_go_unlock - operation done before an rgrp lock is unlocked by | |
399 | * a last holder on this node. | |
400 | * @gl: the glock | |
401 | * @flags: | |
402 | * | |
403 | */ | |
404 | ||
405 | static void rgrp_go_unlock(struct gfs2_holder *gh) | |
406 | { | |
5c676f6d | 407 | gfs2_rgrp_bh_put(gh->gh_gl->gl_object); |
b3b94faa DT |
408 | } |
409 | ||
410 | /** | |
411 | * trans_go_xmote_th - promote/demote the transaction glock | |
412 | * @gl: the glock | |
413 | * @state: the requested state | |
414 | * @flags: | |
415 | * | |
416 | */ | |
417 | ||
418 | static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state, | |
419 | int flags) | |
420 | { | |
421 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
422 | ||
423 | if (gl->gl_state != LM_ST_UNLOCKED && | |
424 | test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { | |
425 | gfs2_meta_syncfs(sdp); | |
426 | gfs2_log_shutdown(sdp); | |
427 | } | |
428 | ||
429 | gfs2_glock_xmote_th(gl, state, flags); | |
430 | } | |
431 | ||
432 | /** | |
433 | * trans_go_xmote_bh - After promoting/demoting the transaction glock | |
434 | * @gl: the glock | |
435 | * | |
436 | */ | |
437 | ||
438 | static void trans_go_xmote_bh(struct gfs2_glock *gl) | |
439 | { | |
440 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
feaa7bba | 441 | struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); |
5c676f6d | 442 | struct gfs2_glock *j_gl = ip->i_gl; |
b3b94faa DT |
443 | struct gfs2_log_header head; |
444 | int error; | |
445 | ||
446 | if (gl->gl_state != LM_ST_UNLOCKED && | |
447 | test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { | |
feaa7bba | 448 | gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode)); |
b3b94faa DT |
449 | j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA); |
450 | ||
451 | error = gfs2_find_jhead(sdp->sd_jdesc, &head); | |
452 | if (error) | |
453 | gfs2_consist(sdp); | |
454 | if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) | |
455 | gfs2_consist(sdp); | |
456 | ||
457 | /* Initialize some head of the log stuff */ | |
458 | if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) { | |
459 | sdp->sd_log_sequence = head.lh_sequence + 1; | |
460 | gfs2_log_pointers_init(sdp, head.lh_blkno); | |
461 | } | |
462 | } | |
463 | } | |
464 | ||
465 | /** | |
466 | * trans_go_drop_th - unlock the transaction glock | |
467 | * @gl: the glock | |
468 | * | |
469 | * We want to sync the device even with localcaching. Remember | |
470 | * that localcaching journal replay only marks buffers dirty. | |
471 | */ | |
472 | ||
473 | static void trans_go_drop_th(struct gfs2_glock *gl) | |
474 | { | |
475 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
476 | ||
477 | if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { | |
478 | gfs2_meta_syncfs(sdp); | |
479 | gfs2_log_shutdown(sdp); | |
480 | } | |
481 | ||
482 | gfs2_glock_drop_th(gl); | |
483 | } | |
484 | ||
485 | /** | |
486 | * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock | |
487 | * @gl: the glock | |
488 | * | |
489 | * Returns: 1 if it's ok | |
490 | */ | |
491 | ||
492 | static int quota_go_demote_ok(struct gfs2_glock *gl) | |
493 | { | |
494 | return !atomic_read(&gl->gl_lvb_count); | |
495 | } | |
496 | ||
8fb4b536 | 497 | const struct gfs2_glock_operations gfs2_meta_glops = { |
b3b94faa DT |
498 | .go_xmote_th = gfs2_glock_xmote_th, |
499 | .go_drop_th = gfs2_glock_drop_th, | |
ea67eedb | 500 | .go_type = LM_TYPE_META, |
b3b94faa DT |
501 | }; |
502 | ||
8fb4b536 | 503 | const struct gfs2_glock_operations gfs2_inode_glops = { |
b3b94faa DT |
504 | .go_xmote_th = inode_go_xmote_th, |
505 | .go_xmote_bh = inode_go_xmote_bh, | |
506 | .go_drop_th = inode_go_drop_th, | |
507 | .go_sync = inode_go_sync, | |
508 | .go_inval = inode_go_inval, | |
509 | .go_demote_ok = inode_go_demote_ok, | |
510 | .go_lock = inode_go_lock, | |
511 | .go_unlock = inode_go_unlock, | |
512 | .go_greedy = inode_greedy, | |
ea67eedb | 513 | .go_type = LM_TYPE_INODE, |
b3b94faa DT |
514 | }; |
515 | ||
8fb4b536 | 516 | const struct gfs2_glock_operations gfs2_rgrp_glops = { |
b3b94faa DT |
517 | .go_xmote_th = gfs2_glock_xmote_th, |
518 | .go_drop_th = gfs2_glock_drop_th, | |
519 | .go_sync = meta_go_sync, | |
520 | .go_inval = meta_go_inval, | |
521 | .go_demote_ok = rgrp_go_demote_ok, | |
522 | .go_lock = rgrp_go_lock, | |
523 | .go_unlock = rgrp_go_unlock, | |
ea67eedb | 524 | .go_type = LM_TYPE_RGRP, |
b3b94faa DT |
525 | }; |
526 | ||
8fb4b536 | 527 | const struct gfs2_glock_operations gfs2_trans_glops = { |
b3b94faa DT |
528 | .go_xmote_th = trans_go_xmote_th, |
529 | .go_xmote_bh = trans_go_xmote_bh, | |
530 | .go_drop_th = trans_go_drop_th, | |
ea67eedb | 531 | .go_type = LM_TYPE_NONDISK, |
b3b94faa DT |
532 | }; |
533 | ||
8fb4b536 | 534 | const struct gfs2_glock_operations gfs2_iopen_glops = { |
b3b94faa DT |
535 | .go_xmote_th = gfs2_glock_xmote_th, |
536 | .go_drop_th = gfs2_glock_drop_th, | |
ea67eedb | 537 | .go_type = LM_TYPE_IOPEN, |
b3b94faa DT |
538 | }; |
539 | ||
8fb4b536 | 540 | const struct gfs2_glock_operations gfs2_flock_glops = { |
b3b94faa DT |
541 | .go_xmote_th = gfs2_glock_xmote_th, |
542 | .go_drop_th = gfs2_glock_drop_th, | |
ea67eedb | 543 | .go_type = LM_TYPE_FLOCK, |
b3b94faa DT |
544 | }; |
545 | ||
8fb4b536 | 546 | const struct gfs2_glock_operations gfs2_nondisk_glops = { |
b3b94faa DT |
547 | .go_xmote_th = gfs2_glock_xmote_th, |
548 | .go_drop_th = gfs2_glock_drop_th, | |
ea67eedb | 549 | .go_type = LM_TYPE_NONDISK, |
b3b94faa DT |
550 | }; |
551 | ||
8fb4b536 | 552 | const struct gfs2_glock_operations gfs2_quota_glops = { |
b3b94faa DT |
553 | .go_xmote_th = gfs2_glock_xmote_th, |
554 | .go_drop_th = gfs2_glock_drop_th, | |
555 | .go_demote_ok = quota_go_demote_ok, | |
ea67eedb | 556 | .go_type = LM_TYPE_QUOTA, |
b3b94faa DT |
557 | }; |
558 | ||
8fb4b536 | 559 | const struct gfs2_glock_operations gfs2_journal_glops = { |
b3b94faa DT |
560 | .go_xmote_th = gfs2_glock_xmote_th, |
561 | .go_drop_th = gfs2_glock_drop_th, | |
ea67eedb | 562 | .go_type = LM_TYPE_JOURNAL, |
b3b94faa DT |
563 | }; |
564 |