git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - fs/gfs2/rgrp.c
GFS2: Eliminate unnecessary check for state > 3 in bitfit
b3b94faa
DT
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
fe6c991c 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
b3b94faa
DT
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
e9fc2aa0 7 * of the GNU General Public License version 2.
b3b94faa
DT
8 */
9
b3b94faa
DT
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <linux/completion.h>
13#include <linux/buffer_head.h>
f42faf4f 14#include <linux/fs.h>
5c676f6d 15#include <linux/gfs2_ondisk.h>
1f466a47 16#include <linux/prefetch.h>
f15ab561 17#include <linux/blkdev.h>
7c9ca621 18#include <linux/rbtree.h>
b3b94faa
DT
19
20#include "gfs2.h"
5c676f6d 21#include "incore.h"
b3b94faa
DT
22#include "glock.h"
23#include "glops.h"
b3b94faa
DT
24#include "lops.h"
25#include "meta_io.h"
26#include "quota.h"
27#include "rgrp.h"
28#include "super.h"
29#include "trans.h"
5c676f6d 30#include "util.h"
172e045a 31#include "log.h"
c8cdf479 32#include "inode.h"
63997775 33#include "trace_gfs2.h"
b3b94faa 34
2c1e52aa 35#define BFITNOENT ((u32)~0)
6760bdcd 36#define NO_BLOCK ((u64)~0)
88c8ab1f 37
8e2e0047
BP
38#define RSRV_CONTENTION_FACTOR 4
39#define RGRP_RSRV_MAX_CONTENDERS 2
40
1f466a47
BP
41#if BITS_PER_LONG == 32
42#define LBITMASK (0x55555555UL)
43#define LBITSKIP55 (0x55555555UL)
44#define LBITSKIP00 (0x00000000UL)
45#else
46#define LBITMASK (0x5555555555555555UL)
47#define LBITSKIP55 (0x5555555555555555UL)
48#define LBITSKIP00 (0x0000000000000000UL)
49#endif
50
88c8ab1f
SW
51/*
52 * These routines are used by the resource group routines (rgrp.c)
53 * to keep track of block allocation. Each block is represented by two
feaa7bba
SW
54 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55 *
56 * 0 = Free
57 * 1 = Used (not metadata)
58 * 2 = Unlinked (still in use) inode
59 * 3 = Used (metadata)
88c8ab1f
SW
60 */
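As a hedged illustration of this two-bit encoding (assuming GFS2_NBBY == 4,
GFS2_BIT_SIZE == 2 and GFS2_BIT_MASK == 3 from gfs2_ondisk.h), a hypothetical
helper that extracts the GFS2_BLKST_* value of a single block from a bitmap
could look like this:

static inline unsigned char example_block_state(const unsigned char *bitmap,
						u32 block)
{
	const unsigned char byte = bitmap[block / GFS2_NBBY];
	const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (byte >> bit) & GFS2_BIT_MASK;	/* 0..3, one of GFS2_BLKST_* */
}

This mirrors what gfs2_testbit() below does once the correct byte of the
bitmap has been located.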
61
62static const char valid_change[16] = {
63 /* current */
feaa7bba 64 /* n */ 0, 1, 1, 1,
88c8ab1f 65 /* e */ 1, 0, 0, 0,
feaa7bba 66 /* w */ 0, 0, 0, 1,
88c8ab1f
SW
67 1, 0, 0, 0
68};
69
70/**
71 * gfs2_setbit - Set a bit in the bitmaps
29c578f5 72 * @rgd: the resource group descriptor
29c578f5
BP
73 * @buf2: the clone buffer that holds the bitmaps
74 * @bi: the bitmap structure
88c8ab1f
SW
75 * @block: the block to set
76 * @new_state: the new state of the block
77 *
78 */
79
06344b91
BP
80static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
81 struct gfs2_bitmap *bi, u32 block,
82 unsigned char new_state)
88c8ab1f 83{
b45e41d7 84 unsigned char *byte1, *byte2, *end, cur_state;
95c8e17f 85 unsigned int buflen = bi->bi_len;
b45e41d7 86 const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
88c8ab1f 87
06344b91
BP
88 byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY);
89 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
88c8ab1f 90
b45e41d7 91 BUG_ON(byte1 >= end);
88c8ab1f 92
b45e41d7 93 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
88c8ab1f 94
b45e41d7 95 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
95c8e17f
BP
96 printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
97 "new_state=%d\n",
98 (unsigned long long)block, cur_state, new_state);
99 printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
100 (unsigned long long)rgd->rd_addr,
101 (unsigned long)bi->bi_start);
102 printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
103 (unsigned long)bi->bi_offset,
104 (unsigned long)bi->bi_len);
105 dump_stack();
88c8ab1f 106 gfs2_consist_rgrpd(rgd);
b45e41d7
SW
107 return;
108 }
109 *byte1 ^= (cur_state ^ new_state) << bit;
110
111 if (buf2) {
29c578f5 112 byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY);
b45e41d7
SW
113 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
114 *byte2 ^= (cur_state ^ new_state) << bit;
115 }
88c8ab1f
SW
116}
117
118/**
119 * gfs2_testbit - test a bit in the bitmaps
886b1416 120 * @rgd: the resource group descriptor
88c8ab1f
SW
121 * @buffer: the buffer that holds the bitmaps
122 * @buflen: the length (in bytes) of the buffer
123 * @block: the block to read
124 *
125 */
126
b45e41d7
SW
127static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
128 const unsigned char *buffer,
129 unsigned int buflen, u32 block)
88c8ab1f 130{
b45e41d7
SW
131 const unsigned char *byte, *end;
132 unsigned char cur_state;
88c8ab1f
SW
133 unsigned int bit;
134
135 byte = buffer + (block / GFS2_NBBY);
136 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
137 end = buffer + buflen;
138
139 gfs2_assert(rgd->rd_sbd, byte < end);
140
141 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
142
143 return cur_state;
144}
145
223b2b88
SW
146/**
147 * gfs2_bit_search
148 * @ptr: Pointer to bitmap data
149 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
150 * @state: The state we are searching for
151 *
152 * We xor the bitmap data with a pattern which is the bitwise opposite
 153 * of what we are looking for; this gives rise to a pattern of ones
154 * wherever there is a match. Since we have two bits per entry, we
155 * take this pattern, shift it down by one place and then and it with
156 * the original. All the even bit positions (0,2,4, etc) then represent
157 * successful matches, so we mask with 0x55555..... to remove the unwanted
158 * odd bit positions.
159 *
160 * This allows searching of a whole u64 at once (32 blocks) with a
161 * single test (on 64 bit arches).
162 */
163
164static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
165{
166 u64 tmp;
167 static const u64 search[] = {
075ac448
HE
168 [0] = 0xffffffffffffffffULL,
169 [1] = 0xaaaaaaaaaaaaaaaaULL,
170 [2] = 0x5555555555555555ULL,
171 [3] = 0x0000000000000000ULL,
223b2b88
SW
172 };
173 tmp = le64_to_cpu(*ptr) ^ search[state];
174 tmp &= (tmp >> 1);
175 tmp &= mask;
176 return tmp;
177}
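A worked example of the trick described above, on one byte chosen purely for
illustration: 0xA5 holds the entries 1, 1, 2, 2 (lowest-order bit pair first).
Searching for state GFS2_BLKST_USED (1) uses search[1] = 0xAA:

	tmp  = 0xA5 ^ 0xAA;	/* = 0x0F, matching entries become 0b11 */
	tmp &= tmp >> 1;	/* = 0x07 */
	tmp &= 0x55;		/* = 0x05, bits 0 and 2 set */

Bits 0 and 2 correspond to entries 0 and 1, the two blocks in state 1;
__ffs64(tmp) / 2 in gfs2_bitfit() below then yields 0, the first match.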
178
8e2e0047
BP
179/**
180 * rs_cmp - multi-block reservation range compare
181 * @blk: absolute file system block number of the new reservation
182 * @len: number of blocks in the new reservation
183 * @rs: existing reservation to compare against
184 *
185 * returns: 1 if the block range is beyond the reach of the reservation
186 * -1 if the block range is before the start of the reservation
187 * 0 if the block range overlaps with the reservation
188 */
189static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
190{
4a993fb1 191 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
8e2e0047
BP
192
193 if (blk >= startblk + rs->rs_free)
194 return 1;
195 if (blk + len - 1 < startblk)
196 return -1;
197 return 0;
198}
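For illustration (the numbers are made up): with a reservation whose first
block is 1000 and rs_free == 8 (i.e. blocks 1000..1007 are reserved),
rs_cmp(990, 5, rs) returns -1, rs_cmp(1010, 4, rs) returns 1, and
rs_cmp(995, 10, rs) returns 0 because blocks 1000..1004 overlap the
reservation.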
199
88c8ab1f
SW
200/**
201 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
202 * a block in a given allocation state.
886b1416 203 * @buf: the buffer that holds the bitmaps
223b2b88 204 * @len: the length (in bytes) of the buffer
88c8ab1f 205 * @goal: start search at this block's bit-pair (within @buf)
223b2b88 206 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
88c8ab1f
SW
207 *
208 * Scope of @goal and returned block number is only within this bitmap buffer,
209 * not the entire rgrp or filesystem. @buf will be offset from the actual
223b2b88
SW
210 * beginning of a bitmap block buffer, skipping any header structures, but
211 * headers are always a multiple of 64 bits long so that the buffer is
212 * always aligned to a 64 bit boundary.
213 *
214 * The size of the buffer is in bytes, but it is assumed that it is
fd589a8f 215 * always ok to read a complete multiple of 64 bits at the end
223b2b88 216 * of the block in case the end is not aligned to a natural boundary.
88c8ab1f
SW
217 *
218 * Return: the block number (bitmap buffer scope) that was found
219 */
220
02ab1721
HE
221static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
222 u32 goal, u8 state)
88c8ab1f 223{
223b2b88
SW
224 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
225 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
226 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
227 u64 tmp;
075ac448 228 u64 mask = 0x5555555555555555ULL;
223b2b88
SW
229 u32 bit;
230
223b2b88
SW
231 /* Mask off bits we don't care about at the start of the search */
232 mask <<= spoint;
233 tmp = gfs2_bit_search(ptr, mask, state);
234 ptr++;
235 while(tmp == 0 && ptr < end) {
075ac448 236 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
223b2b88 237 ptr++;
1f466a47 238 }
223b2b88
SW
239 /* Mask off any bits which are more than len bytes from the start */
240 if (ptr == end && (len & (sizeof(u64) - 1)))
241 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
242 /* Didn't find anything, so return */
243 if (tmp == 0)
244 return BFITNOENT;
245 ptr--;
d8bd504a 246 bit = __ffs64(tmp);
223b2b88
SW
247 bit /= 2; /* two bits per entry in the bitmap */
248 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
88c8ab1f
SW
249}
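A minimal usage sketch (the helper name is hypothetical): finding the first
free block within a single bitmap slab, in the same way that count_unlinked()
further down walks each slab looking for GFS2_BLKST_UNLINKED entries.

static u32 example_first_free(const struct gfs2_bitmap *bi)
{
	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;

	/* Block offset relative to this bitmap, or BFITNOENT if none is free */
	return gfs2_bitfit(buffer, bi->bi_len, 0, GFS2_BLKST_FREE);
}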
250
251/**
252 * gfs2_bitcount - count the number of bits in a certain state
886b1416 253 * @rgd: the resource group descriptor
88c8ab1f
SW
254 * @buffer: the buffer that holds the bitmaps
255 * @buflen: the length (in bytes) of the buffer
256 * @state: the state of the block we're looking for
257 *
258 * Returns: The number of bits
259 */
260
110acf38
SW
261static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
262 unsigned int buflen, u8 state)
88c8ab1f 263{
110acf38
SW
264 const u8 *byte = buffer;
265 const u8 *end = buffer + buflen;
266 const u8 state1 = state << 2;
267 const u8 state2 = state << 4;
268 const u8 state3 = state << 6;
cd915493 269 u32 count = 0;
88c8ab1f
SW
270
271 for (; byte < end; byte++) {
272 if (((*byte) & 0x03) == state)
273 count++;
274 if (((*byte) & 0x0C) == state1)
275 count++;
276 if (((*byte) & 0x30) == state2)
277 count++;
278 if (((*byte) & 0xC0) == state3)
279 count++;
280 }
281
282 return count;
283}
284
b3b94faa
DT
285/**
286 * gfs2_rgrp_verify - Verify that a resource group is consistent
b3b94faa
DT
287 * @rgd: the rgrp
288 *
289 */
290
291void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
292{
293 struct gfs2_sbd *sdp = rgd->rd_sbd;
294 struct gfs2_bitmap *bi = NULL;
bb8d8a6f 295 u32 length = rgd->rd_length;
cd915493 296 u32 count[4], tmp;
b3b94faa
DT
297 int buf, x;
298
cd915493 299 memset(count, 0, 4 * sizeof(u32));
b3b94faa
DT
300
301 /* Count # blocks in each of 4 possible allocation states */
302 for (buf = 0; buf < length; buf++) {
303 bi = rgd->rd_bits + buf;
304 for (x = 0; x < 4; x++)
305 count[x] += gfs2_bitcount(rgd,
306 bi->bi_bh->b_data +
307 bi->bi_offset,
308 bi->bi_len, x);
309 }
310
cfc8b549 311 if (count[0] != rgd->rd_free) {
b3b94faa
DT
312 if (gfs2_consist_rgrpd(rgd))
313 fs_err(sdp, "free data mismatch: %u != %u\n",
cfc8b549 314 count[0], rgd->rd_free);
b3b94faa
DT
315 return;
316 }
317
73f74948 318 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
6b946170 319 if (count[1] != tmp) {
b3b94faa
DT
320 if (gfs2_consist_rgrpd(rgd))
321 fs_err(sdp, "used data mismatch: %u != %u\n",
322 count[1], tmp);
323 return;
324 }
325
6b946170 326 if (count[2] + count[3] != rgd->rd_dinodes) {
b3b94faa 327 if (gfs2_consist_rgrpd(rgd))
feaa7bba 328 fs_err(sdp, "used metadata mismatch: %u != %u\n",
6b946170 329 count[2] + count[3], rgd->rd_dinodes);
b3b94faa
DT
330 return;
331 }
b3b94faa
DT
332}
333
bb8d8a6f 334static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
b3b94faa 335{
bb8d8a6f
SW
336 u64 first = rgd->rd_data0;
337 u64 last = first + rgd->rd_data;
16910427 338 return first <= block && block < last;
b3b94faa
DT
339}
340
341/**
342 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
343 * @sdp: The GFS2 superblock
886b1416
BP
344 * @blk: The data block number
345 * @exact: True if this needs to be an exact match
b3b94faa
DT
346 *
347 * Returns: The resource group, or NULL if not found
348 */
349
66fc061b 350struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
b3b94faa 351{
66fc061b 352 struct rb_node *n, *next;
f75bbfb4 353 struct gfs2_rgrpd *cur;
b3b94faa
DT
354
355 spin_lock(&sdp->sd_rindex_spin);
66fc061b
SW
356 n = sdp->sd_rindex_tree.rb_node;
357 while (n) {
358 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
359 next = NULL;
7c9ca621 360 if (blk < cur->rd_addr)
66fc061b 361 next = n->rb_left;
f75bbfb4 362 else if (blk >= cur->rd_data0 + cur->rd_data)
66fc061b
SW
363 next = n->rb_right;
364 if (next == NULL) {
b3b94faa 365 spin_unlock(&sdp->sd_rindex_spin);
66fc061b
SW
366 if (exact) {
367 if (blk < cur->rd_addr)
368 return NULL;
369 if (blk >= cur->rd_data0 + cur->rd_data)
370 return NULL;
371 }
7c9ca621 372 return cur;
b3b94faa 373 }
66fc061b 374 n = next;
b3b94faa 375 }
b3b94faa
DT
376 spin_unlock(&sdp->sd_rindex_spin);
377
378 return NULL;
379}
380
381/**
382 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
383 * @sdp: The GFS2 superblock
384 *
385 * Returns: The first rgrp in the filesystem
386 */
387
388struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
389{
7c9ca621
BP
390 const struct rb_node *n;
391 struct gfs2_rgrpd *rgd;
392
8339ee54 393 spin_lock(&sdp->sd_rindex_spin);
7c9ca621
BP
394 n = rb_first(&sdp->sd_rindex_tree);
395 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
8339ee54 396 spin_unlock(&sdp->sd_rindex_spin);
7c9ca621
BP
397
398 return rgd;
b3b94faa
DT
399}
400
401/**
402 * gfs2_rgrpd_get_next - get the next RG
886b1416 403 * @rgd: the resource group descriptor
b3b94faa
DT
404 *
405 * Returns: The next rgrp
406 */
407
408struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
409{
7c9ca621
BP
410 struct gfs2_sbd *sdp = rgd->rd_sbd;
411 const struct rb_node *n;
412
413 spin_lock(&sdp->sd_rindex_spin);
414 n = rb_next(&rgd->rd_node);
415 if (n == NULL)
416 n = rb_first(&sdp->sd_rindex_tree);
417
418 if (unlikely(&rgd->rd_node == n)) {
419 spin_unlock(&sdp->sd_rindex_spin);
b3b94faa 420 return NULL;
7c9ca621
BP
421 }
422 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
423 spin_unlock(&sdp->sd_rindex_spin);
424 return rgd;
b3b94faa
DT
425}
426
8339ee54
SW
427void gfs2_free_clones(struct gfs2_rgrpd *rgd)
428{
429 int x;
430
431 for (x = 0; x < rgd->rd_length; x++) {
432 struct gfs2_bitmap *bi = rgd->rd_bits + x;
433 kfree(bi->bi_clone);
434 bi->bi_clone = NULL;
435 }
436}
437
0a305e49
BP
438/**
439 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
440 * @ip: the inode for this reservation
441 */
442int gfs2_rs_alloc(struct gfs2_inode *ip)
443{
444 int error = 0;
8e2e0047
BP
445 struct gfs2_blkreserv *res;
446
447 if (ip->i_res)
448 return 0;
449
450 res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
451 if (!res)
452 error = -ENOMEM;
0a305e49 453
24d634e8 454 RB_CLEAR_NODE(&res->rs_node);
4a993fb1 455
0a305e49 456 down_write(&ip->i_rw_mutex);
8e2e0047
BP
457 if (ip->i_res)
458 kmem_cache_free(gfs2_rsrv_cachep, res);
459 else
460 ip->i_res = res;
0a305e49
BP
461 up_write(&ip->i_rw_mutex);
462 return error;
463}
464
8e2e0047
BP
465static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
466{
467 gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
8d8b752a 468 rs->rs_rbm.rgd->rd_addr, gfs2_rbm_to_block(&rs->rs_rbm),
4a993fb1 469 rs->rs_rbm.offset, rs->rs_free);
8e2e0047
BP
470}
471
0a305e49 472/**
8e2e0047
BP
473 * __rs_deltree - remove a multi-block reservation from the rgd tree
474 * @rs: The reservation to remove
475 *
476 */
4a993fb1 477static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
8e2e0047
BP
478{
479 struct gfs2_rgrpd *rgd;
480
481 if (!gfs2_rs_active(rs))
482 return;
483
4a993fb1
SW
484 rgd = rs->rs_rbm.rgd;
485 trace_gfs2_rs(ip, rs, TRACE_RS_TREEDEL);
486 rb_erase(&rs->rs_node, &rgd->rd_rstree);
24d634e8 487 RB_CLEAR_NODE(&rs->rs_node);
8e2e0047
BP
488 BUG_ON(!rgd->rd_rs_cnt);
489 rgd->rd_rs_cnt--;
490
491 if (rs->rs_free) {
492 /* return reserved blocks to the rgrp and the ip */
4a993fb1
SW
493 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
494 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
8e2e0047 495 rs->rs_free = 0;
4a993fb1 496 clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
8e2e0047
BP
497 smp_mb__after_clear_bit();
498 }
8e2e0047
BP
499}
500
501/**
502 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
503 * @rs: The reservation to remove
504 *
505 */
4a993fb1 506void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
8e2e0047
BP
507{
508 struct gfs2_rgrpd *rgd;
509
4a993fb1
SW
510 rgd = rs->rs_rbm.rgd;
511 if (rgd) {
512 spin_lock(&rgd->rd_rsspin);
513 __rs_deltree(ip, rs);
514 spin_unlock(&rgd->rd_rsspin);
515 }
8e2e0047
BP
516}
517
518/**
519 * gfs2_rs_delete - delete a multi-block reservation
0a305e49
BP
520 * @ip: The inode for this reservation
521 *
522 */
523void gfs2_rs_delete(struct gfs2_inode *ip)
524{
525 down_write(&ip->i_rw_mutex);
526 if (ip->i_res) {
4a993fb1 527 gfs2_rs_deltree(ip, ip->i_res);
8e2e0047
BP
528 trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
529 BUG_ON(ip->i_res->rs_free);
0a305e49
BP
530 kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
531 ip->i_res = NULL;
532 }
533 up_write(&ip->i_rw_mutex);
534}
535
8e2e0047
BP
536/**
537 * return_all_reservations - return all reserved blocks back to the rgrp.
538 * @rgd: the rgrp that needs its space back
539 *
540 * We previously reserved a bunch of blocks for allocation. Now we need to
541 * give them back. This leaves the reservation structures intact, but removes
542 * all of their corresponding "no-fly zones".
543 */
544static void return_all_reservations(struct gfs2_rgrpd *rgd)
545{
546 struct rb_node *n;
547 struct gfs2_blkreserv *rs;
548
549 spin_lock(&rgd->rd_rsspin);
550 while ((n = rb_first(&rgd->rd_rstree))) {
551 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
4a993fb1 552 __rs_deltree(NULL, rs);
8e2e0047
BP
553 }
554 spin_unlock(&rgd->rd_rsspin);
555}
556
8339ee54 557void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
b3b94faa 558{
7c9ca621 559 struct rb_node *n;
b3b94faa
DT
560 struct gfs2_rgrpd *rgd;
561 struct gfs2_glock *gl;
562
7c9ca621
BP
563 while ((n = rb_first(&sdp->sd_rindex_tree))) {
564 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
b3b94faa
DT
565 gl = rgd->rd_gl;
566
7c9ca621 567 rb_erase(n, &sdp->sd_rindex_tree);
b3b94faa
DT
568
569 if (gl) {
8339ee54 570 spin_lock(&gl->gl_spin);
5c676f6d 571 gl->gl_object = NULL;
8339ee54 572 spin_unlock(&gl->gl_spin);
29687a2a 573 gfs2_glock_add_to_lru(gl);
b3b94faa
DT
574 gfs2_glock_put(gl);
575 }
576
8339ee54 577 gfs2_free_clones(rgd);
b3b94faa 578 kfree(rgd->rd_bits);
8e2e0047 579 return_all_reservations(rgd);
6bdd9be6 580 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
b3b94faa
DT
581 }
582}
583
bb8d8a6f
SW
584static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
585{
586 printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
587 printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
588 printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
589 printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
590 printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
591}
592
b3b94faa
DT
593/**
594 * compute_bitstructs - Compute the bitmap sizes
595 * @rgd: The resource group descriptor
596 *
597 * Calculates bitmap descriptors, one for each block that contains bitmap data
598 *
599 * Returns: errno
600 */
601
602static int compute_bitstructs(struct gfs2_rgrpd *rgd)
603{
604 struct gfs2_sbd *sdp = rgd->rd_sbd;
605 struct gfs2_bitmap *bi;
bb8d8a6f 606 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
cd915493 607 u32 bytes_left, bytes;
b3b94faa
DT
608 int x;
609
feaa7bba
SW
610 if (!length)
611 return -EINVAL;
612
dd894be8 613 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
b3b94faa
DT
614 if (!rgd->rd_bits)
615 return -ENOMEM;
616
bb8d8a6f 617 bytes_left = rgd->rd_bitbytes;
b3b94faa
DT
618
619 for (x = 0; x < length; x++) {
620 bi = rgd->rd_bits + x;
621
60a0b8f9 622 bi->bi_flags = 0;
b3b94faa
DT
623 /* small rgrp; bitmap stored completely in header block */
624 if (length == 1) {
625 bytes = bytes_left;
626 bi->bi_offset = sizeof(struct gfs2_rgrp);
627 bi->bi_start = 0;
628 bi->bi_len = bytes;
629 /* header block */
630 } else if (x == 0) {
631 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
632 bi->bi_offset = sizeof(struct gfs2_rgrp);
633 bi->bi_start = 0;
634 bi->bi_len = bytes;
635 /* last block */
636 } else if (x + 1 == length) {
637 bytes = bytes_left;
638 bi->bi_offset = sizeof(struct gfs2_meta_header);
bb8d8a6f 639 bi->bi_start = rgd->rd_bitbytes - bytes_left;
b3b94faa
DT
640 bi->bi_len = bytes;
641 /* other blocks */
642 } else {
568f4c96
SW
643 bytes = sdp->sd_sb.sb_bsize -
644 sizeof(struct gfs2_meta_header);
b3b94faa 645 bi->bi_offset = sizeof(struct gfs2_meta_header);
bb8d8a6f 646 bi->bi_start = rgd->rd_bitbytes - bytes_left;
b3b94faa
DT
647 bi->bi_len = bytes;
648 }
649
650 bytes_left -= bytes;
651 }
652
653 if (bytes_left) {
654 gfs2_consist_rgrpd(rgd);
655 return -EIO;
656 }
657 bi = rgd->rd_bits + (length - 1);
bb8d8a6f 658 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
b3b94faa 659 if (gfs2_consist_rgrpd(rgd)) {
bb8d8a6f 660 gfs2_rindex_print(rgd);
b3b94faa
DT
661 fs_err(sdp, "start=%u len=%u offset=%u\n",
662 bi->bi_start, bi->bi_len, bi->bi_offset);
663 }
664 return -EIO;
665 }
666
667 return 0;
668}
669
7ae8fa84
RP
670/**
671 * gfs2_ri_total - Total up the file system space, according to the rindex.
886b1416 672 * @sdp: the filesystem
7ae8fa84
RP
673 *
674 */
675u64 gfs2_ri_total(struct gfs2_sbd *sdp)
676{
677 u64 total_data = 0;
678 struct inode *inode = sdp->sd_rindex;
679 struct gfs2_inode *ip = GFS2_I(inode);
7ae8fa84 680 char buf[sizeof(struct gfs2_rindex)];
7ae8fa84
RP
681 int error, rgrps;
682
7ae8fa84
RP
683 for (rgrps = 0;; rgrps++) {
684 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
685
bcd7278d 686 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
7ae8fa84 687 break;
4306629e 688 error = gfs2_internal_read(ip, buf, &pos,
7ae8fa84
RP
689 sizeof(struct gfs2_rindex));
690 if (error != sizeof(struct gfs2_rindex))
691 break;
bb8d8a6f 692 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
7ae8fa84 693 }
7ae8fa84
RP
694 return total_data;
695}
696
6aad1c3d 697static int rgd_insert(struct gfs2_rgrpd *rgd)
7c9ca621
BP
698{
699 struct gfs2_sbd *sdp = rgd->rd_sbd;
700 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
701
702 /* Figure out where to put new node */
703 while (*newn) {
704 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
705 rd_node);
706
707 parent = *newn;
708 if (rgd->rd_addr < cur->rd_addr)
709 newn = &((*newn)->rb_left);
710 else if (rgd->rd_addr > cur->rd_addr)
711 newn = &((*newn)->rb_right);
712 else
6aad1c3d 713 return -EEXIST;
7c9ca621
BP
714 }
715
716 rb_link_node(&rgd->rd_node, parent, newn);
717 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
6aad1c3d
BP
718 sdp->sd_rgrps++;
719 return 0;
7c9ca621
BP
720}
721
b3b94faa 722/**
6c53267f 723 * read_rindex_entry - Pull in a new resource index entry from the disk
4306629e 724 * @ip: Pointer to the rindex inode
b3b94faa 725 *
8339ee54 726 * Returns: 0 on success, > 0 on EOF, error code otherwise
6c53267f
RP
727 */
728
4306629e 729static int read_rindex_entry(struct gfs2_inode *ip)
6c53267f
RP
730{
731 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
732 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
8339ee54 733 struct gfs2_rindex buf;
6c53267f
RP
734 int error;
735 struct gfs2_rgrpd *rgd;
736
8339ee54
SW
737 if (pos >= i_size_read(&ip->i_inode))
738 return 1;
739
4306629e 740 error = gfs2_internal_read(ip, (char *)&buf, &pos,
6c53267f 741 sizeof(struct gfs2_rindex));
8339ee54
SW
742
743 if (error != sizeof(struct gfs2_rindex))
744 return (error == 0) ? 1 : error;
6c53267f 745
6bdd9be6 746 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
6c53267f
RP
747 error = -ENOMEM;
748 if (!rgd)
749 return error;
750
6c53267f 751 rgd->rd_sbd = sdp;
8339ee54
SW
752 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
753 rgd->rd_length = be32_to_cpu(buf.ri_length);
754 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
755 rgd->rd_data = be32_to_cpu(buf.ri_data);
756 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
8e2e0047 757 spin_lock_init(&rgd->rd_rsspin);
7c9ca621 758
6c53267f
RP
759 error = compute_bitstructs(rgd);
760 if (error)
8339ee54 761 goto fail;
6c53267f 762
bb8d8a6f 763 error = gfs2_glock_get(sdp, rgd->rd_addr,
6c53267f
RP
764 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
765 if (error)
8339ee54 766 goto fail;
6c53267f
RP
767
768 rgd->rd_gl->gl_object = rgd;
90306c41 769 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
cf45b752 770 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
7c9ca621
BP
771 if (rgd->rd_data > sdp->sd_max_rg_data)
772 sdp->sd_max_rg_data = rgd->rd_data;
8339ee54 773 spin_lock(&sdp->sd_rindex_spin);
6aad1c3d 774 error = rgd_insert(rgd);
8339ee54 775 spin_unlock(&sdp->sd_rindex_spin);
6aad1c3d
BP
776 if (!error)
777 return 0;
778
779 error = 0; /* someone else read in the rgrp; free it and ignore it */
c1ac539e 780 gfs2_glock_put(rgd->rd_gl);
8339ee54
SW
781
782fail:
783 kfree(rgd->rd_bits);
784 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
6c53267f
RP
785 return error;
786}
787
788/**
789 * gfs2_ri_update - Pull in a new resource index from the disk
790 * @ip: pointer to the rindex inode
791 *
b3b94faa
DT
792 * Returns: 0 on successful update, error code otherwise
793 */
794
8339ee54 795static int gfs2_ri_update(struct gfs2_inode *ip)
b3b94faa 796{
feaa7bba 797 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
798 int error;
799
8339ee54 800 do {
4306629e 801 error = read_rindex_entry(ip);
8339ee54
SW
802 } while (error == 0);
803
804 if (error < 0)
805 return error;
b3b94faa 806
cf45b752 807 sdp->sd_rindex_uptodate = 1;
6c53267f
RP
808 return 0;
809}
b3b94faa 810
b3b94faa 811/**
8339ee54 812 * gfs2_rindex_update - Update the rindex if required
b3b94faa 813 * @sdp: The GFS2 superblock
b3b94faa
DT
814 *
815 * We grab a lock on the rindex inode to make sure that it doesn't
816 * change whilst we are performing an operation. We keep this lock
817 * for quite long periods of time compared to other locks. This
818 * doesn't matter, since it is shared and it is very, very rarely
819 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
820 *
821 * This makes sure that we're using the latest copy of the resource index
822 * special file, which might have been updated if someone expanded the
823 * filesystem (via gfs2_grow utility), which adds new resource groups.
824 *
8339ee54 825 * Returns: 0 on success, error code otherwise
b3b94faa
DT
826 */
827
8339ee54 828int gfs2_rindex_update(struct gfs2_sbd *sdp)
b3b94faa 829{
feaa7bba 830 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
b3b94faa 831 struct gfs2_glock *gl = ip->i_gl;
8339ee54
SW
832 struct gfs2_holder ri_gh;
833 int error = 0;
a365fbf3 834 int unlock_required = 0;
b3b94faa
DT
835
836 /* Read new copy from disk if we don't have the latest */
cf45b752 837 if (!sdp->sd_rindex_uptodate) {
a365fbf3
SW
838 if (!gfs2_glock_is_locked_by_me(gl)) {
839 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
840 if (error)
6aad1c3d 841 return error;
a365fbf3
SW
842 unlock_required = 1;
843 }
8339ee54 844 if (!sdp->sd_rindex_uptodate)
b3b94faa 845 error = gfs2_ri_update(ip);
a365fbf3
SW
846 if (unlock_required)
847 gfs2_glock_dq_uninit(&ri_gh);
b3b94faa
DT
848 }
849
850 return error;
851}
852
42d52e38 853static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
bb8d8a6f
SW
854{
855 const struct gfs2_rgrp *str = buf;
42d52e38 856 u32 rg_flags;
bb8d8a6f 857
42d52e38 858 rg_flags = be32_to_cpu(str->rg_flags);
09010978 859 rg_flags &= ~GFS2_RDF_MASK;
1ce97e56
SW
860 rgd->rd_flags &= GFS2_RDF_MASK;
861 rgd->rd_flags |= rg_flags;
cfc8b549 862 rgd->rd_free = be32_to_cpu(str->rg_free);
73f74948 863 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
d8b71f73 864 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
bb8d8a6f
SW
865}
866
42d52e38 867static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
bb8d8a6f
SW
868{
869 struct gfs2_rgrp *str = buf;
870
09010978 871 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
cfc8b549 872 str->rg_free = cpu_to_be32(rgd->rd_free);
73f74948 873 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
bb8d8a6f 874 str->__pad = cpu_to_be32(0);
d8b71f73 875 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
bb8d8a6f
SW
876 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
877}
878
90306c41
BM
879static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
880{
881 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
882 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
883
884 if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
885 rgl->rl_dinodes != str->rg_dinodes ||
886 rgl->rl_igeneration != str->rg_igeneration)
887 return 0;
888 return 1;
889}
890
891static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
892{
893 const struct gfs2_rgrp *str = buf;
894
895 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
896 rgl->rl_flags = str->rg_flags;
897 rgl->rl_free = str->rg_free;
898 rgl->rl_dinodes = str->rg_dinodes;
899 rgl->rl_igeneration = str->rg_igeneration;
900 rgl->__pad = 0UL;
901}
902
903static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
904{
905 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
906 u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
907 rgl->rl_unlinked = cpu_to_be32(unlinked);
908}
909
910static u32 count_unlinked(struct gfs2_rgrpd *rgd)
911{
912 struct gfs2_bitmap *bi;
913 const u32 length = rgd->rd_length;
914 const u8 *buffer = NULL;
915 u32 i, goal, count = 0;
916
917 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
918 goal = 0;
919 buffer = bi->bi_bh->b_data + bi->bi_offset;
920 WARN_ON(!buffer_uptodate(bi->bi_bh));
921 while (goal < bi->bi_len * GFS2_NBBY) {
922 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
923 GFS2_BLKST_UNLINKED);
924 if (goal == BFITNOENT)
925 break;
926 count++;
927 goal++;
928 }
929 }
930
931 return count;
932}
933
934
b3b94faa 935/**
90306c41
BM
936 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
937 * @rgd: the struct gfs2_rgrpd describing the RG to read in
b3b94faa
DT
938 *
939 * Read in all of a Resource Group's header and bitmap blocks.
941 * Caller must eventually call gfs2_rgrp_go_unlock() to release the bitmaps.
941 *
942 * Returns: errno
943 */
944
90306c41 945int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
b3b94faa
DT
946{
947 struct gfs2_sbd *sdp = rgd->rd_sbd;
948 struct gfs2_glock *gl = rgd->rd_gl;
bb8d8a6f 949 unsigned int length = rgd->rd_length;
b3b94faa
DT
950 struct gfs2_bitmap *bi;
951 unsigned int x, y;
952 int error;
953
90306c41
BM
954 if (rgd->rd_bits[0].bi_bh != NULL)
955 return 0;
956
b3b94faa
DT
957 for (x = 0; x < length; x++) {
958 bi = rgd->rd_bits + x;
bb8d8a6f 959 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
b3b94faa
DT
960 if (error)
961 goto fail;
962 }
963
964 for (y = length; y--;) {
965 bi = rgd->rd_bits + y;
7276b3b0 966 error = gfs2_meta_wait(sdp, bi->bi_bh);
b3b94faa
DT
967 if (error)
968 goto fail;
feaa7bba 969 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
b3b94faa
DT
970 GFS2_METATYPE_RG)) {
971 error = -EIO;
972 goto fail;
973 }
974 }
975
cf45b752 976 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
60a0b8f9
SW
977 for (x = 0; x < length; x++)
978 clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
42d52e38 979 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1ce97e56 980 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
7c9ca621 981 rgd->rd_free_clone = rgd->rd_free;
b3b94faa 982 }
90306c41
BM
983 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
984 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
985 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
986 rgd->rd_bits[0].bi_bh->b_data);
987 }
988 else if (sdp->sd_args.ar_rgrplvb) {
989 if (!gfs2_rgrp_lvb_valid(rgd)){
990 gfs2_consist_rgrpd(rgd);
991 error = -EIO;
992 goto fail;
993 }
994 if (rgd->rd_rgl->rl_unlinked == 0)
995 rgd->rd_flags &= ~GFS2_RDF_CHECK;
996 }
b3b94faa
DT
997 return 0;
998
feaa7bba 999fail:
b3b94faa
DT
1000 while (x--) {
1001 bi = rgd->rd_bits + x;
1002 brelse(bi->bi_bh);
1003 bi->bi_bh = NULL;
1004 gfs2_assert_warn(sdp, !bi->bi_clone);
1005 }
b3b94faa
DT
1006
1007 return error;
1008}
1009
90306c41
BM
1010int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1011{
1012 u32 rl_flags;
1013
1014 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1015 return 0;
1016
1017 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1018 return gfs2_rgrp_bh_get(rgd);
1019
1020 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1021 rl_flags &= ~GFS2_RDF_MASK;
1022 rgd->rd_flags &= GFS2_RDF_MASK;
1023 rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1024 if (rgd->rd_rgl->rl_unlinked == 0)
1025 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1026 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1027 rgd->rd_free_clone = rgd->rd_free;
1028 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1029 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1030 return 0;
1031}
1032
1033int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1034{
1035 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1036 struct gfs2_sbd *sdp = rgd->rd_sbd;
1037
1038 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1039 return 0;
1040 return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
1041}
1042
b3b94faa 1043/**
7c9ca621 1044 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
886b1416 1045 * @gh: The glock holder for the resource group
b3b94faa
DT
1046 *
1047 */
1048
7c9ca621 1049void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
b3b94faa 1050{
7c9ca621 1051 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
bb8d8a6f 1052 int x, length = rgd->rd_length;
b3b94faa 1053
b3b94faa
DT
1054 for (x = 0; x < length; x++) {
1055 struct gfs2_bitmap *bi = rgd->rd_bits + x;
90306c41
BM
1056 if (bi->bi_bh) {
1057 brelse(bi->bi_bh);
1058 bi->bi_bh = NULL;
1059 }
b3b94faa
DT
1060 }
1061
b3b94faa
DT
1062}
1063
66fc061b 1064int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
7c9ca621 1065 struct buffer_head *bh,
66fc061b 1066 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
f15ab561
SW
1067{
1068 struct super_block *sb = sdp->sd_vfs;
1069 struct block_device *bdev = sb->s_bdev;
1070 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
e1defc4f 1071 bdev_logical_block_size(sb->s_bdev);
f15ab561 1072 u64 blk;
64d576ba 1073 sector_t start = 0;
f15ab561
SW
1074 sector_t nr_sects = 0;
1075 int rv;
1076 unsigned int x;
66fc061b
SW
1077 u32 trimmed = 0;
1078 u8 diff;
f15ab561
SW
1079
1080 for (x = 0; x < bi->bi_len; x++) {
66fc061b
SW
1081 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1082 clone += bi->bi_offset;
1083 clone += x;
1084 if (bh) {
1085 const u8 *orig = bh->b_data + bi->bi_offset + x;
1086 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1087 } else {
1088 diff = ~(*clone | (*clone >> 1));
1089 }
f15ab561
SW
1090 diff &= 0x55;
1091 if (diff == 0)
1092 continue;
1093 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1094 blk *= sects_per_blk; /* convert to sectors */
1095 while(diff) {
1096 if (diff & 1) {
1097 if (nr_sects == 0)
1098 goto start_new_extent;
1099 if ((start + nr_sects) != blk) {
66fc061b
SW
1100 if (nr_sects >= minlen) {
1101 rv = blkdev_issue_discard(bdev,
1102 start, nr_sects,
1103 GFP_NOFS, 0);
1104 if (rv)
1105 goto fail;
1106 trimmed += nr_sects;
1107 }
f15ab561
SW
1108 nr_sects = 0;
1109start_new_extent:
1110 start = blk;
1111 }
1112 nr_sects += sects_per_blk;
1113 }
1114 diff >>= 2;
1115 blk += sects_per_blk;
1116 }
1117 }
66fc061b 1118 if (nr_sects >= minlen) {
dd3932ed 1119 rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
f15ab561
SW
1120 if (rv)
1121 goto fail;
66fc061b 1122 trimmed += nr_sects;
f15ab561 1123 }
66fc061b
SW
1124 if (ptrimmed)
1125 *ptrimmed = trimmed;
1126 return 0;
1127
f15ab561 1128fail:
66fc061b
SW
1129 if (sdp->sd_args.ar_discard)
1130 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
f15ab561 1131 sdp->sd_args.ar_discard = 0;
66fc061b
SW
1132 return -EIO;
1133}
1134
1135/**
1136 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1137 * @filp: Any file on the filesystem
1138 * @argp: Pointer to the arguments (also used to pass result)
1139 *
1140 * Returns: 0 on success, otherwise error code
1141 */
1142
1143int gfs2_fitrim(struct file *filp, void __user *argp)
1144{
1145 struct inode *inode = filp->f_dentry->d_inode;
1146 struct gfs2_sbd *sdp = GFS2_SB(inode);
1147 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1148 struct buffer_head *bh;
1149 struct gfs2_rgrpd *rgd;
1150 struct gfs2_rgrpd *rgd_end;
1151 struct gfs2_holder gh;
1152 struct fstrim_range r;
1153 int ret = 0;
1154 u64 amt;
1155 u64 trimmed = 0;
1156 unsigned int x;
1157
1158 if (!capable(CAP_SYS_ADMIN))
1159 return -EPERM;
1160
1161 if (!blk_queue_discard(q))
1162 return -EOPNOTSUPP;
1163
66fc061b
SW
1164 if (argp == NULL) {
1165 r.start = 0;
1166 r.len = ULLONG_MAX;
1167 r.minlen = 0;
1168 } else if (copy_from_user(&r, argp, sizeof(r)))
1169 return -EFAULT;
1170
5e2f7d61
BP
1171 ret = gfs2_rindex_update(sdp);
1172 if (ret)
1173 return ret;
1174
66fc061b
SW
1175 rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
1176 rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
1177
1178 while (1) {
1179
1180 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1181 if (ret)
1182 goto out;
1183
1184 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1185 /* Trim each bitmap in the rgrp */
1186 for (x = 0; x < rgd->rd_length; x++) {
1187 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1188 ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
1189 if (ret) {
1190 gfs2_glock_dq_uninit(&gh);
1191 goto out;
1192 }
1193 trimmed += amt;
1194 }
1195
1196 /* Mark rgrp as having been trimmed */
1197 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1198 if (ret == 0) {
1199 bh = rgd->rd_bits[0].bi_bh;
1200 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1201 gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
1202 gfs2_rgrp_out(rgd, bh->b_data);
90306c41 1203 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
66fc061b
SW
1204 gfs2_trans_end(sdp);
1205 }
1206 }
1207 gfs2_glock_dq_uninit(&gh);
1208
1209 if (rgd == rgd_end)
1210 break;
1211
1212 rgd = gfs2_rgrpd_get_next(rgd);
1213 }
1214
1215out:
1216 r.len = trimmed << 9;
1217 if (argp && copy_to_user(argp, &r, sizeof(r)))
1218 return -EFAULT;
1219
1220 return ret;
f15ab561
SW
1221}
1222
8e2e0047
BP
1223/**
1224 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1225 * @bi: the bitmap with the blocks
1226 * @ip: the inode structure
1227 * @biblk: the 32-bit block number relative to the start of the bitmap
1228 * @amount: the number of blocks to reserve
1229 *
1230 * Returns: NULL - reservation was already taken, so not inserted
1231 * pointer to the inserted reservation
1232 */
1233static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
1234 struct gfs2_inode *ip, u32 biblk,
1235 int amount)
1236{
1237 struct rb_node **newn, *parent = NULL;
1238 int rc;
1239 struct gfs2_blkreserv *rs = ip->i_res;
4a993fb1 1240 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
8e2e0047
BP
1241 u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;
1242
1243 spin_lock(&rgd->rd_rsspin);
1244 newn = &rgd->rd_rstree.rb_node;
1245 BUG_ON(!ip->i_res);
1246 BUG_ON(gfs2_rs_active(rs));
1247 /* Figure out where to put new node */
1248 /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
1249 while (*newn) {
1250 struct gfs2_blkreserv *cur =
1251 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1252
1253 parent = *newn;
1254 rc = rs_cmp(fsblock, amount, cur);
1255 if (rc > 0)
1256 newn = &((*newn)->rb_right);
1257 else if (rc < 0)
1258 newn = &((*newn)->rb_left);
1259 else {
1260 spin_unlock(&rgd->rd_rsspin);
1261 return NULL; /* reservation already in use */
1262 }
1263 }
1264
1265 /* Do our reservation work */
1266 rs = ip->i_res;
1267 rs->rs_free = amount;
4a993fb1
SW
1268 rs->rs_rbm.offset = biblk;
1269 rs->rs_rbm.bi = bi;
8e2e0047
BP
1270 rb_link_node(&rs->rs_node, parent, newn);
1271 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1272
8e2e0047
BP
1273 /* Do our rgrp accounting for the reservation */
1274 rgd->rd_reserved += amount; /* blocks reserved */
1275 rgd->rd_rs_cnt++; /* number of in-tree reservations */
1276 spin_unlock(&rgd->rd_rsspin);
1277 trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
1278 return rs;
1279}
1280
1281/**
1282 * unclaimed_blocks - return number of blocks that aren't spoken for
1283 */
1284static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
1285{
1286 return rgd->rd_free_clone - rgd->rd_reserved;
1287}
1288
1289/**
1290 * rg_mblk_search - find a group of multiple free blocks
1291 * @rgd: the resource group descriptor
1292 * @rs: the block reservation
1293 * @ip: pointer to the inode for which we're reserving blocks
1294 *
1295 * This is very similar to rgblk_search, except we're looking for whole
1296 * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
1297 * on aligned dwords for speed's sake.
1298 *
1299 * Returns: 0 if successful or BFITNOENT if there isn't enough free space
1300 */
1301
71f890f7 1302static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, unsigned requested)
8e2e0047
BP
1303{
1304 struct gfs2_bitmap *bi = rgd->rd_bits;
1305 const u32 length = rgd->rd_length;
1306 u32 blk;
1307 unsigned int buf, x, search_bytes;
1308 u8 *buffer = NULL;
1309 u8 *ptr, *end, *nonzero;
1310 u32 goal, rsv_bytes;
1311 struct gfs2_blkreserv *rs;
1312 u32 best_rs_bytes, unclaimed;
1313 int best_rs_blocks;
1314
1315 /* Find bitmap block that contains bits for goal block */
1316 if (rgrp_contains_block(rgd, ip->i_goal))
1317 goal = ip->i_goal - rgd->rd_data0;
1318 else
1319 goal = rgd->rd_last_alloc;
1320 for (buf = 0; buf < length; buf++) {
1321 bi = rgd->rd_bits + buf;
1322 /* Convert scope of "goal" from rgrp-wide to within
1323 found bit block */
1324 if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
1325 goal -= bi->bi_start * GFS2_NBBY;
1326 goto do_search;
1327 }
1328 }
1329 buf = 0;
1330 goal = 0;
1331
1332do_search:
1333 best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
1334 (RGRP_RSRV_MINBLKS * rgd->rd_length));
1335 best_rs_bytes = (best_rs_blocks *
1336 (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
1337 GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
1338 best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
1339 unclaimed = unclaimed_blocks(rgd);
1340 if (best_rs_bytes * GFS2_NBBY > unclaimed)
1341 best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;
1342
1343 for (x = 0; x <= length; x++) {
1344 bi = rgd->rd_bits + buf;
1345
1346 if (test_bit(GBF_FULL, &bi->bi_flags))
1347 goto skip;
1348
1349 WARN_ON(!buffer_uptodate(bi->bi_bh));
1350 if (bi->bi_clone)
1351 buffer = bi->bi_clone + bi->bi_offset;
1352 else
1353 buffer = bi->bi_bh->b_data + bi->bi_offset;
1354
1355 /* We have to keep the reservations aligned on u64 boundaries
1356 otherwise we could get situations where a byte can't be
1357 used because it's after a reservation, but a free bit still
1358 is within the reservation's area. */
1359 ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
1360 end = (buffer + bi->bi_len);
1361 while (ptr < end) {
1362 rsv_bytes = 0;
1363 if ((ptr + best_rs_bytes) <= end)
1364 search_bytes = best_rs_bytes;
1365 else
1366 search_bytes = end - ptr;
1367 BUG_ON(!search_bytes);
1368 nonzero = memchr_inv(ptr, 0, search_bytes);
1369 /* If the lot is all zeroes, reserve the whole size. If
1370 there's enough zeroes to satisfy the request, use
1371 what we can. If there's not enough, keep looking. */
1372 if (nonzero == NULL)
1373 rsv_bytes = search_bytes;
71f890f7 1374 else if ((nonzero - ptr) * GFS2_NBBY >= requested)
8e2e0047
BP
1375 rsv_bytes = (nonzero - ptr);
1376
1377 if (rsv_bytes) {
1378 blk = ((ptr - buffer) * GFS2_NBBY);
1379 BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
1380 rs = rs_insert(bi, ip, blk,
1381 rsv_bytes * GFS2_NBBY);
1382 if (IS_ERR(rs))
1383 return PTR_ERR(rs);
1384 if (rs)
1385 return 0;
1386 }
1387 ptr += ALIGN(search_bytes, sizeof(u64));
1388 }
1389skip:
1390 /* Try next bitmap block (wrap back to rgrp header
1391 if at end) */
1392 buf++;
1393 buf %= length;
1394 goal = 0;
1395 }
1396
1397 return BFITNOENT;
1398}
1399
b3b94faa
DT
1400/**
1401 * try_rgrp_fit - See if a given reservation will fit in a given RG
1402 * @rgd: the RG data
54335b1f 1403 * @ip: the inode
b3b94faa
DT
1404 *
1405 * If there's room for the requested blocks to be allocated from the RG:
8e2e0047
BP
1406 * This will try to get a multi-block reservation first, and if that doesn't
1407 * fit, it will take what it can.
b3b94faa
DT
1408 *
1409 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
1410 */
1411
71f890f7
SW
1412static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1413 unsigned requested)
b3b94faa 1414{
09010978 1415 if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
a43a4906 1416 return 0;
8e2e0047
BP
1417 /* Look for a multi-block reservation. */
1418 if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS &&
71f890f7 1419 rg_mblk_search(rgd, ip, requested) != BFITNOENT)
8e2e0047 1420 return 1;
71f890f7 1421 if (unclaimed_blocks(rgd) >= requested)
7c9ca621 1422 return 1;
b3b94faa 1423
8e2e0047 1424 return 0;
b3e47ca0
BP
1425}
1426
5b924ae2
SW
1427/**
1428 * gfs2_next_unreserved_block - Return next block that is not reserved
1429 * @rgd: The resource group
1430 * @block: The starting block
1431 * @ip: Ignore any reservations for this inode
1432 *
1433 * If the block does not appear in any reservation, then return the
1434 * block number unchanged. If it does appear in the reservation, then
1435 * keep looking through the tree of reservations in order to find the
1436 * first block number which is not reserved.
1437 */
1438
1439static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1440 const struct gfs2_inode *ip)
1441{
1442 struct gfs2_blkreserv *rs;
1443 struct rb_node *n;
1444 int rc;
1445
1446 spin_lock(&rgd->rd_rsspin);
1447 n = rb_first(&rgd->rd_rstree);
1448 while (n) {
1449 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1450 rc = rs_cmp(block, 1, rs);
1451 if (rc < 0)
1452 n = n->rb_left;
1453 else if (rc > 0)
1454 n = n->rb_right;
1455 else
1456 break;
1457 }
1458
1459 if (n) {
1460 while ((rs_cmp(block, 1, rs) == 0) && (ip->i_res != rs)) {
1461 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1462 n = rb_next(&rs->rs_node);
1463 if (n == NULL)
1464 break;
1465 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1466 }
1467 }
1468
1469 spin_unlock(&rgd->rd_rsspin);
1470 return block;
1471}
1472
1473/**
1474 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
1475 * @rbm: The rbm with rgd already set correctly
1476 * @block: The block number (filesystem relative)
1477 *
1478 * This sets the bi and offset members of an rbm based on a
1479 * resource group and a filesystem relative block number. The
1480 * resource group must be set in the rbm on entry, the bi and
1481 * offset members will be set by this function.
1482 *
1483 * Returns: 0 on success, or an error code
1484 */
1485
1486static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
1487{
1488 u64 rblock = block - rbm->rgd->rd_data0;
1489 u32 goal = (u32)rblock;
1490 int x;
1491
1492 if (WARN_ON_ONCE(rblock > UINT_MAX))
1493 return -EINVAL;
8d8b752a
BP
1494 if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
1495 return -E2BIG;
5b924ae2
SW
1496
1497 for (x = 0; x < rbm->rgd->rd_length; x++) {
1498 rbm->bi = rbm->rgd->rd_bits + x;
1499 if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
1500 rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
8d8b752a 1501 break;
5b924ae2
SW
1502 }
1503 }
1504
8d8b752a 1505 return 0;
5b924ae2
SW
1506}
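A hedged round-trip sketch of the conversion described above; gfs2_rbm_to_block()
is the inverse helper already used elsewhere in this file, and the function
name here is hypothetical:

static bool example_rbm_roundtrip(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };

	if (gfs2_rbm_from_block(&rbm, block))
		return false;
	/* bi and offset are now set; converting back must give the same block */
	return gfs2_rbm_to_block(&rbm) == block;
}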
1507
1508/**
1509 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1510 * @rbm: The current position in the resource group
1511 *
1512 * This checks the current position in the rgrp to see whether there is
1513 * a reservation covering this block. If not then this function is a
1514 * no-op. If there is, then the position is moved to the end of the
1515 * contiguous reservation(s) so that we are pointing at the first
1516 * non-reserved block.
1517 *
1518 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1519 */
1520
1521static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1522 const struct gfs2_inode *ip)
1523{
1524 u64 block = gfs2_rbm_to_block(rbm);
1525 u64 nblock;
1526 int ret;
1527
1528 nblock = gfs2_next_unreserved_block(rbm->rgd, block, ip);
1529 if (nblock == block)
1530 return 0;
1531 ret = gfs2_rbm_from_block(rbm, nblock);
1532 if (ret < 0)
1533 return ret;
1534 return 1;
1535}
1536
1537/**
1538 * gfs2_rbm_find - Look for blocks of a particular state
1539 * @rbm: Value/result starting position and final position
1540 * @state: The state which we want to find
1541 * @ip: If set, check for reservations
1542 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1543 * around until we've reached the starting point.
1544 *
1545 * Side effects:
1546 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1547 * has no free blocks in it.
1548 *
1549 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1550 */
1551
1552static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state,
1553 const struct gfs2_inode *ip, bool nowrap)
1554{
1555 struct buffer_head *bh;
1556 struct gfs2_bitmap *initial_bi;
1557 u32 initial_offset;
1558 u32 offset;
1559 u8 *buffer;
1560 int index;
1561 int n = 0;
1562 int iters = rbm->rgd->rd_length;
1563 int ret;
1564
1565 /* If we are not starting at the beginning of a bitmap, then we
1566 * need to add one to the bitmap count to ensure that we search
1567 * the starting bitmap twice.
1568 */
1569 if (rbm->offset != 0)
1570 iters++;
1571
1572 while(1) {
1573 if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
1574 (state == GFS2_BLKST_FREE))
1575 goto next_bitmap;
1576
1577 bh = rbm->bi->bi_bh;
1578 buffer = bh->b_data + rbm->bi->bi_offset;
1579 WARN_ON(!buffer_uptodate(bh));
1580 if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
1581 buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
5b924ae2
SW
1582 initial_offset = rbm->offset;
1583 offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
1584 if (offset == BFITNOENT)
1585 goto bitmap_full;
1586 rbm->offset = offset;
1587 if (ip == NULL)
1588 return 0;
1589
1590 initial_bi = rbm->bi;
1591 ret = gfs2_reservation_check_and_update(rbm, ip);
1592 if (ret == 0)
1593 return 0;
1594 if (ret > 0) {
1595 n += (rbm->bi - initial_bi);
8d8b752a 1596 goto next_iter;
5b924ae2 1597 }
5d50d532
SW
1598 if (ret == -E2BIG) {
1599 index = 0;
1600 rbm->offset = 0;
1601 n += (rbm->bi - initial_bi);
1602 goto res_covered_end_of_rgrp;
1603 }
5b924ae2
SW
1604 return ret;
1605
1606bitmap_full: /* Mark bitmap as full and fall through */
1607 if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
1608 set_bit(GBF_FULL, &rbm->bi->bi_flags);
1609
1610next_bitmap: /* Find next bitmap in the rgrp */
1611 rbm->offset = 0;
1612 index = rbm->bi - rbm->rgd->rd_bits;
1613 index++;
1614 if (index == rbm->rgd->rd_length)
1615 index = 0;
5d50d532 1616res_covered_end_of_rgrp:
5b924ae2
SW
1617 rbm->bi = &rbm->rgd->rd_bits[index];
1618 if ((index == 0) && nowrap)
1619 break;
1620 n++;
8d8b752a 1621next_iter:
5b924ae2
SW
1622 if (n >= iters)
1623 break;
1624 }
1625
1626 return -ENOSPC;
1627}
1628
c8cdf479
SW
1629/**
1630 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1631 * @rgd: The rgrp
886b1416
BP
1632 * @last_unlinked: block address of the last dinode we unlinked
1633 * @skip: block address we should explicitly not unlink
c8cdf479 1634 *
1a0eae88
BP
1635 * Any unlinked but still allocated inodes that are found have their
 1636 * deletion queued on gfs2_delete_workqueue; nothing is returned.
c8cdf479
SW
1637 */
1638
044b9414 1639static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
c8cdf479 1640{
5b924ae2 1641 u64 block;
5f3eae75 1642 struct gfs2_sbd *sdp = rgd->rd_sbd;
044b9414
SW
1643 struct gfs2_glock *gl;
1644 struct gfs2_inode *ip;
1645 int error;
1646 int found = 0;
5b924ae2 1647 struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
c8cdf479 1648
5b924ae2 1649 while (1) {
5f3eae75 1650 down_write(&sdp->sd_log_flush_lock);
5b924ae2 1651 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, true);
5f3eae75 1652 up_write(&sdp->sd_log_flush_lock);
5b924ae2
SW
1653 if (error == -ENOSPC)
1654 break;
1655 if (WARN_ON_ONCE(error))
24c73873 1656 break;
b3e47ca0 1657
5b924ae2
SW
1658 block = gfs2_rbm_to_block(&rbm);
1659 if (gfs2_rbm_from_block(&rbm, block + 1))
1660 break;
1661 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
c8cdf479 1662 continue;
5b924ae2 1663 if (block == skip)
1e19a195 1664 continue;
5b924ae2 1665 *last_unlinked = block;
044b9414 1666
5b924ae2 1667 error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
044b9414
SW
1668 if (error)
1669 continue;
1670
1671 /* If the inode is already in cache, we can ignore it here
1672 * because the existing inode disposal code will deal with
1673 * it when all refs have gone away. Accessing gl_object like
1674 * this is not safe in general. Here it is ok because we do
1675 * not dereference the pointer, and we only need an approx
1676 * answer to whether it is NULL or not.
1677 */
1678 ip = gl->gl_object;
1679
1680 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1681 gfs2_glock_put(gl);
1682 else
1683 found++;
1684
1685 /* Limit reclaim to sensible number of tasks */
44ad37d6 1686 if (found > NR_CPUS)
044b9414 1687 return;
c8cdf479
SW
1688 }
1689
1690 rgd->rd_flags &= ~GFS2_RDF_CHECK;
044b9414 1691 return;
c8cdf479
SW
1692}
1693
b3b94faa 1694/**
666d1d8a 1695 * gfs2_inplace_reserve - Reserve space in the filesystem
b3b94faa 1696 * @ip: the inode to reserve space for
666d1d8a 1697 * @requested: the number of blocks to be reserved
b3b94faa
DT
1698 *
1699 * Returns: errno
1700 */
1701
666d1d8a 1702int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
b3b94faa 1703{
feaa7bba 1704 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
8e2e0047 1705 struct gfs2_rgrpd *begin = NULL;
564e12b1 1706 struct gfs2_blkreserv *rs = ip->i_res;
666d1d8a
BP
1707 int error = 0, rg_locked, flags = LM_FLAG_TRY;
1708 u64 last_unlinked = NO_BLOCK;
7c9ca621 1709 int loops = 0;
b3b94faa 1710
90306c41
BM
1711 if (sdp->sd_args.ar_rgrplvb)
1712 flags |= GL_SKIP;
666d1d8a
BP
1713 if (gfs2_assert_warn(sdp, requested)) {
1714 error = -EINVAL;
1715 goto out;
1716 }
8e2e0047 1717 if (gfs2_rs_active(rs)) {
4a993fb1 1718 begin = rs->rs_rbm.rgd;
8e2e0047
BP
1719 flags = 0; /* Yoda: Do or do not. There is no try */
1720 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
4a993fb1 1721 rs->rs_rbm.rgd = begin = ip->i_rgd;
8e2e0047 1722 } else {
4a993fb1 1723 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
8e2e0047 1724 }
4a993fb1 1725 if (rs->rs_rbm.rgd == NULL)
7c9ca621
BP
1726 return -EBADSLT;
1727
1728 while (loops < 3) {
292c8c14
AD
1729 rg_locked = 0;
1730
4a993fb1 1731 if (gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
292c8c14
AD
1732 rg_locked = 1;
1733 error = 0;
8e2e0047 1734 } else if (!loops && !gfs2_rs_active(rs) &&
4a993fb1 1735 rs->rs_rbm.rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) {
8e2e0047
BP
1736 /* If the rgrp already is maxed out for contenders,
1737 we can eliminate it as a "first pass" without even
1738 requesting the rgrp glock. */
1739 error = GLR_TRYFAILED;
292c8c14 1740 } else {
4a993fb1 1741 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
8e2e0047
BP
1742 LM_ST_EXCLUSIVE, flags,
1743 &rs->rs_rgd_gh);
90306c41 1744 if (!error && sdp->sd_args.ar_rgrplvb) {
4a993fb1 1745 error = update_rgrp_lvb(rs->rs_rbm.rgd);
90306c41
BM
1746 if (error) {
1747 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
1748 return error;
1749 }
1750 }
292c8c14 1751 }
b3b94faa
DT
1752 switch (error) {
1753 case 0:
8e2e0047 1754 if (gfs2_rs_active(rs)) {
4a993fb1 1755 if (unclaimed_blocks(rs->rs_rbm.rgd) +
71f890f7 1756 rs->rs_free >= requested) {
4a993fb1 1757 ip->i_rgd = rs->rs_rbm.rgd;
8e2e0047
BP
1758 return 0;
1759 }
1760 /* We have a multi-block reservation, but the
1761 rgrp doesn't have enough free blocks to
1762 satisfy the request. Free the reservation
1763 and look for a suitable rgrp. */
4a993fb1 1764 gfs2_rs_deltree(ip, rs);
8e2e0047 1765 }
4a993fb1 1766 if (try_rgrp_fit(rs->rs_rbm.rgd, ip, requested)) {
90306c41 1767 if (sdp->sd_args.ar_rgrplvb)
4a993fb1
SW
1768 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
1769 ip->i_rgd = rs->rs_rbm.rgd;
7c9ca621 1770 return 0;
54335b1f 1771 }
4a993fb1 1772 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) {
90306c41 1773 if (sdp->sd_args.ar_rgrplvb)
4a993fb1
SW
1774 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
1775 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
666d1d8a 1776 ip->i_no_addr);
90306c41 1777 }
292c8c14 1778 if (!rg_locked)
564e12b1 1779 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
9cabcdbd 1780 /* fall through */
b3b94faa 1781 case GLR_TRYFAILED:
4a993fb1
SW
1782 rs->rs_rbm.rgd = gfs2_rgrpd_get_next(rs->rs_rbm.rgd);
1783 rs->rs_rbm.rgd = rs->rs_rbm.rgd ? : begin; /* if NULL, wrap */
1784 if (rs->rs_rbm.rgd != begin) /* If we didn't wrap */
666d1d8a
BP
1785 break;
1786
1787 flags &= ~LM_FLAG_TRY;
1788 loops++;
1789 /* Check that fs hasn't grown if writing to rindex */
1790 if (ip == GFS2_I(sdp->sd_rindex) &&
1791 !sdp->sd_rindex_uptodate) {
1792 error = gfs2_ri_update(ip);
1793 if (error)
1794 goto out;
1795 } else if (loops == 2)
1796 /* Flushing the log may release space */
1797 gfs2_log_flush(sdp, NULL);
b3b94faa 1798 break;
b3b94faa 1799 default:
666d1d8a 1800 goto out;
b3b94faa 1801 }
b3b94faa 1802 }
666d1d8a 1803 error = -ENOSPC;
b3b94faa 1804
564e12b1 1805out:
9ae32429 1806 return error;
b3b94faa
DT
1807}
1808
1809/**
1810 * gfs2_inplace_release - release an inplace reservation
1811 * @ip: the inode the reservation was taken out on
1812 *
1813 * Release a reservation made by gfs2_inplace_reserve().
1814 */
1815
1816void gfs2_inplace_release(struct gfs2_inode *ip)
1817{
564e12b1 1818 struct gfs2_blkreserv *rs = ip->i_res;
b3b94faa 1819
564e12b1
BP
1820 if (rs->rs_rgd_gh.gh_gl)
1821 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
b3b94faa
DT
1822}
1823
1824/**
1825 * gfs2_get_block_type - Check that a block in an RG is of a given type
1826 * @rgd: the resource group holding the block
1827 * @block: the block number
1828 *
1829 * Returns: The block type (GFS2_BLKST_*)
1830 */
1831
acf7e244 1832static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
b3b94faa 1833{
3983903a
SW
1834 struct gfs2_rbm rbm = { .rgd = rgd, };
1835 int ret;
b3b94faa 1836
3983903a
SW
1837 ret = gfs2_rbm_from_block(&rbm, block);
1838 WARN_ON_ONCE(ret != 0);
b3b94faa 1839
3983903a
SW
1840 return gfs2_testbit(rgd, rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
1841 rbm.bi->bi_len, rbm.offset);
b3b94faa
DT
1842}
1843
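/*
 * A minimal, self-contained sketch of the two-bits-per-block bitmap
 * arithmetic that gfs2_get_block_type() relies on.  Each byte holds
 * GFS2_NBBY (4) block states of GFS2_BIT_SIZE (2) bits each, encoding
 * GFS2_BLKST_FREE (0), GFS2_BLKST_USED (1), GFS2_BLKST_UNLINKED (2) or
 * GFS2_BLKST_DINODE (3).  The demo_* names below are hypothetical and
 * exist only to illustrate the byte/bit arithmetic; this is a userspace
 * illustration, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_NBBY	4	/* blocks described per byte */
#define DEMO_BIT_SIZE	2	/* bits per block state */
#define DEMO_BIT_MASK	0x3

static unsigned char demo_get_state(const unsigned char *bitmap, unsigned int block)
{
	unsigned int bit = (block % DEMO_NBBY) * DEMO_BIT_SIZE;

	return (bitmap[block / DEMO_NBBY] >> bit) & DEMO_BIT_MASK;
}

static void demo_set_state(unsigned char *bitmap, unsigned int block,
			   unsigned char state)
{
	unsigned int bit = (block % DEMO_NBBY) * DEMO_BIT_SIZE;
	unsigned char *byte = &bitmap[block / DEMO_NBBY];

	*byte = (*byte & ~(DEMO_BIT_MASK << bit)) | (state << bit);
}

int main(void)
{
	unsigned char bitmap[2] = { 0, 0 };	/* 8 blocks, all free */

	demo_set_state(bitmap, 5, 3);		/* mark block 5 as a dinode */
	assert(demo_get_state(bitmap, 5) == 3);
	assert(demo_get_state(bitmap, 4) == 0);	/* neighbours unaffected */
	printf("block 5 state = %u\n", demo_get_state(bitmap, 5));
	return 0;
}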
60a0b8f9 1844
b3e47ca0
BP
1845/**
1846 * gfs2_alloc_extent - allocate an extent from a given bitmap
4a993fb1 1847 * @rbm: the position in the resource group at which to start allocating
b3e47ca0
BP
1848 * @dinode: TRUE if the first block we allocate is for a dinode
1849 * @n: The extent length
1850 *
1851 * Add the found bitmap buffer to the transaction.
1852 * Set the found bits to GFS2_BLKST_DINODE or GFS2_BLKST_USED as appropriate.
1853 * Returns: starting block number of the extent (fs scope)
1854 */
4a993fb1
SW
1855static u64 gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
1856 unsigned int *n)
b3e47ca0 1857{
4a993fb1
SW
1858 struct gfs2_rgrpd *rgd = rbm->rgd;
1859 struct gfs2_bitmap *bi = rbm->bi;
1860 u32 blk = rbm->offset;
b3e47ca0 1861 const unsigned int elen = *n;
5b924ae2 1862 u32 goal;
b3e47ca0
BP
1863 const u8 *buffer = NULL;
1864
6a8099ed 1865 *n = 0;
b3e47ca0 1866 buffer = bi->bi_bh->b_data + bi->bi_offset;
60a0b8f9 1867 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
06344b91
BP
1868 gfs2_setbit(rgd, bi->bi_clone, bi, blk,
1869 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
6a8099ed 1870 (*n)++;
60a0b8f9
SW
1871 goal = blk;
1872 while (*n < elen) {
1873 goal++;
1874 if (goal >= (bi->bi_len * GFS2_NBBY))
1875 break;
1876 if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
1877 GFS2_BLKST_FREE)
1878 break;
06344b91 1879 gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED);
60a0b8f9 1880 (*n)++;
c8cdf479 1881 }
b3e47ca0 1882 blk = gfs2_bi2rgd_blk(bi, blk);
6a8099ed 1883 rgd->rd_last_alloc = blk + *n - 1;
b3e47ca0 1884 return rgd->rd_data0 + blk;
b3b94faa
DT
1885}
1886
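/*
 * A small sketch of the forward extent walk performed by
 * gfs2_alloc_extent() above: starting at the goal offset (which the
 * caller has already verified is free), keep claiming consecutive free
 * blocks until the requested length is reached, the end of the bitmap
 * is hit, or an in-use block is found.  For clarity this uses one byte
 * per block state instead of the packed two-bit encoding; all demo_*
 * names are hypothetical.
 */
#include <stdio.h>

/* Returns the number of blocks claimed, starting at @goal. */
static unsigned int demo_extent_alloc(unsigned char *state,
				      unsigned int nblocks,
				      unsigned int goal,
				      unsigned int requested)
{
	unsigned int n = 0;

	while (n < requested && goal + n < nblocks && state[goal + n] == 0) {
		state[goal + n] = 1;		/* mark as used */
		n++;
	}
	return n;
}

int main(void)
{
	unsigned char state[16] = { 0 };	/* 16 blocks, all free */

	state[7] = 1;				/* block 7 already in use */
	printf("claimed %u of 8 requested\n",
	       demo_extent_alloc(state, 16, 4, 8));	/* stops at block 7 */
	return 0;
}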
1887/**
1888 * rgblk_free - Change alloc state of given block(s)
1889 * @sdp: the filesystem
1890 * @bstart: the start of a run of blocks to free
1891 * @blen: the length of the block run (all must lie within ONE RG!)
1892 * @new_state: GFS2_BLKST_XXX the after-allocation block state
1893 *
1894 * Returns: Resource group containing the block(s)
1895 */
1896
cd915493
SW
1897static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
1898 u32 blen, unsigned char new_state)
b3b94faa 1899{
3b1d0b9d 1900 struct gfs2_rbm rbm;
b3b94faa 1901
3b1d0b9d
SW
1902 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
1903 if (!rbm.rgd) {
b3b94faa 1904 if (gfs2_consist(sdp))
382066da 1905 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
b3b94faa
DT
1906 return NULL;
1907 }
1908
b3b94faa 1909 while (blen--) {
3b1d0b9d
SW
1910 gfs2_rbm_from_block(&rbm, bstart);
1911 bstart++;
1912 if (!rbm.bi->bi_clone) {
1913 rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
1914 GFP_NOFS | __GFP_NOFAIL);
1915 memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
1916 rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
1917 rbm.bi->bi_len);
b3b94faa 1918 }
3b1d0b9d
SW
1919 gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
1920 gfs2_setbit(rbm.rgd, NULL, rbm.bi, rbm.offset, new_state);
b3b94faa
DT
1921 }
1922
3b1d0b9d 1923 return rbm.rgd;
b3b94faa
DT
1924}
1925
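/*
 * Sketch of the deferred-reuse idea behind bi_clone in rgblk_free():
 * the first free in a bitmap snapshots it, subsequent frees only touch
 * the live copy, and the allocator keeps searching the snapshot, so
 * blocks freed in the current transaction cannot be handed out again
 * until the snapshot is dropped (in GFS2, once the journal has been
 * flushed).  All demo_* names are hypothetical; this is a userspace
 * illustration of the idea only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BLOCKS 8

struct demo_bitmap {
	unsigned char live[DEMO_BLOCKS];	/* 1 = used, 0 = free */
	unsigned char snap[DEMO_BLOCKS];	/* snapshot used for allocation */
	bool have_snap;
};

static void demo_free(struct demo_bitmap *bm, unsigned int blk)
{
	if (!bm->have_snap) {			/* first free: take snapshot */
		memcpy(bm->snap, bm->live, sizeof(bm->snap));
		bm->have_snap = true;
	}
	bm->live[blk] = 0;			/* freed in the live bitmap only */
}

static int demo_alloc(struct demo_bitmap *bm)
{
	const unsigned char *search = bm->have_snap ? bm->snap : bm->live;
	unsigned int i;

	for (i = 0; i < DEMO_BLOCKS; i++)
		if (!search[i]) {
			bm->live[i] = 1;	/* claim in both copies */
			if (bm->have_snap)
				bm->snap[i] = 1;
			return i;
		}
	return -1;
}

static void demo_log_flush(struct demo_bitmap *bm)
{
	bm->have_snap = false;			/* freed blocks reusable again */
}

int main(void)
{
	struct demo_bitmap bm = { .live = { 1, 1, 1, 0, 0, 0, 0, 0 } };

	demo_free(&bm, 1);
	printf("alloc before flush: %d\n", demo_alloc(&bm));	/* 3, not 1 */
	demo_log_flush(&bm);
	printf("alloc after flush:  %d\n", demo_alloc(&bm));	/* 1 */
	return 0;
}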
1926/**
09010978
SW
1927 * gfs2_rgrp_dump - print out an rgrp
1928 * @seq: The iterator
1929 * @gl: The glock in question
1930 *
1931 */
1932
1933int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
1934{
8e2e0047
BP
1935 struct gfs2_rgrpd *rgd = gl->gl_object;
1936 struct gfs2_blkreserv *trs;
1937 const struct rb_node *n;
1938
09010978
SW
1939 if (rgd == NULL)
1940 return 0;
8e2e0047 1941 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
09010978 1942 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
8e2e0047
BP
1943 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
1944 rgd->rd_reserved);
1945 spin_lock(&rgd->rd_rsspin);
1946 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
1947 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1948 dump_rs(seq, trs);
1949 }
1950 spin_unlock(&rgd->rd_rsspin);
09010978
SW
1951 return 0;
1952}
1953
6050b9c7
SW
1954static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
1955{
1956 struct gfs2_sbd *sdp = rgd->rd_sbd;
1957 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
86d00636 1958 (unsigned long long)rgd->rd_addr);
6050b9c7
SW
1959 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
1960 gfs2_rgrp_dump(NULL, rgd->rd_gl);
1961 rgd->rd_flags |= GFS2_RDF_ERROR;
1962}
1963
8e2e0047 1964/**
5b924ae2
SW
1965 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
1966 * @ip: The inode we have just allocated blocks for
1967 * @rbm: The start of the allocated blocks
1968 * @len: The extent length
8e2e0047 1969 *
5b924ae2
SW
1970 * Adjusts a reservation after an allocation has taken place. If the
1971 * reservation does not match the allocation, or if it is now empty,
1972 * then it is removed.
8e2e0047 1973 */
5b924ae2
SW
1974
1975static void gfs2_adjust_reservation(struct gfs2_inode *ip,
1976 const struct gfs2_rbm *rbm, unsigned len)
8e2e0047
BP
1977{
1978 struct gfs2_blkreserv *rs = ip->i_res;
5b924ae2
SW
1979 struct gfs2_rgrpd *rgd = rbm->rgd;
1980 unsigned rlen;
1981 u64 block;
1982 int ret;
8e2e0047 1983
5b924ae2
SW
1984 spin_lock(&rgd->rd_rsspin);
1985 if (gfs2_rs_active(rs)) {
1986 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
1987 block = gfs2_rbm_to_block(rbm);
1988 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
1989 rlen = min(rs->rs_free, len);
1990 rs->rs_free -= rlen;
1991 rgd->rd_reserved -= rlen;
1992 trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
1993 if (rs->rs_free && !ret)
1994 goto out;
1995 }
1996 __rs_deltree(ip, rs);
8e2e0047 1997 }
5b924ae2
SW
1998out:
1999 spin_unlock(&rgd->rd_rsspin);
8e2e0047
BP
2000}
2001
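/*
 * A tiny sketch of the bookkeeping gfs2_adjust_reservation() performs:
 * after claiming @len blocks from the front of a reservation, advance
 * its start, shrink both the reservation's and the rgrp's reserved
 * counts, and drop the reservation once it is empty.  The struct and
 * helper below are hypothetical stand-ins for the in-kernel
 * gfs2_blkreserv handling, shown for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_rs {
	unsigned long long start;	/* first reserved block */
	unsigned int free;		/* blocks still reserved */
};

/* Returns true if the reservation survives, false if it was consumed. */
static bool demo_adjust(struct demo_rs *rs, unsigned int *rgrp_reserved,
			unsigned int len)
{
	unsigned int rlen = len < rs->free ? len : rs->free;

	rs->start += len;
	rs->free -= rlen;
	*rgrp_reserved -= rlen;
	return rs->free != 0;
}

int main(void)
{
	struct demo_rs rs = { .start = 1000, .free = 32 };
	unsigned int rgrp_reserved = 32;

	demo_adjust(&rs, &rgrp_reserved, 8);
	printf("next=%llu still reserved=%u (rgrp %u)\n",
	       rs.start, rs.free, rgrp_reserved);	/* 1008, 24, 24 */
	return 0;
}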
09010978 2002/**
6e87ed0f 2003 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
1639431a 2004 * @ip: the inode to allocate the block for
09010978 2005 * @bn: Used to return the starting block number
8e2e0047 2006 * @nblocks: requested number of blocks/extent length (value/result)
6e87ed0f 2007 * @dinode: 1 if we're allocating a dinode block, else 0
3c5d785a 2008 * @generation: the generation number of the inode
b3b94faa 2009 *
09010978 2010 * Returns: 0 or error
b3b94faa
DT
2011 */
2012
6a8099ed 2013int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
6e87ed0f 2014 bool dinode, u64 *generation)
b3b94faa 2015{
feaa7bba 2016 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
d9ba7615 2017 struct buffer_head *dibh;
4a993fb1 2018 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
6a8099ed 2019 unsigned int ndata;
5b924ae2 2020 u64 goal;
3c5d785a 2021 u64 block; /* block, within the file system scope */
d9ba7615 2022 int error;
b3b94faa 2023
5b924ae2
SW
2024 if (gfs2_rs_active(ip->i_res))
2025 goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
2026 else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
2027 goal = ip->i_goal;
62e252ee 2028 else
5b924ae2 2029 goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
62e252ee 2030
8d8b752a
BP
2031 if ((goal < rbm.rgd->rd_data0) ||
2032 (goal >= rbm.rgd->rd_data0 + rbm.rgd->rd_data))
2033 rbm.rgd = gfs2_blk2rgrpd(sdp, goal, 1);
2034
5b924ae2
SW
2035 gfs2_rbm_from_block(&rbm, goal);
2036 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, ip, false);
62e252ee
SW
2037
2038 /* Since all blocks are reserved in advance, this shouldn't happen */
5b924ae2
SW
2039 if (error) {
2040 fs_warn(sdp, "error=%d, nblocks=%u, full=%d\n", error, *nblocks,
2041 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
62e252ee 2042 goto rgrp_error;
8e2e0047 2043 }
62e252ee 2044
4a993fb1 2045 block = gfs2_alloc_extent(&rbm, dinode, nblocks);
5b924ae2
SW
2046 if (gfs2_rs_active(ip->i_res))
2047 gfs2_adjust_reservation(ip, &rbm, *nblocks);
6a8099ed
SW
2048 ndata = *nblocks;
2049 if (dinode)
2050 ndata--;
b3e47ca0 2051
3c5d785a 2052 if (!dinode) {
6a8099ed 2053 ip->i_goal = block + ndata - 1;
3c5d785a
BP
2054 error = gfs2_meta_inode_buffer(ip, &dibh);
2055 if (error == 0) {
2056 struct gfs2_dinode *di =
2057 (struct gfs2_dinode *)dibh->b_data;
2058 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
2059 di->di_goal_meta = di->di_goal_data =
2060 cpu_to_be64(ip->i_goal);
2061 brelse(dibh);
2062 }
d9ba7615 2063 }
4a993fb1 2064 if (rbm.rgd->rd_free < *nblocks) {
8e2e0047 2065 printk(KERN_WARNING "nblocks=%u\n", *nblocks);
09010978 2066 goto rgrp_error;
8e2e0047 2067 }
09010978 2068
4a993fb1 2069 rbm.rgd->rd_free -= *nblocks;
3c5d785a 2070 if (dinode) {
4a993fb1
SW
2071 rbm.rgd->rd_dinodes++;
2072 *generation = rbm.rgd->rd_igeneration++;
3c5d785a 2073 if (*generation == 0)
4a993fb1 2074 *generation = rbm.rgd->rd_igeneration++;
3c5d785a 2075 }
b3b94faa 2076
4a993fb1
SW
2077 gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
2078 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2079 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
b3b94faa 2080
6a8099ed 2081 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
3c5d785a
BP
2082 if (dinode)
2083 gfs2_trans_add_unrevoke(sdp, block, 1);
6a8099ed
SW
2084
2085 /*
2086 * This needs reviewing to see why we cannot do the quota change
2087 * at this point in the dinode case.
2088 */
2089 if (ndata)
2090 gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
3c5d785a 2091 ip->i_inode.i_gid);
b3b94faa 2092
4a993fb1
SW
2093 rbm.rgd->rd_free_clone -= *nblocks;
2094 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
6e87ed0f 2095 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
6050b9c7
SW
2096 *bn = block;
2097 return 0;
2098
2099rgrp_error:
4a993fb1 2100 gfs2_rgrp_error(rbm.rgd);
6050b9c7 2101 return -EIO;
b3b94faa
DT
2102}
2103
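/*
 * A minimal sketch (not a real caller) of how the functions above fit
 * together: reserve space in some rgrp with gfs2_inplace_reserve(),
 * allocate an extent of data blocks from it with gfs2_alloc_blocks(),
 * then drop the rgrp glock with gfs2_inplace_release().  A real
 * allocation path also takes quota locks and opens a transaction
 * before calling gfs2_alloc_blocks(); those steps are omitted here for
 * brevity, and demo_alloc_data_extent() is a hypothetical name.
 */
static int demo_alloc_data_extent(struct gfs2_inode *ip, unsigned int wanted,
				  u64 *first_block, unsigned int *got)
{
	int error;

	error = gfs2_inplace_reserve(ip, wanted);
	if (error)
		return error;

	*got = wanted;	/* value/result: may come back shorter */
	error = gfs2_alloc_blocks(ip, first_block, got, false, NULL);

	gfs2_inplace_release(ip);
	return error;
}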
2104/**
46fcb2ed 2105 * __gfs2_free_blocks - free a contiguous run of block(s)
b3b94faa
DT
2106 * @ip: the inode these blocks are being freed from
2107 * @bstart: first block of a run of contiguous blocks
2108 * @blen: the length of the block run
46fcb2ed 2109 * @meta: 1 if the blocks represent metadata
b3b94faa
DT
2110 *
2111 */
2112
46fcb2ed 2113void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
b3b94faa 2114{
feaa7bba 2115 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
2116 struct gfs2_rgrpd *rgd;
2117
2118 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2119 if (!rgd)
2120 return;
41db1ab9 2121 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
cfc8b549 2122 rgd->rd_free += blen;
66fc061b 2123 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
d4e9c4c3 2124 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
42d52e38 2125 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
90306c41 2126 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
b3b94faa 2127
6d3117b4 2128 /* Directories keep their data in the metadata address space */
46fcb2ed 2129 if (meta || ip->i_depth)
6d3117b4 2130 gfs2_meta_wipe(ip, bstart, blen);
4c16c36a 2131}
b3b94faa 2132
4c16c36a
BP
2133/**
2134 * gfs2_free_meta - free a contiguous run of metadata block(s)
2135 * @ip: the inode these blocks are being freed from
2136 * @bstart: first block of a run of contiguous blocks
2137 * @blen: the length of the block run
2138 *
2139 */
2140
2141void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
2142{
2143 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2144
46fcb2ed 2145 __gfs2_free_blocks(ip, bstart, blen, 1);
b3b94faa 2146 gfs2_statfs_change(sdp, 0, +blen, 0);
2933f925 2147 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
b3b94faa
DT
2148}
2149
feaa7bba
SW
2150void gfs2_unlink_di(struct inode *inode)
2151{
2152 struct gfs2_inode *ip = GFS2_I(inode);
2153 struct gfs2_sbd *sdp = GFS2_SB(inode);
2154 struct gfs2_rgrpd *rgd;
dbb7cae2 2155 u64 blkno = ip->i_no_addr;
feaa7bba
SW
2156
2157 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2158 if (!rgd)
2159 return;
41db1ab9 2160 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
feaa7bba 2161 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
42d52e38 2162 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
90306c41
BM
2163 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2164 update_rgrp_lvb_unlinked(rgd, 1);
feaa7bba
SW
2165}
2166
cd915493 2167static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
b3b94faa
DT
2168{
2169 struct gfs2_sbd *sdp = rgd->rd_sbd;
2170 struct gfs2_rgrpd *tmp_rgd;
2171
2172 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2173 if (!tmp_rgd)
2174 return;
2175 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2176
73f74948 2177 if (!rgd->rd_dinodes)
b3b94faa 2178 gfs2_consist_rgrpd(rgd);
73f74948 2179 rgd->rd_dinodes--;
cfc8b549 2180 rgd->rd_free++;
b3b94faa 2181
d4e9c4c3 2182 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
42d52e38 2183 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
90306c41
BM
2184 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2185 update_rgrp_lvb_unlinked(rgd, -1);
b3b94faa
DT
2186
2187 gfs2_statfs_change(sdp, 0, +1, -1);
b3b94faa
DT
2188}
2189
b3b94faa
DT
2190
2191void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2192{
dbb7cae2 2193 gfs2_free_uninit_di(rgd, ip->i_no_addr);
41db1ab9 2194 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2933f925 2195 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
dbb7cae2 2196 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
b3b94faa
DT
2197}
2198
acf7e244
SW
2199/**
2200 * gfs2_check_blk_type - Check the type of a block
2201 * @sdp: The superblock
2202 * @no_addr: The block number to check
2203 * @type: The block type we are looking for
2204 *
2205 * Returns: 0 if the block type matches the expected type
2206 * -ESTALE if it doesn't match
2207 * or -ve errno if something went wrong while checking
2208 */
2209
2210int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2211{
2212 struct gfs2_rgrpd *rgd;
8339ee54 2213 struct gfs2_holder rgd_gh;
58884c4d 2214 int error = -EINVAL;
acf7e244 2215
66fc061b 2216 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
acf7e244 2217 if (!rgd)
8339ee54 2218 goto fail;
acf7e244
SW
2219
2220 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2221 if (error)
8339ee54 2222 goto fail;
acf7e244
SW
2223
2224 if (gfs2_get_block_type(rgd, no_addr) != type)
2225 error = -ESTALE;
2226
2227 gfs2_glock_dq_uninit(&rgd_gh);
acf7e244
SW
2228fail:
2229 return error;
2230}
2231
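/*
 * Sketch of a typical gfs2_check_blk_type() call: before trusting a
 * block number that arrived from outside the normal lookup path (for
 * example an unlinked inode spotted by try_rgrp_unlink()), confirm that
 * the bitmap still records the expected state for it.
 * demo_verify_unlinked() is a hypothetical wrapper shown for
 * illustration only.
 */
static int demo_verify_unlinked(struct gfs2_sbd *sdp, u64 no_addr)
{
	int error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_UNLINKED);

	if (error == -ESTALE)
		fs_warn(sdp, "block %llu is no longer an unlinked inode\n",
			(unsigned long long)no_addr);
	return error;
}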
b3b94faa
DT
2232/**
2233 * gfs2_rlist_add - add a RG to a list of RGs
70b0c365 2234 * @ip: the inode
b3b94faa
DT
2235 * @rlist: the list of resource groups
2236 * @block: the block
2237 *
2238 * Figure out what RG a block belongs to and add that RG to the list
2239 *
2240 * FIXME: Don't use NOFAIL
2241 *
2242 */
2243
70b0c365 2244void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
cd915493 2245 u64 block)
b3b94faa 2246{
70b0c365 2247 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
2248 struct gfs2_rgrpd *rgd;
2249 struct gfs2_rgrpd **tmp;
2250 unsigned int new_space;
2251 unsigned int x;
2252
2253 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2254 return;
2255
70b0c365
SW
2256 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2257 rgd = ip->i_rgd;
2258 else
66fc061b 2259 rgd = gfs2_blk2rgrpd(sdp, block, 1);
b3b94faa 2260 if (!rgd) {
70b0c365 2261 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
b3b94faa
DT
2262 return;
2263 }
70b0c365 2264 ip->i_rgd = rgd;
b3b94faa
DT
2265
2266 for (x = 0; x < rlist->rl_rgrps; x++)
2267 if (rlist->rl_rgd[x] == rgd)
2268 return;
2269
2270 if (rlist->rl_rgrps == rlist->rl_space) {
2271 new_space = rlist->rl_space + 10;
2272
2273 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
dd894be8 2274 GFP_NOFS | __GFP_NOFAIL);
b3b94faa
DT
2275
2276 if (rlist->rl_rgd) {
2277 memcpy(tmp, rlist->rl_rgd,
2278 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2279 kfree(rlist->rl_rgd);
2280 }
2281
2282 rlist->rl_space = new_space;
2283 rlist->rl_rgd = tmp;
2284 }
2285
2286 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2287}
2288
2289/**
2290 * gfs2_rlist_alloc - allocate and initialize an array of glock holders,
2291 * one for each RG that has been added to the rlist
2292 * @rlist: the list of resource groups
2293 * @state: the lock state to acquire the RG lock in
b3b94faa
DT
2294 *
2295 * FIXME: Don't use NOFAIL
2296 *
2297 */
2298
fe6c991c 2299void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
b3b94faa
DT
2300{
2301 unsigned int x;
2302
2303 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
dd894be8 2304 GFP_NOFS | __GFP_NOFAIL);
b3b94faa
DT
2305 for (x = 0; x < rlist->rl_rgrps; x++)
2306 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
fe6c991c 2307 state, 0,
b3b94faa
DT
2308 &rlist->rl_ghs[x]);
2309}
2310
2311/**
2312 * gfs2_rlist_free - free a resource group list
2313 * @list: the list of resource groups
2314 *
2315 */
2316
2317void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2318{
2319 unsigned int x;
2320
2321 kfree(rlist->rl_rgd);
2322
2323 if (rlist->rl_ghs) {
2324 for (x = 0; x < rlist->rl_rgrps; x++)
2325 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2326 kfree(rlist->rl_ghs);
8e2e0047 2327 rlist->rl_ghs = NULL;
b3b94faa
DT
2328 }
2329}
2330
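/*
 * Sketch of how the rlist helpers above are meant to be used together:
 * collect every rgrp covering the blocks to be freed, acquire all of
 * their glocks, perform the frees inside a transaction, then release
 * the locks and free the list.  Transaction setup and the error
 * handling for gfs2_glock_nq() are elided, and demo_free_block_run()
 * is a hypothetical caller shown for illustration only.
 */
static void demo_free_block_run(struct gfs2_inode *ip, const u64 *blocks,
				unsigned int nblocks)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;

	memset(&rlist, 0, sizeof(rlist));
	for (x = 0; x < nblocks; x++)
		gfs2_rlist_add(ip, &rlist, blocks[x]);

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
	for (x = 0; x < rlist.rl_rgrps; x++)
		gfs2_glock_nq(&rlist.rl_ghs[x]);

	/* ... open a transaction and call gfs2_free_meta()/__gfs2_free_blocks() ... */

	for (x = 0; x < rlist.rl_rgrps; x++)
		gfs2_glock_dq(&rlist.rl_ghs[x]);
	gfs2_rlist_free(&rlist);
}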