]>
Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
7b718769 NS |
2 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | |
1da177e4 | 4 | * |
7b718769 NS |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | |
1da177e4 LT |
7 | * published by the Free Software Foundation. |
8 | * | |
7b718769 NS |
9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
1da177e4 | 13 | * |
7b718769 NS |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
1da177e4 | 17 | */ |
1da177e4 | 18 | #include "xfs.h" |
a844f451 | 19 | #include "xfs_fs.h" |
1da177e4 | 20 | #include "xfs_types.h" |
a844f451 | 21 | #include "xfs_bit.h" |
1da177e4 | 22 | #include "xfs_log.h" |
a844f451 | 23 | #include "xfs_inum.h" |
1da177e4 LT |
24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | |
a844f451 | 26 | #include "xfs_ag.h" |
1da177e4 LT |
27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" | |
29 | #include "xfs_mount.h" | |
1da177e4 | 30 | #include "xfs_bmap_btree.h" |
a844f451 | 31 | #include "xfs_alloc_btree.h" |
1da177e4 | 32 | #include "xfs_ialloc_btree.h" |
1da177e4 | 33 | #include "xfs_dir2_sf.h" |
a844f451 | 34 | #include "xfs_attr_sf.h" |
1da177e4 LT |
35 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | |
37 | #include "xfs_ialloc.h" | |
38 | #include "xfs_itable.h" | |
39 | #include "xfs_error.h" | |
a844f451 | 40 | #include "xfs_btree.h" |
1da177e4 | 41 | |
6f1f2168 VA |
42 | int |
43 | xfs_internal_inum( | |
44 | xfs_mount_t *mp, | |
45 | xfs_ino_t ino) | |
46 | { | |
47 | return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino || | |
48 | (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) && | |
49 | (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); | |
50 | } | |
51 | ||
/*
 * Fill in *buf with stat data for inode "ino" by doing a full in-core
 * xfs_iget().  The inode is held with the ILOCK shared for the duration
 * and released via xfs_iput() on every exit path after a successful iget.
 * On failure *stat is set to BULKSTAT_RV_NOTHING; a free inode
 * (di_mode == 0) is reported as ENOENT.
 */
STATIC int
xfs_bulkstat_one_iget(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	xfs_bstat_t	*buf,		/* return buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	xfs_icdinode_t	*dic;		/* dinode core info pointer */
	xfs_inode_t	*ip;		/* incore inode pointer */
	bhv_vnode_t	*vp;		/* vnode, needed for atime below */
	int		error;

	error = xfs_iget(mp, NULL, ino,
			 XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_blkno != (xfs_daddr_t)0);
	/* di_mode == 0 means the inode is free: nothing to report. */
	if (ip->i_d.di_mode == 0) {
		*stat = BULKSTAT_RV_NOTHING;
		error = XFS_ERROR(ENOENT);
		goto out_iput;
	}

	vp = XFS_ITOV(ip);
	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid = dic->di_projid;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	/* atime comes from the vnode rather than the dinode core */
	vn_atime_to_bstime(vp, &buf->bs_atime);
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	/* extsize is stored in filesystem blocks; convert to bytes */
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;

	/* rdev/blksize/blocks depend on the data fork format */
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* include delayed-allocation blocks not yet on disk */
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}

 out_iput:
	xfs_iput(ip, XFS_ILOCK_SHARED);
	return error;
}
131 | ||
/*
 * Fill in *buf directly from an on-disk (big-endian) dinode, avoiding
 * the cost of an iget.  Used for the BULKSTAT_FG_QUICK/INLINE paths.
 * Always returns 0.
 */
STATIC int
xfs_bulkstat_one_dinode(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	xfs_dinode_t	*dip,		/* dinode inode pointer */
	xfs_bstat_t	*buf)		/* return buffer */
{
	xfs_dinode_core_t *dic;		/* dinode core info pointer */

	dic = &dip->di_core;

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (dic->di_version == XFS_DINODE_VERSION_1) {
		/* v1 inodes keep a 16-bit link count in di_onlink */
		buf->bs_nlink = be16_to_cpu(dic->di_onlink);
		buf->bs_projid = 0;
	} else {
		buf->bs_nlink = be32_to_cpu(dic->di_nlink);
		buf->bs_projid = be16_to_cpu(dic->di_projid);
	}

	/* All on-disk fields are big-endian; convert as we copy. */
	buf->bs_ino = ino;
	buf->bs_mode = be16_to_cpu(dic->di_mode);
	buf->bs_uid = be32_to_cpu(dic->di_uid);
	buf->bs_gid = be32_to_cpu(dic->di_gid);
	buf->bs_size = be64_to_cpu(dic->di_size);
	buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
	buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
	buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
	buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
	buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
	buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
	buf->bs_xflags = xfs_dic2xflags(dic);
	/* extsize is stored in filesystem blocks; convert to bytes */
	buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
	buf->bs_extents = be32_to_cpu(dic->di_nextents);
	buf->bs_gen = be32_to_cpu(dic->di_gen);
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
	buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
	buf->bs_aextents = be16_to_cpu(dic->di_anextents);

	/* rdev/blksize/blocks depend on the data fork format */
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = be32_to_cpu(dip->di_u.di_dev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* no in-core inode here, so no delayed-alloc blocks to add */
		buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
		break;
	}

	return 0;
}
204 | ||
faa63e95 MM |
205 | STATIC int |
206 | xfs_bulkstat_one_fmt( | |
207 | void __user *ubuffer, | |
208 | const xfs_bstat_t *buffer) | |
209 | { | |
210 | if (copy_to_user(ubuffer, buffer, sizeof(*buffer))) | |
211 | return -EFAULT; | |
212 | return sizeof(*buffer); | |
213 | } | |
214 | ||
/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 *
 * Fills the stat buffer either from the passed-in on-disk dinode
 * (dibuff != NULL) or via a full iget, then pushes it to userspace
 * through "formatter" (private_data overrides the default formatter).
 * *stat reports BULKSTAT_RV_DIDONE/BULKSTAT_RV_NOTHING to the caller.
 */
int		       	       	/* error status */
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	void		*private_data,	/* my private data */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	int		*ubused,	/* bytes used by me */
	void		*dibuff,	/* on-disk inode buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	xfs_bstat_t	*buf;		/* return buffer */
	int		error = 0;	/* error value */
	xfs_dinode_t	*dip;		/* dinode inode pointer */
	/* GNU "?:" — use private_data as the formatter when supplied */
	bulkstat_one_fmt_pf formatter = private_data ? : xfs_bulkstat_one_fmt;

	dip = (xfs_dinode_t *)dibuff;
	*stat = BULKSTAT_RV_NOTHING;

	/* internal metadata inodes are never reported to userspace */
	if (!buffer || xfs_internal_inum(mp, ino))
		return XFS_ERROR(EINVAL);
	if (ubsize < sizeof(*buf))
		return XFS_ERROR(ENOMEM);

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP);

	if (dip == NULL) {
		/* We're not being passed a pointer to a dinode.  This happens
		 * if BULKSTAT_FG_IGET is selected.  Do the iget.
		 */
		error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
		if (error)
			goto out_free;
	} else {
		xfs_bulkstat_one_dinode(mp, ino, dip, buf);
	}

	/* formatter returns bytes written, or a negative errno */
	error = formatter(buffer, buf);
	if (error < 0)	{
		error = EFAULT;
		goto out_free;
	}

	*stat = BULKSTAT_RV_DIDONE;
	if (ubused)
		*ubused = error;

 out_free:
	kmem_free(buf, sizeof(*buf));
	return error;
}
271 | ||
/*
 * Test to see whether we can use the ondisk inode directly, based
 * on the given bulkstat flags, filling in dipp accordingly.
 *
 * Return value contract (note it is subtler than "zero if dodgy"):
 *  - returns 0 only when the on-disk inode fails validation (bad magic,
 *    bad version, or zero link count) — the caller skips the inode;
 *  - returns 1 with *dipp set when the dinode may be used directly;
 *  - returns 1 with *dipp left NULL when the caller must fall back to
 *    an iget (no buffer, BULKSTAT_FG_IGET, or INLINE with a non-local
 *    attr fork) — see the final "return 1" below, which is deliberate.
 */
STATIC int
xfs_bulkstat_use_dinode(
	xfs_mount_t	*mp,
	int		flags,
	xfs_buf_t	*bp,
	int		clustidx,
	xfs_dinode_t	**dipp)
{
	xfs_dinode_t	*dip;
	unsigned int	aformat;

	*dipp = NULL;
	/* no cluster buffer, or caller asked for igets: fall back */
	if (!bp || (flags & BULKSTAT_FG_IGET))
		return 1;
	dip = (xfs_dinode_t *)
		xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
	/*
	 * Check the buffer containing the on-disk inode for di_nlink == 0.
	 * This is to prevent xfs_bulkstat from picking up just reclaimed
	 * inodes that have their in-core state initialized but not flushed
	 * to disk yet. This is a temporary hack that would require a proper
	 * fix in the future.
	 */
	if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC ||
	    !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) ||
	    !dip->di_core.di_nlink)
		return 0;
	if (flags & BULKSTAT_FG_QUICK) {
		*dipp = dip;
		return 1;
	}
	/* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
	aformat = dip->di_core.di_aformat;
	if ((XFS_CFORK_Q(&dip->di_core) == 0) ||
	    (aformat == XFS_DINODE_FMT_LOCAL) ||
	    (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) {
		*dipp = dip;
		return 1;
	}
	/* non-inline attr fork: *dipp stays NULL, caller does an iget */
	return 1;
}
318 | ||
/*
 * Return stat information in bulk (by-inode) for the filesystem.
 *
 * Walks the inode allocation btree of each allocation group starting
 * at *lastinop, collecting inode chunk records into "irbuf", then
 * formats each allocated inode into the user's buffer via "formatter".
 * On return *lastinop is the next inode to continue from, *ubcountp
 * is the number of records emitted, and *done flags end of filesystem.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	void			*private_data,/* private data for formatter */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			flags,	/* defined in xfs_itable.h */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_agblock_t		agbno=0;/* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_daddr_t		bno;	/* inode cluster start daddr */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror;/* bulkstat formatter result */
	__int32_t		gcnt;	/* current btree rec's count */
	xfs_inofree_t		gfree;	/* current btree rec's free mask */
	xfs_agino_t		gino;	/* current btree rec's start inode */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino=0; /* last inode number returned */
	int			nbcluster; /* # of blocks in a cluster */
	int			nicluster; /* # of inodes in a cluster */
	int			nimask;	/* mask for inode clusters */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */
	xfs_buf_t		*bp;	/* ptr to on-disk inode cluster buf */
	xfs_dinode_t		*dip;	/* ptr into bp for specific inode */
	xfs_inode_t		*ip;	/* ptr to in-core inode struct */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	ino = (xfs_ino_t)*lastinop;
	dip = NULL;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	/* inodes per cluster: whole cluster per block, or part of one */
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	/* grab as large an irec buffer as we can get (between 1 and 4 NBPC) */
	irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (ubleft >= statstruct_size && agno < mp->m_sb.sb_agcount) {
		bp = NULL;
		down_read(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		up_read(&mp->m_peraglock);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
						(xfs_inode_t *)0, 0);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
			if (!error &&	/* no I/O error */
			    tmp &&	/* lookup succeeded */
			    /* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
				    &gfree, &i)) &&
			    i == 1 &&
			    /* this is the right chunk */
			    agino < gino + XFS_INODES_PER_CHUNK &&
			    /* lastino was not last in chunk */
			    (chunkidx = agino - gino + 1) <
				    XFS_INODES_PER_CHUNK &&
			    /* there are some left allocated */
			    XFS_INOBT_MASKN(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~gfree)
						gcnt++;
				}
				gfree |= XFS_INOBT_MASKN(0, chunkidx);
				irbp->ir_startino = gino;
				irbp->ir_freecount = gcnt;
				irbp->ir_free = gfree;
				irbp++;
				agino = gino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - gcnt;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_inobt_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			/*
			 * Loop as long as we're unable to read the
			 * inode btree.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
							    &tmp);
			}
			/*
			 * If ran off the end of the ag either with an error,
			 * or the normal way, set end and stop collecting.
			 */
			if (error ||
			    (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
				    &gfree, &i)) ||
			    i == 0) {
				end_of_ag = 1;
				break;
			}
			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (gcnt < XFS_INODES_PER_CHUNK) {
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
				     chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (XFS_INOBT_MASKN(chunkidx,
							    nicluster) & ~gfree)
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster);
				}
				irbp->ir_startino = gino;
				irbp->ir_freecount = gcnt;
				irbp->ir_free = gfree;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - gcnt;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = gino + XFS_INODES_PER_CHUNK;
			error = xfs_inobt_increment(cur, 0, &tmp);
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     ubleft > 0 &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
				/*
				 * Recompute agbno if this is the
				 * first inode of the cluster.
				 *
				 * Careful with clustidx.   There can be
				 * multple clusters per chunk, a single
				 * cluster per chunk or a cluster that has
				 * inodes represented from several different
				 * chunks (if blocksize is large).
				 *
				 * Because of this, the starting clustidx is
				 * initialized to zero in this loop but must
				 * later be reset after reading in the cluster
				 * buffer.
				 */
				if ((chunkidx & (nicluster - 1)) == 0) {
					agbno = XFS_AGINO_TO_AGBNO(mp,
							irbp->ir_startino) +
						((chunkidx & nimask) >>
						 mp->m_sb.sb_inopblog);

					if (flags & (BULKSTAT_FG_QUICK |
						     BULKSTAT_FG_INLINE)) {
						ino = XFS_AGINO_TO_INO(mp, agno,
								       agino);
						bno = XFS_AGB_TO_DADDR(mp, agno,
								       agbno);

						/*
						 * Get the inode cluster buffer
						 * using a throwaway in-core
						 * inode for xfs_itobp().
						 */
						ASSERT(xfs_inode_zone != NULL);
						ip = kmem_zone_zalloc(xfs_inode_zone,
								      KM_SLEEP);
						ip->i_ino = ino;
						ip->i_mount = mp;
						spin_lock_init(&ip->i_flags_lock);
						if (bp)
							xfs_buf_relse(bp);
						error = xfs_itobp(mp, NULL, ip,
								&dip, &bp, bno,
								XFS_IMAP_BULKSTAT);
						if (!error)
							clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
						kmem_zone_free(xfs_inode_zone, ip);
						if (XFS_TEST_ERROR(error != 0,
								   mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
								   XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
							bp = NULL;
							ubleft = 0;
							rval = error;
							break;
						}
					}
				}
				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;
				if (!xfs_bulkstat_use_dinode(mp, flags, bp,
							     clustidx, &dip)) {
					lastino = ino;
					continue;
				}
				/*
				 * If we need to do an iget, cannot hold bp.
				 * Drop it, until starting the next cluster.
				 */
				if ((flags & BULKSTAT_FG_INLINE) && !dip) {
					if (bp)
						xfs_buf_relse(bp);
					bp = NULL;
				}

				/*
				 * Get the inode and fill in a single buffer.
				 * BULKSTAT_FG_QUICK uses dip to fill it in.
				 * BULKSTAT_FG_IGET uses igets.
				 * BULKSTAT_FG_INLINE uses dip if we have an
				 * inline attr fork, else igets.
				 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
				 * This is also used to count inodes/blks, etc
				 * in xfs_qm_quotacheck.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp,
						ubleft, private_data,
						bno, &ubused, dip, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					/* EFAULT aborts; ENOMEM stops filling;
					 * anything else just skips the inode */
					if (error == EFAULT) {
						ubleft = 0;
						rval = error;
						break;
					}
					else if (error == ENOMEM)
						ubleft = 0;
					else
						lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}
		}

		if (bp)
			xfs_buf_relse(bp);

		/*
		 * Set up for the next loop iteration.
		 */
		if (ubleft > 0) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			}
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf, irbsize);
	*ubcountp = ubelem;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}
723 | ||
/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 *
 * First tries the fast xfs_bulkstat_one() path directly; on failure
 * falls back to a full single-count xfs_bulkstat() walk to verify the
 * inode really does not exist (vs. a transient read problem).
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* inode to return */
	char			__user *buffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	int			count;	/* count value for bulkstat call */
	int			error;	/* return value */
	xfs_ino_t		ino;	/* filesystem inode number */
	int			res;	/* result from bs1 */

	/*
	 * note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_itobp to generate warning
	 * messages about bad magic numbers. This is ok. The fact that
	 * the inode isn't actually an inode is handled by the
	 * error check below. Done this way to make the usual case faster
	 * at the expense of the error case.
	 */

	ino = (xfs_ino_t)*lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
				 NULL, 0, NULL, NULL, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.
		 */
		(*lastinop)--;	/* back up so the walk includes "ino" */
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				NULL, sizeof(xfs_bstat_t), buffer,
				BULKSTAT_FG_IGET, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
				XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}
772 | ||
faa63e95 MM |
773 | int |
774 | xfs_inumbers_fmt( | |
775 | void __user *ubuffer, /* buffer to write to */ | |
776 | const xfs_inogrp_t *buffer, /* buffer to read from */ | |
777 | long count, /* # of elements to read */ | |
778 | long *written) /* # of bytes written */ | |
779 | { | |
780 | if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer))) | |
781 | return -EFAULT; | |
782 | *written = count * sizeof(*buffer); | |
783 | return 0; | |
784 | } | |
785 | ||
/*
 * Return inode number table for the filesystem.
 *
 * Walks the inode allocation btrees starting at *lastino, batching up
 * to one page of xfs_inogrp_t records at a time and flushing them to
 * the user buffer through "formatter".  *count is updated with the
 * number of records emitted and *lastino with the resume point.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t	*agbp;		/* AGI buffer, NULL when not held */
	xfs_agino_t	agino;		/* inode # within the current ag */
	xfs_agnumber_t	agno;		/* current allocation group */
	int		bcount;		/* capacity of the batch buffer */
	xfs_inogrp_t	*buffer;	/* kernel-side batch buffer */
	int		bufidx;		/* fill level of the batch buffer */
	xfs_btree_cur_t	*cur;		/* inobt cursor, NULL when not held */
	int		error;
	__int32_t	gcnt;		/* free count of current btree rec */
	xfs_inofree_t	gfree;		/* free mask of current btree rec */
	xfs_agino_t	gino;		/* start inode of current btree rec */
	int		i;		/* btree record-found flag */
	xfs_ino_t	ino;		/* starting inode number */
	int		left;		/* records the caller still wants */
	int		tmp;		/* throwaway btree result */

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* batch at most one page's worth of records at a time */
	bcount = MIN(left, (int)(NBPP / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			down_read(&mp->m_peraglock);
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			up_read(&mp->m_peraglock);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_btree_init_cursor(mp, NULL, agbp, agno,
				XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
			&i)) ||
		    i == 0) {
			/* end of this ag (or error): move to the next ag */
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		agino = gino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
		buffer[bufidx].xi_allocmask = ~gfree;
		bufidx++;
		left--;
		if (bufidx == bcount) {
			/* batch full: flush it to the user buffer */
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_inobt_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		if (bufidx) {
			/* flush the final partial batch */
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer, bcount * sizeof(*buffer));
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}