]>
Commit | Line | Data |
---|---|---|
6c98cd4e KS |
1 | /* |
2 | * sufile.c - NILFS segment usage file. | |
3 | * | |
4 | * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
19 | * | |
20 | * Written by Koji Sato <koji@osrg.net>. | |
7a65004b | 21 | * Revised by Ryusuke Konishi <ryusuke@osrg.net>. |
6c98cd4e KS |
22 | */ |
23 | ||
24 | #include <linux/kernel.h> | |
25 | #include <linux/fs.h> | |
26 | #include <linux/string.h> | |
27 | #include <linux/buffer_head.h> | |
28 | #include <linux/errno.h> | |
29 | #include <linux/nilfs2_fs.h> | |
30 | #include "mdt.h" | |
31 | #include "sufile.h" | |
32 | ||
33 | ||
aa474a22 RK |
/*
 * Private in-memory state of the sufile: extends the common metadata
 * file info with a cached count of clean segments (mirrored from the
 * on-disk header by nilfs_sufile_read()).
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;	/* must be first: NILFS_SUI() casts
					 * the mdt info pointer to this type */
	unsigned long ncleansegs;	/* cached number of clean segments */
};

/* Map the mdt info of @sufile to the sufile-private info structure. */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
43 | ||
6c98cd4e KS |
44 | static inline unsigned long |
45 | nilfs_sufile_segment_usages_per_block(const struct inode *sufile) | |
46 | { | |
47 | return NILFS_MDT(sufile)->mi_entries_per_block; | |
48 | } | |
49 | ||
50 | static unsigned long | |
51 | nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum) | |
52 | { | |
53 | __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset; | |
54 | do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); | |
55 | return (unsigned long)t; | |
56 | } | |
57 | ||
58 | static unsigned long | |
59 | nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum) | |
60 | { | |
61 | __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset; | |
62 | return do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); | |
63 | } | |
64 | ||
65 | static unsigned long | |
66 | nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr, | |
67 | __u64 max) | |
68 | { | |
69 | return min_t(unsigned long, | |
70 | nilfs_sufile_segment_usages_per_block(sufile) - | |
71 | nilfs_sufile_get_offset(sufile, curr), | |
72 | max - curr + 1); | |
73 | } | |
74 | ||
6c98cd4e KS |
75 | static struct nilfs_segment_usage * |
76 | nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum, | |
77 | struct buffer_head *bh, void *kaddr) | |
78 | { | |
79 | return kaddr + bh_offset(bh) + | |
80 | nilfs_sufile_get_offset(sufile, segnum) * | |
81 | NILFS_MDT(sufile)->mi_entry_size; | |
82 | } | |
83 | ||
84 | static inline int nilfs_sufile_get_header_block(struct inode *sufile, | |
85 | struct buffer_head **bhp) | |
86 | { | |
87 | return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp); | |
88 | } | |
89 | ||
90 | static inline int | |
91 | nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum, | |
92 | int create, struct buffer_head **bhp) | |
93 | { | |
94 | return nilfs_mdt_get_block(sufile, | |
95 | nilfs_sufile_get_blkoff(sufile, segnum), | |
96 | create, NULL, bhp); | |
97 | } | |
98 | ||
a703018f RK |
/*
 * nilfs_sufile_mod_counter - adjust the on-disk clean/dirty segment counters
 * @header_bh: buffer head of the sufile header block
 * @ncleanadd: delta added to sh_ncleansegs (negative deltas are passed as
 *             two's-complement u64, e.g. (u64)-1)
 * @ndirtyadd: delta added to sh_ndirtysegs (same encoding)
 *
 * Callers in this file invoke this under down_write(mi_sem).  The header
 * buffer is marked dirty so the change is written out on the next sync.
 */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(header_bh);
}
113 | ||
ef7d4757 RK |
114 | /** |
115 | * nilfs_sufile_get_ncleansegs - return the number of clean segments | |
116 | * @sufile: inode of segment usage file | |
117 | */ | |
118 | unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) | |
119 | { | |
120 | return NILFS_SUI(sufile)->ncleansegs; | |
121 | } | |
122 | ||
dda54f4b RK |
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/* Validate every segment number before touching anything, so a
	 * bad entry in the middle of the array cannot leave a partial
	 * update behind. */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;	/* reuse the buffer already held */

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/* n counts the segments processed before the failure (or all of
	 * them on success); it is reported through @ndone below. */
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
218 | ||
a703018f RK |
219 | int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, |
220 | void (*dofunc)(struct inode *, __u64, | |
221 | struct buffer_head *, | |
222 | struct buffer_head *)) | |
223 | { | |
224 | struct buffer_head *header_bh, *bh; | |
225 | int ret; | |
226 | ||
227 | if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) { | |
228 | printk(KERN_WARNING "%s: invalid segment number: %llu\n", | |
229 | __func__, (unsigned long long)segnum); | |
230 | return -EINVAL; | |
231 | } | |
232 | down_write(&NILFS_MDT(sufile)->mi_sem); | |
233 | ||
234 | ret = nilfs_sufile_get_header_block(sufile, &header_bh); | |
235 | if (ret < 0) | |
236 | goto out_sem; | |
237 | ||
238 | ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh); | |
239 | if (!ret) { | |
240 | dofunc(sufile, segnum, header_bh, bh); | |
241 | brelse(bh); | |
242 | } | |
243 | brelse(header_bh); | |
244 | ||
245 | out_sem: | |
246 | up_write(&NILFS_MDT(sufile)->mi_sem); | |
247 | return ret; | |
248 | } | |
249 | ||
6c98cd4e KS |
250 | /** |
251 | * nilfs_sufile_alloc - allocate a segment | |
252 | * @sufile: inode of segment usage file | |
253 | * @segnump: pointer to segment number | |
254 | * | |
255 | * Description: nilfs_sufile_alloc() allocates a clean segment. | |
256 | * | |
257 | * Return Value: On success, 0 is returned and the segment number of the | |
258 | * allocated segment is stored in the place pointed by @segnump. On error, one | |
259 | * of the following negative error codes is returned. | |
260 | * | |
261 | * %-EIO - I/O error. | |
262 | * | |
263 | * %-ENOMEM - Insufficient amount of memory available. | |
264 | * | |
265 | * %-ENOSPC - No clean segment left. | |
266 | */ | |
267 | int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) | |
268 | { | |
269 | struct buffer_head *header_bh, *su_bh; | |
6c98cd4e KS |
270 | struct nilfs_sufile_header *header; |
271 | struct nilfs_segment_usage *su; | |
272 | size_t susz = NILFS_MDT(sufile)->mi_entry_size; | |
273 | __u64 segnum, maxsegnum, last_alloc; | |
274 | void *kaddr; | |
275 | unsigned long nsegments, ncleansegs, nsus; | |
276 | int ret, i, j; | |
277 | ||
278 | down_write(&NILFS_MDT(sufile)->mi_sem); | |
279 | ||
6c98cd4e KS |
280 | ret = nilfs_sufile_get_header_block(sufile, &header_bh); |
281 | if (ret < 0) | |
282 | goto out_sem; | |
283 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); | |
7b16c8a2 | 284 | header = kaddr + bh_offset(header_bh); |
6c98cd4e KS |
285 | ncleansegs = le64_to_cpu(header->sh_ncleansegs); |
286 | last_alloc = le64_to_cpu(header->sh_last_alloc); | |
287 | kunmap_atomic(kaddr, KM_USER0); | |
288 | ||
289 | nsegments = nilfs_sufile_get_nsegments(sufile); | |
290 | segnum = last_alloc + 1; | |
291 | maxsegnum = nsegments - 1; | |
292 | for (i = 0; i < nsegments; i += nsus) { | |
293 | if (segnum >= nsegments) { | |
294 | /* wrap around */ | |
295 | segnum = 0; | |
296 | maxsegnum = last_alloc; | |
297 | } | |
298 | ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, | |
299 | &su_bh); | |
300 | if (ret < 0) | |
301 | goto out_header; | |
302 | kaddr = kmap_atomic(su_bh->b_page, KM_USER0); | |
303 | su = nilfs_sufile_block_get_segment_usage( | |
304 | sufile, segnum, su_bh, kaddr); | |
305 | ||
306 | nsus = nilfs_sufile_segment_usages_in_block( | |
307 | sufile, segnum, maxsegnum); | |
308 | for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) { | |
309 | if (!nilfs_segment_usage_clean(su)) | |
310 | continue; | |
311 | /* found a clean segment */ | |
6c98cd4e KS |
312 | nilfs_segment_usage_set_dirty(su); |
313 | kunmap_atomic(kaddr, KM_USER0); | |
314 | ||
315 | kaddr = kmap_atomic(header_bh->b_page, KM_USER0); | |
7b16c8a2 | 316 | header = kaddr + bh_offset(header_bh); |
6c98cd4e KS |
317 | le64_add_cpu(&header->sh_ncleansegs, -1); |
318 | le64_add_cpu(&header->sh_ndirtysegs, 1); | |
319 | header->sh_last_alloc = cpu_to_le64(segnum); | |
320 | kunmap_atomic(kaddr, KM_USER0); | |
321 | ||
aa474a22 | 322 | NILFS_SUI(sufile)->ncleansegs--; |
6c98cd4e KS |
323 | nilfs_mdt_mark_buffer_dirty(header_bh); |
324 | nilfs_mdt_mark_buffer_dirty(su_bh); | |
325 | nilfs_mdt_mark_dirty(sufile); | |
326 | brelse(su_bh); | |
327 | *segnump = segnum; | |
328 | goto out_header; | |
329 | } | |
330 | ||
331 | kunmap_atomic(kaddr, KM_USER0); | |
332 | brelse(su_bh); | |
333 | } | |
334 | ||
335 | /* no segments left */ | |
336 | ret = -ENOSPC; | |
337 | ||
338 | out_header: | |
339 | brelse(header_bh); | |
340 | ||
341 | out_sem: | |
342 | up_write(&NILFS_MDT(sufile)->mi_sem); | |
343 | return ret; | |
344 | } | |
345 | ||
a703018f RK |
/*
 * nilfs_sufile_do_cancel_free - undo the freeing of a segment
 *
 * dofunc callback (see nilfs_sufile_update()): turns a clean segment
 * back into a dirty one and adjusts the counters accordingly.  If the
 * segment is not clean, a warning is printed and nothing is changed.
 * Runs with mi_sem held for writing by the caller.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr, KM_USER0);

	/* one segment moves from clean to dirty */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
370 | ||
c85399c2 RK |
/*
 * nilfs_sufile_do_scrap - make a segment garbage
 *
 * dofunc callback: resets the usage entry of @segnum to "dirty with no
 * blocks" (the reclaimable state).  Returns early if the entry is
 * already in that exact state.  Counter deltas depend on the previous
 * state: a clean segment loses one from the clean count, and only a
 * segment that was not already dirty adds one to the dirty count.
 * Runs with mi_sem held for writing by the caller.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		/* already scrapped; nothing to do */
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
401 | ||
a703018f RK |
/*
 * nilfs_sufile_do_free - free a segment
 *
 * dofunc callback: marks the usage entry of @segnum clean.  A segment
 * that is already clean is left untouched (with a warning).  The clean
 * counter always gains one; the dirty counter drops by one only if the
 * segment was dirty (an error-flagged segment is not counted as dirty).
 * Runs with mi_sem held for writing by the caller.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	/* freeing an error-flagged or non-dirty segment is unexpected */
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr, KM_USER0);
	nilfs_mdt_mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}
431 | ||
61a189e9 RK |
432 | /** |
433 | * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty | |
434 | * @sufile: inode of segment usage file | |
435 | * @segnum: segment number | |
436 | */ | |
437 | int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) | |
438 | { | |
439 | struct buffer_head *bh; | |
440 | int ret; | |
441 | ||
442 | ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh); | |
443 | if (!ret) { | |
444 | nilfs_mdt_mark_buffer_dirty(bh); | |
445 | nilfs_mdt_mark_dirty(sufile); | |
446 | brelse(bh); | |
447 | } | |
448 | return ret; | |
449 | } | |
450 | ||
071ec54d RK |
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option; 0 leaves su_lastmod unchanged)
 *
 * Updates the block count (and optionally the last-modified time) in
 * the usage entry of @segnum under mi_sem held for writing.  Returns 0
 * on success or the error from the block lookup.
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	/* updating an error-flagged segment is unexpected */
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
487 | ||
6c98cd4e KS |
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is guarded by its own spinlock, not mi_sem */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
535 | ||
a703018f RK |
/*
 * nilfs_sufile_do_set_error - flag a segment as erroneous
 *
 * dofunc callback: sets the error flag on the usage entry of @segnum.
 * A segment already flagged is left untouched.  If the segment was
 * clean, the clean counters (on disk and cached) are decremented; the
 * dirty counter is never changed here.  Runs with mi_sem held for
 * writing by the caller.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		/* already flagged; nothing to do */
		kunmap_atomic(kaddr, KM_USER0);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
561 | ||
562 | /** | |
563 | * nilfs_sufile_get_suinfo - | |
564 | * @sufile: inode of segment usage file | |
565 | * @segnum: segment number to start looking | |
003ff182 RK |
566 | * @buf: array of suinfo |
567 | * @sisz: byte size of suinfo | |
6c98cd4e KS |
568 | * @nsi: size of suinfo array |
569 | * | |
570 | * Description: | |
571 | * | |
572 | * Return Value: On success, 0 is returned and .... On error, one of the | |
573 | * following negative error codes is returned. | |
574 | * | |
575 | * %-EIO - I/O error. | |
576 | * | |
577 | * %-ENOMEM - Insufficient amount of memory available. | |
578 | */ | |
003ff182 RK |
579 | ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, |
580 | unsigned sisz, size_t nsi) | |
6c98cd4e KS |
581 | { |
582 | struct buffer_head *su_bh; | |
583 | struct nilfs_segment_usage *su; | |
003ff182 | 584 | struct nilfs_suinfo *si = buf; |
6c98cd4e | 585 | size_t susz = NILFS_MDT(sufile)->mi_entry_size; |
cece5520 | 586 | struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs; |
6c98cd4e KS |
587 | void *kaddr; |
588 | unsigned long nsegs, segusages_per_block; | |
589 | ssize_t n; | |
590 | int ret, i, j; | |
591 | ||
592 | down_read(&NILFS_MDT(sufile)->mi_sem); | |
593 | ||
594 | segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile); | |
595 | nsegs = min_t(unsigned long, | |
596 | nilfs_sufile_get_nsegments(sufile) - segnum, | |
597 | nsi); | |
598 | for (i = 0; i < nsegs; i += n, segnum += n) { | |
599 | n = min_t(unsigned long, | |
600 | segusages_per_block - | |
601 | nilfs_sufile_get_offset(sufile, segnum), | |
602 | nsegs - i); | |
603 | ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, | |
604 | &su_bh); | |
605 | if (ret < 0) { | |
606 | if (ret != -ENOENT) | |
607 | goto out; | |
608 | /* hole */ | |
003ff182 RK |
609 | memset(si, 0, sisz * n); |
610 | si = (void *)si + sisz * n; | |
6c98cd4e KS |
611 | continue; |
612 | } | |
613 | ||
614 | kaddr = kmap_atomic(su_bh->b_page, KM_USER0); | |
615 | su = nilfs_sufile_block_get_segment_usage( | |
616 | sufile, segnum, su_bh, kaddr); | |
003ff182 RK |
617 | for (j = 0; j < n; |
618 | j++, su = (void *)su + susz, si = (void *)si + sisz) { | |
619 | si->sui_lastmod = le64_to_cpu(su->su_lastmod); | |
620 | si->sui_nblocks = le32_to_cpu(su->su_nblocks); | |
621 | si->sui_flags = le32_to_cpu(su->su_flags) & | |
cece5520 | 622 | ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); |
3efb55b4 | 623 | if (nilfs_segment_is_active(nilfs, segnum + j)) |
003ff182 | 624 | si->sui_flags |= |
cece5520 | 625 | (1UL << NILFS_SEGMENT_USAGE_ACTIVE); |
6c98cd4e KS |
626 | } |
627 | kunmap_atomic(kaddr, KM_USER0); | |
628 | brelse(su_bh); | |
629 | } | |
630 | ret = nsegs; | |
631 | ||
632 | out: | |
633 | up_read(&NILFS_MDT(sufile)->mi_sem); | |
634 | return ret; | |
635 | } | |
79739565 | 636 | |
8707df38 RK |
/**
 * nilfs_sufile_read - read sufile inode
 * @sufile: sufile inode
 * @raw_inode: on-disk sufile inode
 *
 * Besides reading the common inode fields, this primes the in-memory
 * clean-segment cache (sui->ncleansegs) from the on-disk header so
 * nilfs_sufile_get_ncleansegs() works without further disk access.
 */
int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int ret;

	ret = nilfs_read_inode_common(sufile, raw_inode);
	if (ret < 0)
		return ret;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (!ret) {
		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
		header = kaddr + bh_offset(header_bh);
		sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
		kunmap_atomic(kaddr, KM_USER0);
		brelse(header_bh);
	}
	return ret;
}
664 | ||
79739565 RK |
665 | /** |
666 | * nilfs_sufile_new - create sufile | |
667 | * @nilfs: nilfs object | |
668 | * @susize: size of a segment usage entry | |
669 | */ | |
670 | struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize) | |
671 | { | |
672 | struct inode *sufile; | |
673 | ||
aa474a22 RK |
674 | sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO, |
675 | sizeof(struct nilfs_sufile_info)); | |
79739565 RK |
676 | if (sufile) |
677 | nilfs_mdt_set_entry_size(sufile, susize, | |
678 | sizeof(struct nilfs_sufile_header)); | |
679 | return sufile; | |
680 | } |