/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include <linux/aio.h>
#include "affs.h"

static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);

static int
affs_file_open(struct inode *inode, struct file *filp)
{
        pr_debug("open(%lu,%d)\n",
                 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
        atomic_inc(&AFFS_I(inode)->i_opencnt);
        return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
        pr_debug("release(%lu, %d)\n",
                 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

        if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
                mutex_lock(&inode->i_mutex);
                if (inode->i_size != AFFS_I(inode)->mmu_private)
                        affs_truncate(inode);
                affs_free_prealloc(inode);
                mutex_unlock(&inode->i_mutex);
        }

        return 0;
}

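/*
 * Both extent caches share a single zeroed page: the linear cache (i_lc)
 * occupies the first half and holds the key of every (1 << i_lc_shift)-th
 * extension block (entry 0 being the file header block itself), while the
 * associative cache (i_ac) in the second half remembers recently used
 * (extent index, block key) pairs.  When the file grows beyond what the
 * linear cache can index, the stride (i_lc_shift) is increased and the
 * cached entries compacted instead of allocating more memory.
 */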
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        u32 lc_max;
        int i, j, key;

        if (!AFFS_I(inode)->i_lc) {
                char *ptr = (char *)get_zeroed_page(GFP_NOFS);
                if (!ptr)
                        return -ENOMEM;
                AFFS_I(inode)->i_lc = (u32 *)ptr;
                AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
        }

        lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

        if (AFFS_I(inode)->i_extcnt > lc_max) {
                u32 lc_shift, lc_mask, tmp, off;

                /* need to recalculate linear cache, start from old size */
                lc_shift = AFFS_I(inode)->i_lc_shift;
                tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
                for (; tmp; tmp >>= 1)
                        lc_shift++;
                lc_mask = (1 << lc_shift) - 1;

                /* fix idx and old size to new shift */
                lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
                AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

                /* first shrink old cache to make more space */
                off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
                for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
                        AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

                AFFS_I(inode)->i_lc_shift = lc_shift;
                AFFS_I(inode)->i_lc_mask = lc_mask;
        }

        /* fill cache to the needed index */
        i = AFFS_I(inode)->i_lc_size;
        AFFS_I(inode)->i_lc_size = lc_idx + 1;
        for (; i <= lc_idx; i++) {
                if (!i) {
                        AFFS_I(inode)->i_lc[0] = inode->i_ino;
                        continue;
                }
                key = AFFS_I(inode)->i_lc[i - 1];
                j = AFFS_I(inode)->i_lc_mask + 1;
                // unlock cache
                for (; j > 0; j--) {
                        bh = affs_bread(sb, key);
                        if (!bh)
                                goto err;
                        key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                        affs_brelse(bh);
                }
                // lock cache
                AFFS_I(inode)->i_lc[i] = key;
        }

        return 0;

err:
        // lock cache
        return -EIO;
}

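/*
 * Allocate a new extension block, using the previous block as the
 * allocation goal, initialise its header (T_LIST / ST_FILE) and chain it
 * onto AFFS_TAIL()->extension of the previous block.  Checksums are
 * updated incrementally with affs_adjust_checksum() rather than being
 * fully recomputed.
 */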
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *new_bh;
        u32 blocknr, tmp;

        blocknr = affs_alloc_block(inode, bh->b_blocknr);
        if (!blocknr)
                return ERR_PTR(-ENOSPC);

        new_bh = affs_getzeroblk(sb, blocknr);
        if (!new_bh) {
                affs_free_block(sb, blocknr);
                return ERR_PTR(-EIO);
        }

        AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
        AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
        AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
        AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
        affs_fix_checksum(sb, new_bh);

        mark_buffer_dirty_inode(new_bh, inode);

        tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
        if (tmp)
                affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
        AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
        affs_adjust_checksum(bh, blocknr - tmp);
        mark_buffer_dirty_inode(bh, inode);

        AFFS_I(inode)->i_extcnt++;
        mark_inode_dirty(inode);

        return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
        /* inline the simplest case: same extended block as last time */
        struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
        if (ext == AFFS_I(inode)->i_ext_last)
                get_bh(bh);
        else
                /* we have to do more (not inlined) */
                bh = affs_get_extblock_slow(inode, ext);

        return bh;
}

static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        u32 ext_key;
        u32 lc_idx, lc_off, ac_idx;
        u32 tmp, idx;

        if (ext == AFFS_I(inode)->i_ext_last + 1) {
                /* read the next extended block from the current one */
                bh = AFFS_I(inode)->i_ext_bh;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                if (ext < AFFS_I(inode)->i_extcnt)
                        goto read_ext;
                BUG_ON(ext > AFFS_I(inode)->i_extcnt);
                bh = affs_alloc_extblock(inode, bh, ext);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

        if (ext == 0) {
                /* we seek back to the file header block */
                ext_key = inode->i_ino;
                goto read_ext;
        }

        if (ext >= AFFS_I(inode)->i_extcnt) {
                struct buffer_head *prev_bh;

                /* allocate a new extended block */
                BUG_ON(ext > AFFS_I(inode)->i_extcnt);

                /* get previous extended block */
                prev_bh = affs_get_extblock(inode, ext - 1);
                if (IS_ERR(prev_bh))
                        return prev_bh;
                bh = affs_alloc_extblock(inode, prev_bh, ext);
                affs_brelse(prev_bh);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

again:
        /* check if there is an extended cache and whether it's large enough */
        lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
        lc_off = ext & AFFS_I(inode)->i_lc_mask;

        if (lc_idx >= AFFS_I(inode)->i_lc_size) {
                int err;

                err = affs_grow_extcache(inode, lc_idx);
                if (err)
                        return ERR_PTR(err);
                goto again;
        }

        /* every n'th key we find in the linear cache */
        if (!lc_off) {
                ext_key = AFFS_I(inode)->i_lc[lc_idx];
                goto read_ext;
        }

        /* maybe it's still in the associative cache */
        ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
        if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
                ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
                goto read_ext;
        }

        /* try to find one of the previous extended blocks */
        tmp = ext;
        idx = ac_idx;
        while (--tmp, --lc_off > 0) {
                idx = (idx - 1) & AFFS_AC_MASK;
                if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
                        ext_key = AFFS_I(inode)->i_ac[idx].key;
                        goto find_ext;
                }
        }

        /* fall back to the linear cache */
        ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
        /* read all extended blocks until we find the one we need */
        //unlock cache
        do {
                bh = affs_bread(sb, ext_key);
                if (!bh)
                        goto err_bread;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                affs_brelse(bh);
                tmp++;
        } while (tmp < ext);
        //lock cache

        /* store it in the associative cache */
        // recalculate ac_idx?
        AFFS_I(inode)->i_ac[ac_idx].ext = ext;
        AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
        /* finally read the right extended block */
        //unlock cache
        bh = affs_bread(sb, ext_key);
        if (!bh)
                goto err_bread;
        //lock cache

store_ext:
        /* release old cached extended block and store the new one */
        affs_brelse(AFFS_I(inode)->i_ext_bh);
        AFFS_I(inode)->i_ext_last = ext;
        AFFS_I(inode)->i_ext_bh = bh;
        get_bh(bh);

        return bh;

err_bread:
        affs_brelse(bh);
        return ERR_PTR(-EIO);
}

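/*
 * Map a file-relative block number onto a device block: the block pointer
 * lives in slot (block % s_hashsize) of extension block (block / s_hashsize).
 * With create != 0 a new data block is allocated and linked in, but only
 * when the request is exactly one block past the current end of the file;
 * anything further out is rejected as a strange request.
 */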
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *ext_bh;
        u32 ext;

        pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
                 (unsigned long long)block);

        BUG_ON(block > (sector_t)0x7fffffffUL);

        if (block >= AFFS_I(inode)->i_blkcnt) {
                if (block > AFFS_I(inode)->i_blkcnt || !create)
                        goto err_big;
        } else
                create = 0;

        //lock cache
        affs_lock_ext(inode);

        ext = (u32)block / AFFS_SB(sb)->s_hashsize;
        block -= ext * AFFS_SB(sb)->s_hashsize;
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh))
                goto err_ext;
        map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

        if (create) {
                u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
                if (!blocknr)
                        goto err_alloc;
                set_buffer_new(bh_result);
                AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
                AFFS_I(inode)->i_blkcnt++;

                /* store new block */
                if (bh_result->b_blocknr)
                        affs_warning(sb, "get_block",
                                     "block already set (%llx)",
                                     (unsigned long long)bh_result->b_blocknr);
                AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
                AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
                affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
                bh_result->b_blocknr = blocknr;

                if (!block) {
                        /* insert first block into header block */
                        u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
                        if (tmp)
                                affs_warning(sb, "get_block", "first block already set (%d)", tmp);
                        AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
                        affs_adjust_checksum(ext_bh, blocknr - tmp);
                }
        }

        affs_brelse(ext_bh);
        //unlock cache
        affs_unlock_ext(inode);
        return 0;

err_big:
        affs_error(inode->i_sb, "get_block", "strange block request %llu",
                   (unsigned long long)block);
        return -EIO;
err_ext:
        // unlock cache
        affs_unlock_ext(inode);
        return PTR_ERR(ext_bh);
err_alloc:
        brelse(ext_bh);
        clear_buffer_mapped(bh_result);
        bh_result->b_bdev = NULL;
        // unlock cache
        affs_unlock_ext(inode);
        return -ENOSPC;
}

static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                affs_truncate(inode);
        }
}

static ssize_t
affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
               loff_t offset)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;

        ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
        if (ret < 0 && (rw & WRITE))
                affs_write_failed(mapping, offset + count);
        return ret;
}

static int affs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        int ret;

        *pagep = NULL;
        ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                               affs_get_block,
                               &AFFS_I(mapping->host)->mmu_private);
        if (unlikely(ret))
                affs_write_failed(mapping, pos + len);

        return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
        .readpage = affs_readpage,
        .writepage = affs_writepage,
        .write_begin = affs_write_begin,
        .write_end = generic_write_end,
        .direct_IO = affs_direct_IO,
        .bmap = _affs_bmap
};

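/*
 * The *_ino() helpers below translate a file-relative block index into a
 * device block via affs_get_block() on a throwaway buffer_head, then hand
 * back a real buffer for that device block.  They return ERR_PTR() values,
 * never NULL.
 */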
static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, create);
        if (!err) {
                bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

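/*
 * Old File System (OFS) variant: every data block starts with its own
 * header (ptype, key, sequence, size, next), so page contents cannot simply
 * be mapped block-for-block.  The routines below copy data between the page
 * cache and the payload area behind that header, and keep the per-block
 * "next" chain and checksums consistent.
 */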
static int
affs_do_readpage_ofs(struct page *page, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        char *data;
        unsigned pos = 0;
        u32 bidx, boff, bsize;
        u32 tmp;

        pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
                 page->index, to);
        BUG_ON(to > PAGE_CACHE_SIZE);
        kmap(page);
        data = page_address(page);
        bsize = AFFS_SB(sb)->s_data_blksize;
        tmp = page->index << PAGE_CACHE_SHIFT;
        bidx = tmp / bsize;
        boff = tmp % bsize;

        while (pos < to) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - pos);
                BUG_ON(pos + tmp > to || tmp > bsize);
                memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
                affs_brelse(bh);
                bidx++;
                pos += tmp;
                boff = 0;
        }
        flush_dcache_page(page);
        kunmap(page);
        return 0;
}

static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        u32 bidx, boff;
        u32 size, bsize;
        u32 tmp;

        pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
        bsize = AFFS_SB(sb)->s_data_blksize;
        bh = NULL;
        size = AFFS_I(inode)->mmu_private;
        bidx = size / bsize;
        boff = size % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, newsize - size);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memset(AFFS_DATA(bh) + boff, 0, tmp);
                be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                size += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }

        while (size < newsize) {
                prev_bh = bh;
                bh = affs_getzeroblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, newsize - size);
                BUG_ON(tmp > bsize);
                AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_fix_checksum(sb, bh);
                bh->b_state &= ~(1UL << BH_New);
                mark_buffer_dirty_inode(bh, inode);
                if (prev_bh) {
                        u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

                        if (tmp_next)
                                affs_warning(sb, "extent_file_ofs",
                                             "next block already set for %d (%d)",
                                             bidx, tmp_next);
                        AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                        affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
                        mark_buffer_dirty_inode(prev_bh, inode);
                        affs_brelse(prev_bh);
                }
                size += bsize;
                bidx++;
        }
        affs_brelse(bh);
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return 0;

out:
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return PTR_ERR(bh);
}

static int
affs_readpage_ofs(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        u32 to;
        int err;

        pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
        to = PAGE_CACHE_SIZE;
        if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
                to = inode->i_size & ~PAGE_CACHE_MASK;
                memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
        }

        err = affs_do_readpage_ofs(page, to);
        if (!err)
                SetPageUptodate(page);
        unlock_page(page);
        return err;
}

static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct page *page;
        pgoff_t index;
        int err = 0;

        pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
                 pos + len);
        if (pos > AFFS_I(inode)->mmu_private) {
                /* XXX: this probably leaves a too-big i_size in case of
                 * failure. Should really be updating i_size at write_end time
                 */
                err = affs_extent_file_ofs(inode, pos);
                if (err)
                        return err;
        }

        index = pos >> PAGE_CACHE_SHIFT;
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        if (PageUptodate(page))
                return 0;

        /* XXX: inefficient but safe in the face of short writes */
        err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE);
        if (err) {
                unlock_page(page);
                page_cache_release(page);
        }
        return err;
}

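/*
 * Copy the written page range back into the OFS data blocks: first top up a
 * partially filled block, then fill whole blocks, and finally handle a
 * trailing partial block.  Freshly allocated blocks get their data header
 * initialised and are linked onto the previous block's "next" pointer.
 */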
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned copied,
                              struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        char *data;
        u32 bidx, boff, bsize;
        unsigned from, to;
        u32 tmp;
        int written;

        from = pos & (PAGE_CACHE_SIZE - 1);
        to = pos + len;
        /*
         * XXX: not sure if this can handle short copies (copied < len), but
         * we don't have to, because the page should always be uptodate here,
         * due to write_begin.
         */

        pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
                 pos + len);
        bsize = AFFS_SB(sb)->s_data_blksize;
        data = page_address(page);

        bh = NULL;
        written = 0;
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        bidx = tmp / bsize;
        boff = tmp % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
                be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }
        while (from + bsize <= to) {
                prev_bh = bh;
                bh = affs_getemptyblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                memcpy(AFFS_DATA(bh), data + from, bsize);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

                                if (tmp_next)
                                        affs_warning(sb, "commit_write_ofs",
                                                     "next block already set for %d (%d)",
                                                     bidx, tmp_next);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                }
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += bsize;
                from += bsize;
                bidx++;
        }
        if (from < to) {
                prev_bh = bh;
                bh = affs_bread_ino(inode, bidx, 1);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, to - from);
                BUG_ON(tmp > bsize);
                memcpy(AFFS_DATA(bh), data + from, tmp);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

                                if (tmp_next)
                                        affs_warning(sb, "commit_write_ofs",
                                                     "next block already set for %d (%d)",
                                                     bidx, tmp_next);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        }
        SetPageUptodate(page);

done:
        affs_brelse(bh);
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;

        unlock_page(page);
        page_cache_release(page);

        return written;

out:
        bh = prev_bh;
        if (!written)
                written = PTR_ERR(bh);
        goto done;
}

const struct address_space_operations affs_aops_ofs = {
        .readpage = affs_readpage_ofs,
        //.writepage = affs_writepage_ofs,
        .write_begin = affs_write_begin_ofs,
        .write_end = affs_write_end_ofs
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

        while (AFFS_I(inode)->i_pa_cnt) {
                AFFS_I(inode)->i_pa_cnt--;
                affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
        }
}

/* Truncate (or enlarge) a file to the requested size. */

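/*
 * Growing is delegated to the address_space write_begin/write_end pair so
 * the extension logic is shared with ordinary writes.  Shrinking rewrites
 * the extension block that holds the new last data block, frees the block
 * pointers behind it, and then frees all following extension blocks along
 * the chain.  Both extent caches are trimmed so they no longer reference
 * freed blocks.
 */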
void
affs_truncate(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        u32 ext, ext_key;
        u32 last_blk, blkcnt, blk;
        u32 size;
        struct buffer_head *ext_bh;
        int i;

        pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
                 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

        last_blk = 0;
        ext = 0;
        if (inode->i_size) {
                last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
                ext = last_blk / AFFS_SB(sb)->s_hashsize;
        }

        if (inode->i_size > AFFS_I(inode)->mmu_private) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                void *fsdata;
                loff_t isize = inode->i_size;
                int res;

                res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
                if (!res)
                        res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
                else
                        inode->i_size = AFFS_I(inode)->mmu_private;
                mark_inode_dirty(inode);
                return;
        } else if (inode->i_size == AFFS_I(inode)->mmu_private)
                return;

        // lock cache
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh)) {
                affs_warning(sb, "truncate",
                             "unexpected read error for ext block %u (%ld)",
                             ext, PTR_ERR(ext_bh));
                return;
        }
        if (AFFS_I(inode)->i_lc) {
                /* clear linear cache */
                i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
                if (AFFS_I(inode)->i_lc_size > i) {
                        AFFS_I(inode)->i_lc_size = i;
                        for (; i < AFFS_LC_SIZE; i++)
                                AFFS_I(inode)->i_lc[i] = 0;
                }
                /* clear associative cache */
                for (i = 0; i < AFFS_AC_SIZE; i++)
                        if (AFFS_I(inode)->i_ac[i].ext >= ext)
                                AFFS_I(inode)->i_ac[i].ext = 0;
        }
        ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

        blkcnt = AFFS_I(inode)->i_blkcnt;
        i = 0;
        blk = last_blk;
        if (inode->i_size) {
                i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
                blk++;
        } else
                AFFS_HEAD(ext_bh)->first_data = 0;
        AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
        size = AFFS_SB(sb)->s_hashsize;
        if (size > blkcnt - blk + i)
                size = blkcnt - blk + i;
        for (; i < size; i++, blk++) {
                affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                AFFS_BLOCK(sb, ext_bh, i) = 0;
        }
        AFFS_TAIL(sb, ext_bh)->extension = 0;
        affs_fix_checksum(sb, ext_bh);
        mark_buffer_dirty_inode(ext_bh, inode);
        affs_brelse(ext_bh);

        if (inode->i_size) {
                AFFS_I(inode)->i_blkcnt = last_blk + 1;
                AFFS_I(inode)->i_extcnt = ext + 1;
                if (AFFS_SB(sb)->s_flags & SF_OFS) {
                        struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
                        u32 tmp;
                        if (IS_ERR(bh)) {
                                affs_warning(sb, "truncate",
                                             "unexpected read error for last block %u (%ld)",
                                             ext, PTR_ERR(bh));
                                return;
                        }
                        tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        affs_adjust_checksum(bh, -tmp);
                        affs_brelse(bh);
                }
        } else {
                AFFS_I(inode)->i_blkcnt = 0;
                AFFS_I(inode)->i_extcnt = 1;
        }
        AFFS_I(inode)->mmu_private = inode->i_size;
        // unlock cache

        while (ext_key) {
                ext_bh = affs_bread(sb, ext_key);
                size = AFFS_SB(sb)->s_hashsize;
                if (size > blkcnt - blk)
                        size = blkcnt - blk;
                for (i = 0; i < size; i++, blk++)
                        affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                affs_free_block(sb, ext_key);
                ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
                affs_brelse(ext_bh);
        }
        affs_free_prealloc(inode);
}

int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        int ret, err;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        mutex_lock(&inode->i_mutex);
        ret = write_inode_now(inode, 0);
        err = sync_blockdev(inode->i_sb->s_bdev);
        if (!ret)
                ret = err;
        mutex_unlock(&inode->i_mutex);
        return ret;
}
const struct file_operations affs_file_operations = {
        .llseek = generic_file_llseek,
        .read = new_sync_read,
        .read_iter = generic_file_read_iter,
        .write = new_sync_write,
        .write_iter = generic_file_write_iter,
        .mmap = generic_file_mmap,
        .open = affs_file_open,
        .release = affs_file_release,
        .fsync = affs_file_fsync,
        .splice_read = generic_file_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
        .setattr = affs_notify_change,
};