fs/udf/inode.c
udf: use get_bh()
1 /*
2 * inode.c
3 *
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
6 *
7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
12 *
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
16 *
17 * HISTORY
18 *
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
29 */
30
31 #include "udfdecl.h"
32 #include <linux/mm.h>
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
39
40 #include "udf_i.h"
41 #include "udf_sb.h"
42
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
46
47 #define EXTENT_MERGE_SIZE 5
48
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
53 long *, int *);
54 static int8_t udf_insert_aext(struct inode *, struct extent_position,
55 kernel_lb_addr, uint32_t);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 struct extent_position *);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
66
67 /*
68 * udf_delete_inode
69 *
70 * PURPOSE
71 * Clean-up before the specified inode is destroyed.
72 *
73 * DESCRIPTION
74 * This routine is called when the kernel destroys an inode structure
75  *	i.e. when iput() finds i_count == 0.
76 *
77 * HISTORY
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
80 *
81 * Called at the last iput() if i_nlink is zero.
82 */
83 void udf_delete_inode(struct inode * inode)
84 {
85 truncate_inode_pages(&inode->i_data, 0);
86
87 if (is_bad_inode(inode))
88 goto no_delete;
89
90 inode->i_size = 0;
91 udf_truncate(inode);
92 lock_kernel();
93
94 udf_update_inode(inode, IS_SYNC(inode));
95 udf_free_inode(inode);
96
97 unlock_kernel();
98 return;
99 no_delete:
100 clear_inode(inode);
101 }
102
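/*
 * udf_clear_inode
 *
 * Called when the in-core inode is being destroyed: drop any preallocated
 * blocks (on a writable filesystem) and free the in-core allocation data.
 */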
103 void udf_clear_inode(struct inode *inode)
104 {
105 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
106 lock_kernel();
107 udf_discard_prealloc(inode);
108 unlock_kernel();
109 }
110
111 kfree(UDF_I_DATA(inode));
112 UDF_I_DATA(inode) = NULL;
113 }
114
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
116 {
117 return block_write_full_page(page, udf_get_block, wbc);
118 }
119
120 static int udf_readpage(struct file *file, struct page *page)
121 {
122 return block_read_full_page(page, udf_get_block);
123 }
124
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
126 {
127 return block_prepare_write(page, from, to, udf_get_block);
128 }
129
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
131 {
132 return generic_block_bmap(mapping,block,udf_get_block);
133 }
134
135 const struct address_space_operations udf_aops = {
136 .readpage = udf_readpage,
137 .writepage = udf_writepage,
138 .sync_page = block_sync_page,
139 .prepare_write = udf_prepare_write,
140 .commit_write = generic_commit_write,
141 .bmap = udf_bmap,
142 };
143
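/*
 * Convert a file whose data is embedded in the ICB into a normal file using
 * short/long allocation descriptors: copy the in-ICB data into page 0 of the
 * page cache, clear the embedded area and write the page out.
 */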
144 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
145 {
146 struct page *page;
147 char *kaddr;
148 struct writeback_control udf_wbc = {
149 .sync_mode = WB_SYNC_NONE,
150 .nr_to_write = 1,
151 };
152
153 /* from now on we have normal address_space methods */
154 inode->i_data.a_ops = &udf_aops;
155
156 if (!UDF_I_LENALLOC(inode))
157 {
158 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
160 else
161 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 mark_inode_dirty(inode);
163 return;
164 }
165
166 page = grab_cache_page(inode->i_mapping, 0);
167 BUG_ON(!PageLocked(page));
168
169 if (!PageUptodate(page))
170 {
171 kaddr = kmap(page);
172 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
174 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 UDF_I_LENALLOC(inode));
176 flush_dcache_page(page);
177 SetPageUptodate(page);
178 kunmap(page);
179 }
180 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 UDF_I_LENALLOC(inode));
182 UDF_I_LENALLOC(inode) = 0;
183 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
185 else
186 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
187
188 inode->i_data.a_ops->writepage(page, &udf_wbc);
189 page_cache_release(page);
190
191 mark_inode_dirty(inode);
192 }
193
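/*
 * Same conversion for directories: allocate a new directory block, re-write
 * the file identifiers from the ICB into it, clear the embedded area and add
 * an extent describing the new block. Returns the new block's buffer_head
 * (or NULL on failure) and its logical block number through *block.
 */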
194 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
195 {
196 int newblock;
197 struct buffer_head *dbh = NULL;
198 kernel_lb_addr eloc;
199 uint32_t elen;
200 uint8_t alloctype;
201 struct extent_position epos;
202
203 struct udf_fileident_bh sfibh, dfibh;
204 loff_t f_pos = udf_ext0_offset(inode) >> 2;
205 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
206 struct fileIdentDesc cfi, *sfi, *dfi;
207
208 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
209 alloctype = ICBTAG_FLAG_AD_SHORT;
210 else
211 alloctype = ICBTAG_FLAG_AD_LONG;
212
213 if (!inode->i_size)
214 {
215 UDF_I_ALLOCTYPE(inode) = alloctype;
216 mark_inode_dirty(inode);
217 return NULL;
218 }
219
220 /* alloc block, and copy data to it */
221 *block = udf_new_block(inode->i_sb, inode,
222 UDF_I_LOCATION(inode).partitionReferenceNum,
223 UDF_I_LOCATION(inode).logicalBlockNum, err);
224
225 if (!(*block))
226 return NULL;
227 newblock = udf_get_pblock(inode->i_sb, *block,
228 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
229 if (!newblock)
230 return NULL;
231 dbh = udf_tgetblk(inode->i_sb, newblock);
232 if (!dbh)
233 return NULL;
234 lock_buffer(dbh);
235 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
236 set_buffer_uptodate(dbh);
237 unlock_buffer(dbh);
238 mark_buffer_dirty_inode(dbh, inode);
239
240 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
241 sfibh.sbh = sfibh.ebh = NULL;
242 dfibh.soffset = dfibh.eoffset = 0;
243 dfibh.sbh = dfibh.ebh = dbh;
244 while ( (f_pos < size) )
245 {
246 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
247 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
248 if (!sfi)
249 {
250 brelse(dbh);
251 return NULL;
252 }
253 UDF_I_ALLOCTYPE(inode) = alloctype;
254 sfi->descTag.tagLocation = cpu_to_le32(*block);
255 dfibh.soffset = dfibh.eoffset;
256 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
257 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
258 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
259 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
260 {
261 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
262 brelse(dbh);
263 return NULL;
264 }
265 }
266 mark_buffer_dirty_inode(dbh, inode);
267
268 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
269 UDF_I_LENALLOC(inode) = 0;
270 eloc.logicalBlockNum = *block;
271 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272 elen = inode->i_size;
273 UDF_I_LENEXTENTS(inode) = elen;
274 epos.bh = NULL;
275 epos.block = UDF_I_LOCATION(inode);
276 epos.offset = udf_file_entry_alloc_offset(inode);
277 udf_add_aext(inode, &epos, eloc, elen, 0);
278 /* UniqueID stuff */
279
280 brelse(epos.bh);
281 mark_inode_dirty(inode);
282 return dbh;
283 }
284
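/*
 * get_block callback used by the address_space operations above: map a file
 * block to a block on disk, allocating a new one via inode_getblk() when
 * 'create' is set.
 */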
285 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
286 {
287 int err, new;
288 struct buffer_head *bh;
289 unsigned long phys;
290
291 if (!create)
292 {
293 phys = udf_block_map(inode, block);
294 if (phys)
295 map_bh(bh_result, inode->i_sb, phys);
296 return 0;
297 }
298
299 err = -EIO;
300 new = 0;
301 bh = NULL;
302
303 lock_kernel();
304
305 if (block < 0)
306 goto abort_negative;
307
308 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
309 {
310 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
311 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
312 }
313
314 err = 0;
315
316 bh = inode_getblk(inode, block, &err, &phys, &new);
317 BUG_ON(bh);
318 if (err)
319 goto abort;
320 BUG_ON(!phys);
321
322 if (new)
323 set_buffer_new(bh_result);
324 map_bh(bh_result, inode->i_sb, phys);
325 abort:
326 unlock_kernel();
327 return err;
328
329 abort_negative:
330 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
331 goto abort;
332 }
333
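/*
 * Map (and optionally allocate) the given file block and return a
 * buffer_head for it; freshly allocated blocks are zeroed out.
 */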
334 static struct buffer_head *
335 udf_getblk(struct inode *inode, long block, int create, int *err)
336 {
337 struct buffer_head dummy;
338
339 dummy.b_state = 0;
340 dummy.b_blocknr = -1000;
341 *err = udf_get_block(inode, block, &dummy, create);
342 if (!*err && buffer_mapped(&dummy))
343 {
344 struct buffer_head *bh;
345 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
346 if (buffer_new(&dummy))
347 {
348 lock_buffer(bh);
349 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
350 set_buffer_uptodate(bh);
351 unlock_buffer(bh);
352 mark_buffer_dirty_inode(bh, inode);
353 }
354 return bh;
355 }
356 return NULL;
357 }
358
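/*
 * Core mapping/allocation routine: walk the extent list to find the extent
 * covering the requested block, allocate a new block if necessary, and
 * split, preallocate and merge extents before writing the updated list
 * back. The physical block number is returned through *phys.
 */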
359 static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
360 int *err, long *phys, int *new)
361 {
362 struct buffer_head *result = NULL;
363 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
364 struct extent_position prev_epos, cur_epos, next_epos;
365 int count = 0, startnum = 0, endnum = 0;
366 uint32_t elen = 0;
367 kernel_lb_addr eloc;
368 int c = 1;
369 loff_t lbcount = 0, b_off = 0;
370 uint32_t newblocknum, newblock;
371 sector_t offset = 0;
372 int8_t etype;
373 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
374 char lastblock = 0;
375
376 prev_epos.offset = udf_file_entry_alloc_offset(inode);
377 prev_epos.block = UDF_I_LOCATION(inode);
378 prev_epos.bh = NULL;
379 cur_epos = next_epos = prev_epos;
380 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
381
382 /* find the extent which contains the block we are looking for.
383 alternate between laarr[0] and laarr[1] for locations of the
384 current extent, and the previous extent */
385 do
386 {
387 if (prev_epos.bh != cur_epos.bh)
388 {
389 brelse(prev_epos.bh);
390 get_bh(cur_epos.bh);
391 prev_epos.bh = cur_epos.bh;
392 }
393 if (cur_epos.bh != next_epos.bh)
394 {
395 brelse(cur_epos.bh);
396 get_bh(next_epos.bh);
397 cur_epos.bh = next_epos.bh;
398 }
399
400 lbcount += elen;
401
402 prev_epos.block = cur_epos.block;
403 cur_epos.block = next_epos.block;
404
405 prev_epos.offset = cur_epos.offset;
406 cur_epos.offset = next_epos.offset;
407
408 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
409 break;
410
411 c = !c;
412
413 laarr[c].extLength = (etype << 30) | elen;
414 laarr[c].extLocation = eloc;
415
416 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
417 pgoal = eloc.logicalBlockNum +
418 ((elen + inode->i_sb->s_blocksize - 1) >>
419 inode->i_sb->s_blocksize_bits);
420
421 count ++;
422 } while (lbcount + elen <= b_off);
423
424 b_off -= lbcount;
425 offset = b_off >> inode->i_sb->s_blocksize_bits;
426
427 	/* if the extent is allocated and recorded, return the block;
428 	   if the extent is not a multiple of the blocksize, round up */
429
430 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
431 {
432 if (elen & (inode->i_sb->s_blocksize - 1))
433 {
434 elen = EXT_RECORDED_ALLOCATED |
435 ((elen + inode->i_sb->s_blocksize - 1) &
436 ~(inode->i_sb->s_blocksize - 1));
437 etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
438 }
439 brelse(prev_epos.bh);
440 brelse(cur_epos.bh);
441 brelse(next_epos.bh);
442 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
443 *phys = newblock;
444 return NULL;
445 }
446
447 if (etype == -1)
448 {
449 endnum = startnum = ((count > 1) ? 1 : count);
450 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
451 {
452 laarr[c].extLength =
453 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
454 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
455 inode->i_sb->s_blocksize - 1) &
456 ~(inode->i_sb->s_blocksize - 1));
457 UDF_I_LENEXTENTS(inode) =
458 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
459 ~(inode->i_sb->s_blocksize - 1);
460 }
461 c = !c;
462 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
463 ((offset + 1) << inode->i_sb->s_blocksize_bits);
464 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
465 count ++;
466 endnum ++;
467 lastblock = 1;
468 }
469 else
470 endnum = startnum = ((count > 2) ? 2 : count);
471
472 /* if the current extent is in position 0, swap it with the previous */
473 if (!c && count != 1)
474 {
475 laarr[2] = laarr[0];
476 laarr[0] = laarr[1];
477 laarr[1] = laarr[2];
478 c = 1;
479 }
480
481 	/* if the current block is located in an extent, read the next extent */
482 if (etype != -1)
483 {
484 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
485 {
486 laarr[c+1].extLength = (etype << 30) | elen;
487 laarr[c+1].extLocation = eloc;
488 count ++;
489 startnum ++;
490 endnum ++;
491 }
492 else
493 lastblock = 1;
494 }
495 brelse(cur_epos.bh);
496 brelse(next_epos.bh);
497
498 /* if the current extent is not recorded but allocated, get the
499 block in the extent corresponding to the requested block */
500 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
501 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
502 else /* otherwise, allocate a new block */
503 {
504 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
505 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
506
507 if (!goal)
508 {
509 if (!(goal = pgoal))
510 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
511 }
512
513 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
514 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
515 {
516 brelse(prev_epos.bh);
517 *err = -ENOSPC;
518 return NULL;
519 }
520 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
521 }
522
523 	/* if the extent the requested block is located in contains multiple blocks,
524 split the extent into at most three extents. blocks prior to requested
525 block, requested block, and blocks after requested block */
526 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
527
528 #ifdef UDF_PREALLOCATE
529 /* preallocate blocks */
530 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
531 #endif
532
533 /* merge any continuous blocks in laarr */
534 udf_merge_extents(inode, laarr, &endnum);
535
536 /* write back the new extents, inserting new extents if the new number
537 of extents is greater than the old number, and deleting extents if
538 the new number of extents is less than the old number */
539 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
540
541 brelse(prev_epos.bh);
542
543 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
544 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
545 {
546 return NULL;
547 }
548 *phys = newblock;
549 *err = 0;
550 *new = 1;
551 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
552 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
553 inode->i_ctime = current_fs_time(inode->i_sb);
554
555 if (IS_SYNC(inode))
556 udf_sync_inode(inode);
557 else
558 mark_inode_dirty(inode);
559 return result;
560 }
561
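/*
 * Split the extent containing the newly allocated block into (at most) three
 * extents: the blocks before it, the block itself (now recorded and
 * allocated), and the blocks after it.
 */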
562 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
563 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
564 {
565 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
566 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
567 {
568 int curr = *c;
569 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
570 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
571 int8_t etype = (laarr[curr].extLength >> 30);
572
573 if (blen == 1)
574 ;
575 else if (!offset || blen == offset + 1)
576 {
577 laarr[curr+2] = laarr[curr+1];
578 laarr[curr+1] = laarr[curr];
579 }
580 else
581 {
582 laarr[curr+3] = laarr[curr+1];
583 laarr[curr+2] = laarr[curr+1] = laarr[curr];
584 }
585
586 if (offset)
587 {
588 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
589 {
590 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
591 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
592 (offset << inode->i_sb->s_blocksize_bits);
593 laarr[curr].extLocation.logicalBlockNum = 0;
594 laarr[curr].extLocation.partitionReferenceNum = 0;
595 }
596 else
597 laarr[curr].extLength = (etype << 30) |
598 (offset << inode->i_sb->s_blocksize_bits);
599 curr ++;
600 (*c) ++;
601 (*endnum) ++;
602 }
603
604 laarr[curr].extLocation.logicalBlockNum = newblocknum;
605 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
606 laarr[curr].extLocation.partitionReferenceNum =
607 UDF_I_LOCATION(inode).partitionReferenceNum;
608 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
609 inode->i_sb->s_blocksize;
610 curr ++;
611
612 if (blen != offset + 1)
613 {
614 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
615 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
616 laarr[curr].extLength = (etype << 30) |
617 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
618 curr ++;
619 (*endnum) ++;
620 }
621 }
622 }
623
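/*
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks following the current
 * extent and fold them into the in-memory extent array.
 */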
624 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
625 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
626 {
627 int start, length = 0, currlength = 0, i;
628
629 if (*endnum >= (c+1))
630 {
631 if (!lastblock)
632 return;
633 else
634 start = c;
635 }
636 else
637 {
638 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
639 {
640 start = c+1;
641 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
642 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
643 }
644 else
645 start = c;
646 }
647
648 for (i=start+1; i<=*endnum; i++)
649 {
650 if (i == *endnum)
651 {
652 if (lastblock)
653 length += UDF_DEFAULT_PREALLOC_BLOCKS;
654 }
655 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
656 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
657 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
658 else
659 break;
660 }
661
662 if (length)
663 {
664 int next = laarr[start].extLocation.logicalBlockNum +
665 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
666 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
667 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
668 laarr[start].extLocation.partitionReferenceNum,
669 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
670 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
671
672 if (numalloc)
673 {
674 if (start == (c+1))
675 laarr[start].extLength +=
676 (numalloc << inode->i_sb->s_blocksize_bits);
677 else
678 {
679 memmove(&laarr[c+2], &laarr[c+1],
680 sizeof(long_ad) * (*endnum - (c+1)));
681 (*endnum) ++;
682 laarr[c+1].extLocation.logicalBlockNum = next;
683 laarr[c+1].extLocation.partitionReferenceNum =
684 laarr[c].extLocation.partitionReferenceNum;
685 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
686 (numalloc << inode->i_sb->s_blocksize_bits);
687 start = c+1;
688 }
689
690 for (i=start+1; numalloc && i<*endnum; i++)
691 {
692 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
693 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
694
695 if (elen > numalloc)
696 {
697 laarr[i].extLength -=
698 (numalloc << inode->i_sb->s_blocksize_bits);
699 numalloc = 0;
700 }
701 else
702 {
703 numalloc -= elen;
704 if (*endnum > (i+1))
705 memmove(&laarr[i], &laarr[i+1],
706 sizeof(long_ad) * (*endnum - (i+1)));
707 i --;
708 (*endnum) --;
709 }
710 }
711 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
712 }
713 }
714 }
715
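/*
 * Merge adjacent extents in the in-memory array where possible (same type
 * and physically contiguous, or both unrecorded), and free the blocks behind
 * not-recorded-but-allocated extents that can become unallocated.
 */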
716 static void udf_merge_extents(struct inode *inode,
717 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
718 {
719 int i;
720
721 for (i=0; i<(*endnum-1); i++)
722 {
723 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
724 {
725 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
726 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
727 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
728 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
729 {
730 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
731 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
732 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
733 {
734 laarr[i+1].extLength = (laarr[i+1].extLength -
735 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
736 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
737 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
738 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
739 laarr[i+1].extLocation.logicalBlockNum =
740 laarr[i].extLocation.logicalBlockNum +
741 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
742 inode->i_sb->s_blocksize_bits);
743 }
744 else
745 {
746 laarr[i].extLength = laarr[i+1].extLength +
747 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
748 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
749 if (*endnum > (i+2))
750 memmove(&laarr[i+1], &laarr[i+2],
751 sizeof(long_ad) * (*endnum - (i+2)));
752 i --;
753 (*endnum) --;
754 }
755 }
756 }
757 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
758 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
759 {
760 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
761 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
762 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
763 laarr[i].extLocation.logicalBlockNum = 0;
764 laarr[i].extLocation.partitionReferenceNum = 0;
765
766 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
767 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
768 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
769 {
770 laarr[i+1].extLength = (laarr[i+1].extLength -
771 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
772 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
773 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
774 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
775 }
776 else
777 {
778 laarr[i].extLength = laarr[i+1].extLength +
779 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
780 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
781 if (*endnum > (i+2))
782 memmove(&laarr[i+1], &laarr[i+2],
783 sizeof(long_ad) * (*endnum - (i+2)));
784 i --;
785 (*endnum) --;
786 }
787 }
788 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
789 {
790 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
791 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
792 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
793 laarr[i].extLocation.logicalBlockNum = 0;
794 laarr[i].extLocation.partitionReferenceNum = 0;
795 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
796 EXT_NOT_RECORDED_NOT_ALLOCATED;
797 }
798 }
799 }
800
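/*
 * Write the in-memory extent array back to the on-disk allocation
 * descriptors, inserting or deleting descriptors when the number of extents
 * has changed.
 */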
801 static void udf_update_extents(struct inode *inode,
802 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
803 struct extent_position *epos)
804 {
805 int start = 0, i;
806 kernel_lb_addr tmploc;
807 uint32_t tmplen;
808
809 if (startnum > endnum)
810 {
811 for (i=0; i<(startnum-endnum); i++)
812 udf_delete_aext(inode, *epos, laarr[i].extLocation,
813 laarr[i].extLength);
814 }
815 else if (startnum < endnum)
816 {
817 for (i=0; i<(endnum-startnum); i++)
818 {
819 udf_insert_aext(inode, *epos, laarr[i].extLocation,
820 laarr[i].extLength);
821 udf_next_aext(inode, epos, &laarr[i].extLocation,
822 &laarr[i].extLength, 1);
823 start ++;
824 }
825 }
826
827 for (i=start; i<endnum; i++)
828 {
829 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
830 udf_write_aext(inode, epos, laarr[i].extLocation,
831 laarr[i].extLength, 1);
832 }
833 }
834
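/*
 * Read a file block: map (and optionally allocate) it via udf_getblk() and
 * return the buffer_head up to date, or NULL with *err set on failure.
 */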
835 struct buffer_head * udf_bread(struct inode * inode, int block,
836 int create, int * err)
837 {
838 struct buffer_head * bh = NULL;
839
840 bh = udf_getblk(inode, block, create, err);
841 if (!bh)
842 return NULL;
843
844 if (buffer_uptodate(bh))
845 return bh;
846 ll_rw_block(READ, 1, &bh);
847 wait_on_buffer(bh);
848 if (buffer_uptodate(bh))
849 return bh;
850 brelse(bh);
851 *err = -EIO;
852 return NULL;
853 }
854
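/*
 * Truncate the inode's data to i_size: for in-ICB files either expand out of
 * the ICB first or just clear the tail of the embedded area, otherwise
 * truncate the extent list.
 */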
855 void udf_truncate(struct inode * inode)
856 {
857 int offset;
858 int err;
859
860 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
861 S_ISLNK(inode->i_mode)))
862 return;
863 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
864 return;
865
866 lock_kernel();
867 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
868 {
869 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
870 inode->i_size))
871 {
872 udf_expand_file_adinicb(inode, inode->i_size, &err);
873 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
874 {
875 inode->i_size = UDF_I_LENALLOC(inode);
876 unlock_kernel();
877 return;
878 }
879 else
880 udf_truncate_extents(inode);
881 }
882 else
883 {
884 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
885 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
886 UDF_I_LENALLOC(inode) = inode->i_size;
887 }
888 }
889 else
890 {
891 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
892 udf_truncate_extents(inode);
893 }
894
895 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
896 if (IS_SYNC(inode))
897 udf_sync_inode (inode);
898 else
899 mark_inode_dirty(inode);
900 unlock_kernel();
901 }
902
903 static void
904 __udf_read_inode(struct inode *inode)
905 {
906 struct buffer_head *bh = NULL;
907 struct fileEntry *fe;
908 uint16_t ident;
909
910 /*
911 * Set defaults, but the inode is still incomplete!
912 * Note: get_new_inode() sets the following on a new inode:
913 * i_sb = sb
914 * i_no = ino
915 * i_flags = sb->s_flags
916 * i_state = 0
917 * clean_inode(): zero fills and sets
918 * i_count = 1
919 * i_nlink = 1
920 * i_op = NULL;
921 */
922 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
923
924 if (!bh)
925 {
926 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
927 inode->i_ino);
928 make_bad_inode(inode);
929 return;
930 }
931
932 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
933 ident != TAG_IDENT_USE)
934 {
935 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
936 inode->i_ino, ident);
937 brelse(bh);
938 make_bad_inode(inode);
939 return;
940 }
941
942 fe = (struct fileEntry *)bh->b_data;
943
944 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
945 {
946 struct buffer_head *ibh = NULL, *nbh = NULL;
947 struct indirectEntry *ie;
948
949 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
950 if (ident == TAG_IDENT_IE)
951 {
952 if (ibh)
953 {
954 kernel_lb_addr loc;
955 ie = (struct indirectEntry *)ibh->b_data;
956
957 loc = lelb_to_cpu(ie->indirectICB.extLocation);
958
959 if (ie->indirectICB.extLength &&
960 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
961 {
962 if (ident == TAG_IDENT_FE ||
963 ident == TAG_IDENT_EFE)
964 {
965 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
966 brelse(bh);
967 brelse(ibh);
968 brelse(nbh);
969 __udf_read_inode(inode);
970 return;
971 }
972 else
973 {
974 brelse(nbh);
975 brelse(ibh);
976 }
977 }
978 else
979 brelse(ibh);
980 }
981 }
982 else
983 brelse(ibh);
984 }
985 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
986 {
987 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
988 le16_to_cpu(fe->icbTag.strategyType));
989 brelse(bh);
990 make_bad_inode(inode);
991 return;
992 }
993 udf_fill_inode(inode, bh);
994 brelse(bh);
995 }
996
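/*
 * Fill the in-core inode from the (extended) file entry or unallocated space
 * entry in bh: allocation type, ownership, size, timestamps and file type.
 */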
997 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
998 {
999 struct fileEntry *fe;
1000 struct extendedFileEntry *efe;
1001 time_t convtime;
1002 long convtime_usec;
1003 int offset;
1004
1005 fe = (struct fileEntry *)bh->b_data;
1006 efe = (struct extendedFileEntry *)bh->b_data;
1007
1008 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1009 UDF_I_STRAT4096(inode) = 0;
1010 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1011 UDF_I_STRAT4096(inode) = 1;
1012
1013 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1014 UDF_I_UNIQUE(inode) = 0;
1015 UDF_I_LENEATTR(inode) = 0;
1016 UDF_I_LENEXTENTS(inode) = 0;
1017 UDF_I_LENALLOC(inode) = 0;
1018 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1019 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1020 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1021 {
1022 UDF_I_EFE(inode) = 1;
1023 UDF_I_USE(inode) = 0;
1024 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1025 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1026 }
1027 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1028 {
1029 UDF_I_EFE(inode) = 0;
1030 UDF_I_USE(inode) = 0;
1031 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1032 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1033 }
1034 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1035 {
1036 UDF_I_EFE(inode) = 0;
1037 UDF_I_USE(inode) = 1;
1038 UDF_I_LENALLOC(inode) =
1039 le32_to_cpu(
1040 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1041 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1042 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1043 return;
1044 }
1045
1046 inode->i_uid = le32_to_cpu(fe->uid);
1047 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1048 UDF_FLAG_UID_IGNORE))
1049 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1050
1051 inode->i_gid = le32_to_cpu(fe->gid);
1052 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1053 UDF_FLAG_GID_IGNORE))
1054 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1055
1056 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1057 if (!inode->i_nlink)
1058 inode->i_nlink = 1;
1059
1060 inode->i_size = le64_to_cpu(fe->informationLength);
1061 UDF_I_LENEXTENTS(inode) = inode->i_size;
1062
1063 inode->i_mode = udf_convert_permissions(fe);
1064 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1065
1066 if (UDF_I_EFE(inode) == 0)
1067 {
1068 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1069 (inode->i_sb->s_blocksize_bits - 9);
1070
1071 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1072 lets_to_cpu(fe->accessTime)) )
1073 {
1074 inode->i_atime.tv_sec = convtime;
1075 inode->i_atime.tv_nsec = convtime_usec * 1000;
1076 }
1077 else
1078 {
1079 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1080 }
1081
1082 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1083 lets_to_cpu(fe->modificationTime)) )
1084 {
1085 inode->i_mtime.tv_sec = convtime;
1086 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1087 }
1088 else
1089 {
1090 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1091 }
1092
1093 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1094 lets_to_cpu(fe->attrTime)) )
1095 {
1096 inode->i_ctime.tv_sec = convtime;
1097 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1098 }
1099 else
1100 {
1101 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1102 }
1103
1104 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1105 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1106 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1107 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1108 }
1109 else
1110 {
1111 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1112 (inode->i_sb->s_blocksize_bits - 9);
1113
1114 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1115 lets_to_cpu(efe->accessTime)) )
1116 {
1117 inode->i_atime.tv_sec = convtime;
1118 inode->i_atime.tv_nsec = convtime_usec * 1000;
1119 }
1120 else
1121 {
1122 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1123 }
1124
1125 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1126 lets_to_cpu(efe->modificationTime)) )
1127 {
1128 inode->i_mtime.tv_sec = convtime;
1129 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1130 }
1131 else
1132 {
1133 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1134 }
1135
1136 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1137 lets_to_cpu(efe->createTime)) )
1138 {
1139 UDF_I_CRTIME(inode).tv_sec = convtime;
1140 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1141 }
1142 else
1143 {
1144 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1145 }
1146
1147 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1148 lets_to_cpu(efe->attrTime)) )
1149 {
1150 inode->i_ctime.tv_sec = convtime;
1151 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1152 }
1153 else
1154 {
1155 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1156 }
1157
1158 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1159 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1160 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1161 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1162 }
1163
1164 switch (fe->icbTag.fileType)
1165 {
1166 case ICBTAG_FILE_TYPE_DIRECTORY:
1167 {
1168 inode->i_op = &udf_dir_inode_operations;
1169 inode->i_fop = &udf_dir_operations;
1170 inode->i_mode |= S_IFDIR;
1171 inc_nlink(inode);
1172 break;
1173 }
1174 case ICBTAG_FILE_TYPE_REALTIME:
1175 case ICBTAG_FILE_TYPE_REGULAR:
1176 case ICBTAG_FILE_TYPE_UNDEF:
1177 {
1178 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1179 inode->i_data.a_ops = &udf_adinicb_aops;
1180 else
1181 inode->i_data.a_ops = &udf_aops;
1182 inode->i_op = &udf_file_inode_operations;
1183 inode->i_fop = &udf_file_operations;
1184 inode->i_mode |= S_IFREG;
1185 break;
1186 }
1187 case ICBTAG_FILE_TYPE_BLOCK:
1188 {
1189 inode->i_mode |= S_IFBLK;
1190 break;
1191 }
1192 case ICBTAG_FILE_TYPE_CHAR:
1193 {
1194 inode->i_mode |= S_IFCHR;
1195 break;
1196 }
1197 case ICBTAG_FILE_TYPE_FIFO:
1198 {
1199 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1200 break;
1201 }
1202 case ICBTAG_FILE_TYPE_SOCKET:
1203 {
1204 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1205 break;
1206 }
1207 case ICBTAG_FILE_TYPE_SYMLINK:
1208 {
1209 inode->i_data.a_ops = &udf_symlink_aops;
1210 inode->i_op = &page_symlink_inode_operations;
1211 inode->i_mode = S_IFLNK|S_IRWXUGO;
1212 break;
1213 }
1214 default:
1215 {
1216 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1217 inode->i_ino, fe->icbTag.fileType);
1218 make_bad_inode(inode);
1219 return;
1220 }
1221 }
1222 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1223 {
1224 struct deviceSpec *dsea =
1225 (struct deviceSpec *)
1226 udf_get_extendedattr(inode, 12, 1);
1227
1228 if (dsea)
1229 {
1230 init_special_inode(inode, inode->i_mode, MKDEV(
1231 le32_to_cpu(dsea->majorDeviceIdent),
1232 le32_to_cpu(dsea->minorDeviceIdent)));
1233 /* Developer ID ??? */
1234 }
1235 else
1236 {
1237 make_bad_inode(inode);
1238 }
1239 }
1240 }
1241
1242 static mode_t
1243 udf_convert_permissions(struct fileEntry *fe)
1244 {
1245 mode_t mode;
1246 uint32_t permissions;
1247 uint32_t flags;
1248
1249 permissions = le32_to_cpu(fe->permissions);
1250 flags = le16_to_cpu(fe->icbTag.flags);
1251
1252 mode = (( permissions ) & S_IRWXO) |
1253 (( permissions >> 2 ) & S_IRWXG) |
1254 (( permissions >> 4 ) & S_IRWXU) |
1255 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1256 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1257 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1258
1259 return mode;
1260 }
1261
1262 /*
1263 * udf_write_inode
1264 *
1265 * PURPOSE
1266 * Write out the specified inode.
1267 *
1268 * DESCRIPTION
1269 * This routine is called whenever an inode is synced.
1270 *	It writes the inode back to disk through udf_update_inode().
1271 *
1272 * HISTORY
1273 * July 1, 1997 - Andrew E. Mileski
1274 * Written, tested, and released.
1275 */
1276
1277 int udf_write_inode(struct inode * inode, int sync)
1278 {
1279 int ret;
1280 lock_kernel();
1281 ret = udf_update_inode(inode, sync);
1282 unlock_kernel();
1283 return ret;
1284 }
1285
1286 int udf_sync_inode(struct inode * inode)
1287 {
1288 return udf_update_inode(inode, 1);
1289 }
1290
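/*
 * Inverse of udf_fill_inode(): rebuild the on-disk (extended) file entry
 * from the in-core inode, recompute the descriptor CRC and checksum, and
 * write it out (synchronously if do_sync is set).
 */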
1291 static int
1292 udf_update_inode(struct inode *inode, int do_sync)
1293 {
1294 struct buffer_head *bh = NULL;
1295 struct fileEntry *fe;
1296 struct extendedFileEntry *efe;
1297 uint32_t udfperms;
1298 uint16_t icbflags;
1299 uint16_t crclen;
1300 int i;
1301 kernel_timestamp cpu_time;
1302 int err = 0;
1303
1304 bh = udf_tread(inode->i_sb,
1305 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1306
1307 if (!bh)
1308 {
1309 udf_debug("bread failure\n");
1310 return -EIO;
1311 }
1312
1313 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1314
1315 fe = (struct fileEntry *)bh->b_data;
1316 efe = (struct extendedFileEntry *)bh->b_data;
1317
1318 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1319 {
1320 struct unallocSpaceEntry *use =
1321 (struct unallocSpaceEntry *)bh->b_data;
1322
1323 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1324 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1325 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1326 sizeof(tag);
1327 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1328 use->descTag.descCRCLength = cpu_to_le16(crclen);
1329 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1330
1331 use->descTag.tagChecksum = 0;
1332 for (i=0; i<16; i++)
1333 if (i != 4)
1334 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1335
1336 mark_buffer_dirty(bh);
1337 brelse(bh);
1338 return err;
1339 }
1340
1341 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1342 fe->uid = cpu_to_le32(-1);
1343 else fe->uid = cpu_to_le32(inode->i_uid);
1344
1345 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1346 fe->gid = cpu_to_le32(-1);
1347 else fe->gid = cpu_to_le32(inode->i_gid);
1348
1349 udfperms = ((inode->i_mode & S_IRWXO) ) |
1350 ((inode->i_mode & S_IRWXG) << 2) |
1351 ((inode->i_mode & S_IRWXU) << 4);
1352
1353 udfperms |= (le32_to_cpu(fe->permissions) &
1354 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1355 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1356 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1357 fe->permissions = cpu_to_le32(udfperms);
1358
1359 if (S_ISDIR(inode->i_mode))
1360 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1361 else
1362 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1363
1364 fe->informationLength = cpu_to_le64(inode->i_size);
1365
1366 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1367 {
1368 regid *eid;
1369 struct deviceSpec *dsea =
1370 (struct deviceSpec *)
1371 udf_get_extendedattr(inode, 12, 1);
1372
1373 if (!dsea)
1374 {
1375 dsea = (struct deviceSpec *)
1376 udf_add_extendedattr(inode,
1377 sizeof(struct deviceSpec) +
1378 sizeof(regid), 12, 0x3);
1379 dsea->attrType = cpu_to_le32(12);
1380 dsea->attrSubtype = 1;
1381 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1382 sizeof(regid));
1383 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1384 }
1385 eid = (regid *)dsea->impUse;
1386 memset(eid, 0, sizeof(regid));
1387 strcpy(eid->ident, UDF_ID_DEVELOPER);
1388 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1389 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1390 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1391 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1392 }
1393
1394 if (UDF_I_EFE(inode) == 0)
1395 {
1396 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1397 fe->logicalBlocksRecorded = cpu_to_le64(
1398 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1399 (inode->i_sb->s_blocksize_bits - 9));
1400
1401 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1402 fe->accessTime = cpu_to_lets(cpu_time);
1403 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1404 fe->modificationTime = cpu_to_lets(cpu_time);
1405 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1406 fe->attrTime = cpu_to_lets(cpu_time);
1407 memset(&(fe->impIdent), 0, sizeof(regid));
1408 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1409 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1410 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1411 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1412 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1413 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1414 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1415 crclen = sizeof(struct fileEntry);
1416 }
1417 else
1418 {
1419 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1420 efe->objectSize = cpu_to_le64(inode->i_size);
1421 efe->logicalBlocksRecorded = cpu_to_le64(
1422 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1423 (inode->i_sb->s_blocksize_bits - 9));
1424
1425 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1426 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1427 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1428 {
1429 UDF_I_CRTIME(inode) = inode->i_atime;
1430 }
1431 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1432 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1433 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1434 {
1435 UDF_I_CRTIME(inode) = inode->i_mtime;
1436 }
1437 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1438 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1439 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1440 {
1441 UDF_I_CRTIME(inode) = inode->i_ctime;
1442 }
1443
1444 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1445 efe->accessTime = cpu_to_lets(cpu_time);
1446 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1447 efe->modificationTime = cpu_to_lets(cpu_time);
1448 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1449 efe->createTime = cpu_to_lets(cpu_time);
1450 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1451 efe->attrTime = cpu_to_lets(cpu_time);
1452
1453 memset(&(efe->impIdent), 0, sizeof(regid));
1454 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1455 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1456 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1457 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1458 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1459 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1460 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1461 crclen = sizeof(struct extendedFileEntry);
1462 }
1463 if (UDF_I_STRAT4096(inode))
1464 {
1465 fe->icbTag.strategyType = cpu_to_le16(4096);
1466 fe->icbTag.strategyParameter = cpu_to_le16(1);
1467 fe->icbTag.numEntries = cpu_to_le16(2);
1468 }
1469 else
1470 {
1471 fe->icbTag.strategyType = cpu_to_le16(4);
1472 fe->icbTag.numEntries = cpu_to_le16(1);
1473 }
1474
1475 if (S_ISDIR(inode->i_mode))
1476 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1477 else if (S_ISREG(inode->i_mode))
1478 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1479 else if (S_ISLNK(inode->i_mode))
1480 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1481 else if (S_ISBLK(inode->i_mode))
1482 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1483 else if (S_ISCHR(inode->i_mode))
1484 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1485 else if (S_ISFIFO(inode->i_mode))
1486 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1487 else if (S_ISSOCK(inode->i_mode))
1488 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1489
1490 icbflags = UDF_I_ALLOCTYPE(inode) |
1491 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1492 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1493 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1494 (le16_to_cpu(fe->icbTag.flags) &
1495 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1496 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1497
1498 fe->icbTag.flags = cpu_to_le16(icbflags);
1499 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1500 fe->descTag.descVersion = cpu_to_le16(3);
1501 else
1502 fe->descTag.descVersion = cpu_to_le16(2);
1503 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1504 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1505 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1506 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1507 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1508
1509 fe->descTag.tagChecksum = 0;
1510 for (i=0; i<16; i++)
1511 if (i != 4)
1512 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1513
1514 /* write the data blocks */
1515 mark_buffer_dirty(bh);
1516 if (do_sync)
1517 {
1518 sync_dirty_buffer(bh);
1519 if (buffer_req(bh) && !buffer_uptodate(bh))
1520 {
1521 printk("IO error syncing udf inode [%s:%08lx]\n",
1522 inode->i_sb->s_id, inode->i_ino);
1523 err = -EIO;
1524 }
1525 }
1526 brelse(bh);
1527 return err;
1528 }
1529
1530 struct inode *
1531 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1532 {
1533 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1534 struct inode *inode = iget_locked(sb, block);
1535
1536 if (!inode)
1537 return NULL;
1538
1539 if (inode->i_state & I_NEW) {
1540 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1541 __udf_read_inode(inode);
1542 unlock_new_inode(inode);
1543 }
1544
1545 if (is_bad_inode(inode))
1546 goto out_iput;
1547
1548 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1549 udf_debug("block=%d, partition=%d out of range\n",
1550 ino.logicalBlockNum, ino.partitionReferenceNum);
1551 make_bad_inode(inode);
1552 goto out_iput;
1553 }
1554
1555 return inode;
1556
1557 out_iput:
1558 iput(inode);
1559 return NULL;
1560 }
1561
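/*
 * Append a new extent at the position given by epos, allocating and chaining
 * a new allocation extent block when the current descriptor area is full.
 */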
1562 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1563 kernel_lb_addr eloc, uint32_t elen, int inc)
1564 {
1565 int adsize;
1566 short_ad *sad = NULL;
1567 long_ad *lad = NULL;
1568 struct allocExtDesc *aed;
1569 int8_t etype;
1570 uint8_t *ptr;
1571
1572 if (!epos->bh)
1573 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1574 else
1575 ptr = epos->bh->b_data + epos->offset;
1576
1577 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1578 adsize = sizeof(short_ad);
1579 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1580 adsize = sizeof(long_ad);
1581 else
1582 return -1;
1583
1584 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
1585 {
1586 char *sptr, *dptr;
1587 struct buffer_head *nbh;
1588 int err, loffset;
1589 kernel_lb_addr obloc = epos->block;
1590
1591 if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1592 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1593 {
1594 return -1;
1595 }
1596 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1597 epos->block, 0))))
1598 {
1599 return -1;
1600 }
1601 lock_buffer(nbh);
1602 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1603 set_buffer_uptodate(nbh);
1604 unlock_buffer(nbh);
1605 mark_buffer_dirty_inode(nbh, inode);
1606
1607 aed = (struct allocExtDesc *)(nbh->b_data);
1608 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1609 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1610 if (epos->offset + adsize > inode->i_sb->s_blocksize)
1611 {
1612 loffset = epos->offset;
1613 aed->lengthAllocDescs = cpu_to_le32(adsize);
1614 sptr = ptr - adsize;
1615 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1616 memcpy(dptr, sptr, adsize);
1617 epos->offset = sizeof(struct allocExtDesc) + adsize;
1618 }
1619 else
1620 {
1621 loffset = epos->offset + adsize;
1622 aed->lengthAllocDescs = cpu_to_le32(0);
1623 sptr = ptr;
1624 epos->offset = sizeof(struct allocExtDesc);
1625
1626 if (epos->bh)
1627 {
1628 aed = (struct allocExtDesc *)epos->bh->b_data;
1629 aed->lengthAllocDescs =
1630 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1631 }
1632 else
1633 {
1634 UDF_I_LENALLOC(inode) += adsize;
1635 mark_inode_dirty(inode);
1636 }
1637 }
1638 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1639 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1640 epos->block.logicalBlockNum, sizeof(tag));
1641 else
1642 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1643 epos->block.logicalBlockNum, sizeof(tag));
1644 switch (UDF_I_ALLOCTYPE(inode))
1645 {
1646 case ICBTAG_FLAG_AD_SHORT:
1647 {
1648 sad = (short_ad *)sptr;
1649 sad->extLength = cpu_to_le32(
1650 EXT_NEXT_EXTENT_ALLOCDECS |
1651 inode->i_sb->s_blocksize);
1652 sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
1653 break;
1654 }
1655 case ICBTAG_FLAG_AD_LONG:
1656 {
1657 lad = (long_ad *)sptr;
1658 lad->extLength = cpu_to_le32(
1659 EXT_NEXT_EXTENT_ALLOCDECS |
1660 inode->i_sb->s_blocksize);
1661 lad->extLocation = cpu_to_lelb(epos->block);
1662 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1663 break;
1664 }
1665 }
1666 if (epos->bh)
1667 {
1668 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1669 udf_update_tag(epos->bh->b_data, loffset);
1670 else
1671 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1672 mark_buffer_dirty_inode(epos->bh, inode);
1673 brelse(epos->bh);
1674 }
1675 else
1676 mark_inode_dirty(inode);
1677 epos->bh = nbh;
1678 }
1679
1680 etype = udf_write_aext(inode, epos, eloc, elen, inc);
1681
1682 if (!epos->bh)
1683 {
1684 UDF_I_LENALLOC(inode) += adsize;
1685 mark_inode_dirty(inode);
1686 }
1687 else
1688 {
1689 aed = (struct allocExtDesc *)epos->bh->b_data;
1690 aed->lengthAllocDescs =
1691 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1692 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1693 udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
1694 else
1695 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1696 mark_buffer_dirty_inode(epos->bh, inode);
1697 }
1698
1699 return etype;
1700 }
1701
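/*
 * Write a single short_ad/long_ad at the current position and, if inc is
 * set, advance the position past it. Returns the extent type.
 */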
1702 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1703 kernel_lb_addr eloc, uint32_t elen, int inc)
1704 {
1705 int adsize;
1706 uint8_t *ptr;
1707
1708 if (!epos->bh)
1709 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1710 else
1711 ptr = epos->bh->b_data + epos->offset;
1712
1713 switch (UDF_I_ALLOCTYPE(inode))
1714 {
1715 case ICBTAG_FLAG_AD_SHORT:
1716 {
1717 short_ad *sad = (short_ad *)ptr;
1718 sad->extLength = cpu_to_le32(elen);
1719 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1720 adsize = sizeof(short_ad);
1721 break;
1722 }
1723 case ICBTAG_FLAG_AD_LONG:
1724 {
1725 long_ad *lad = (long_ad *)ptr;
1726 lad->extLength = cpu_to_le32(elen);
1727 lad->extLocation = cpu_to_lelb(eloc);
1728 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1729 adsize = sizeof(long_ad);
1730 break;
1731 }
1732 default:
1733 return -1;
1734 }
1735
1736 if (epos->bh)
1737 {
1738 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1739 {
1740 struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
1741 udf_update_tag(epos->bh->b_data,
1742 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1743 }
1744 mark_buffer_dirty_inode(epos->bh, inode);
1745 }
1746 else
1747 mark_inode_dirty(inode);
1748
1749 if (inc)
1750 epos->offset += adsize;
1751 return (elen >> 30);
1752 }
1753
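/*
 * Return the next extent, transparently following "next extent of allocation
 * descriptors" pointers into continuation blocks.
 */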
1754 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1755 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1756 {
1757 int8_t etype;
1758
1759 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1760 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1761 {
1762 epos->block = *eloc;
1763 epos->offset = sizeof(struct allocExtDesc);
1764 brelse(epos->bh);
1765 if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
1766 {
1767 udf_debug("reading block %d failed!\n",
1768 udf_get_lb_pblock(inode->i_sb, epos->block, 0));
1769 return -1;
1770 }
1771 }
1772
1773 return etype;
1774 }
1775
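/*
 * Decode the allocation descriptor at the current position (in the ICB or in
 * an allocation extent block) without following continuation extents.
 */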
1776 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1777 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1778 {
1779 int alen;
1780 int8_t etype;
1781 uint8_t *ptr;
1782
1783 if (!epos->bh)
1784 {
1785 if (!epos->offset)
1786 epos->offset = udf_file_entry_alloc_offset(inode);
1787 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1788 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1789 }
1790 else
1791 {
1792 if (!epos->offset)
1793 epos->offset = sizeof(struct allocExtDesc);
1794 ptr = epos->bh->b_data + epos->offset;
1795 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
1796 }
1797
1798 switch (UDF_I_ALLOCTYPE(inode))
1799 {
1800 case ICBTAG_FLAG_AD_SHORT:
1801 {
1802 short_ad *sad;
1803
1804 if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
1805 return -1;
1806
1807 etype = le32_to_cpu(sad->extLength) >> 30;
1808 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1809 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1810 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1811 break;
1812 }
1813 case ICBTAG_FLAG_AD_LONG:
1814 {
1815 long_ad *lad;
1816
1817 if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
1818 return -1;
1819
1820 etype = le32_to_cpu(lad->extLength) >> 30;
1821 *eloc = lelb_to_cpu(lad->extLocation);
1822 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1823 break;
1824 }
1825 default:
1826 {
1827 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1828 return -1;
1829 }
1830 }
1831
1832 return etype;
1833 }
1834
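/*
 * Insert a new extent at epos, shifting all following extents down by one
 * and re-appending the last one with udf_add_aext().
 */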
1835 static int8_t
1836 udf_insert_aext(struct inode *inode, struct extent_position epos,
1837 kernel_lb_addr neloc, uint32_t nelen)
1838 {
1839 kernel_lb_addr oeloc;
1840 uint32_t oelen;
1841 int8_t etype;
1842
1843 if (epos.bh)
1844 get_bh(epos.bh);
1845
1846 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
1847 {
1848 udf_write_aext(inode, &epos, neloc, nelen, 1);
1849
1850 neloc = oeloc;
1851 nelen = (etype << 30) | oelen;
1852 }
1853 udf_add_aext(inode, &epos, neloc, nelen, 1);
1854 brelse(epos.bh);
1855 return (nelen >> 30);
1856 }
1857
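/*
 * Delete the extent at epos, shifting all following extents up by one and
 * releasing an allocation extent block that becomes unused.
 */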
1858 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1859 kernel_lb_addr eloc, uint32_t elen)
1860 {
1861 struct extent_position oepos;
1862 int adsize;
1863 int8_t etype;
1864 struct allocExtDesc *aed;
1865
1866 if (epos.bh)
1867 {
1868 get_bh(epos.bh);
1869 get_bh(epos.bh);
1870 }
1871
1872 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1873 adsize = sizeof(short_ad);
1874 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1875 adsize = sizeof(long_ad);
1876 else
1877 adsize = 0;
1878
1879 oepos = epos;
1880 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
1881 return -1;
1882
1883 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
1884 {
1885 udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
1886 if (oepos.bh != epos.bh)
1887 {
1888 oepos.block = epos.block;
1889 brelse(oepos.bh);
1890 get_bh(epos.bh);
1891 oepos.bh = epos.bh;
1892 oepos.offset = epos.offset - adsize;
1893 }
1894 }
1895 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
1896 elen = 0;
1897
1898 if (epos.bh != oepos.bh)
1899 {
1900 udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
1901 udf_write_aext(inode, &oepos, eloc, elen, 1);
1902 udf_write_aext(inode, &oepos, eloc, elen, 1);
1903 if (!oepos.bh)
1904 {
1905 UDF_I_LENALLOC(inode) -= (adsize * 2);
1906 mark_inode_dirty(inode);
1907 }
1908 else
1909 {
1910 aed = (struct allocExtDesc *)oepos.bh->b_data;
1911 aed->lengthAllocDescs =
1912 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1913 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1914 udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
1915 else
1916 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
1917 mark_buffer_dirty_inode(oepos.bh, inode);
1918 }
1919 }
1920 else
1921 {
1922 udf_write_aext(inode, &oepos, eloc, elen, 1);
1923 if (!oepos.bh)
1924 {
1925 UDF_I_LENALLOC(inode) -= adsize;
1926 mark_inode_dirty(inode);
1927 }
1928 else
1929 {
1930 aed = (struct allocExtDesc *)oepos.bh->b_data;
1931 aed->lengthAllocDescs =
1932 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1933 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1934 udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
1935 else
1936 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
1937 mark_buffer_dirty_inode(oepos.bh, inode);
1938 }
1939 }
1940
1941 brelse(epos.bh);
1942 brelse(oepos.bh);
1943 return (elen >> 30);
1944 }
1945
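/*
 * Find the extent containing the given file block; returns the extent type
 * and fills in its location, length and the block's offset within it.
 */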
1946 int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
1947 kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
1948 {
1949 loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
1950 int8_t etype;
1951
1952 if (block < 0)
1953 {
1954 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1955 return -1;
1956 }
1957
1958 pos->offset = 0;
1959 pos->block = UDF_I_LOCATION(inode);
1960 pos->bh = NULL;
1961 *elen = 0;
1962
1963 do
1964 {
1965 if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
1966 {
1967 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
1968 UDF_I_LENEXTENTS(inode) = lbcount;
1969 return -1;
1970 }
1971 lbcount += *elen;
1972 } while (lbcount <= bcount);
1973
1974 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
1975
1976 return etype;
1977 }
1978
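/*
 * Map a file block to a physical block number for extents that are already
 * recorded and allocated; no allocation is performed here.
 */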
1979 long udf_block_map(struct inode *inode, sector_t block)
1980 {
1981 kernel_lb_addr eloc;
1982 uint32_t elen;
1983 sector_t offset;
1984 struct extent_position epos = { NULL, 0, { 0, 0}};
1985 int ret;
1986
1987 lock_kernel();
1988
1989 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
1990 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
1991 else
1992 ret = 0;
1993
1994 unlock_kernel();
1995 brelse(epos.bh);
1996
1997 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
1998 return udf_fixed_to_variable(ret);
1999 else
2000 return ret;
2001 }