/*
 *  linux/fs/ext4/file.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

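/*
 * Worked example of the alignment test above, assuming a 4096-byte block
 * size (so blockmask == 0x0fff):
 *
 *	pos = 8192, iov_iter_alignment(from) = 4096: (8192 | 4096) & 0x0fff == 0 -> aligned
 *	pos = 8192, iov_iter_alignment(from) =  512: (8192 |  512) & 0x0fff != 0 -> unaligned
 *
 * iov_iter_alignment() folds in the user buffer addresses and segment
 * lengths as well, so a misaligned buffer or a partial-block length also
 * marks the AIO as unaligned and forces serialization.
 */
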
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int overwrite = 0;
	ssize_t ret;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (iocb->ki_flags & IOCB_APPEND ||
	     ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized.  To exclude unwritten extents we
			 * also need to check m_flags.  There are two
			 * cases that indicate an initialized extent:
			 * 1) if we hit the extent cache, the
			 * EXT4_MAP_MAPPED flag is returned; 2) if we do
			 * a real lookup, no flags are returned.  So we
			 * check both conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;

out:
	mutex_unlock(&inode->i_mutex);
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, ext4_get_block);
					/* Is this the right get_block? */
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_mkwrite(vma, vmf, ext4_get_block);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = ext4_generate_encryption_key(inode);
		if (err)
			return 0;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	ret = dquot_file_open(inode, filp);
	if (!ret && ext4_encrypted_inode(inode)) {
		ret = ext4_generate_encryption_key(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because that lets the same
 * function implement SEEK_DATA/SEEK_HOLE for both block-mapped and
 * extent-mapped files.  Once the extent status tree is fully implemented,
 * it will track the status of all extents of a file, and we can use it
 * directly to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When retrieving the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data between
 * [startoff, endoff]: if this range contains an unwritten extent, the
 * extent is treated as data or as a hole depending on whether the page
 * cache holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass through the loop and
			 * the offset is not beyond the end offset, there
			 * is a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the offset
		 * is smaller than the first page offset, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of
			 * the given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * If fewer pages were returned than we asked for, there
		 * must be a hole in the remainder of the range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat it
		 * as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip over
		 * this extent.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

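/*
 * The SEEK_DATA/SEEK_HOLE cases above back the standard lseek(2) interface;
 * a minimal userspace sketch of the semantics they implement (illustrative
 * only, not part of this file):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// first offset containing data
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// end of that data run
 *
 * Both calls fail with errno == ENXIO when the requested offset is at or
 * beyond the end of the file, matching the -ENXIO returns in
 * ext4_seek_data() and ext4_seek_hole().
 */
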
const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};