/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int		tag,
	xfs_iocore_t	*io,
	void		*data,
	size_t		segs,
	loff_t		offset,
	int		ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
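	/*
	 * Each ktrace slot holds a pointer-sized value, so the 64-bit
	 * quantities (di_size, offset, io_new_size) are recorded as
	 * separate high and low 32-bit words.
	 */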
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the supplied buffer
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count,	/* size of data to zero		*/
	loff_t			end_size)	/* max file size to set	*/
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	char			*kaddr;
	int			status;

	mapping = ip->i_mapping;
	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		kaddr = kmap(page);
		status = mapping->a_ops->prepare_write(NULL, page, offset,
							offset + bytes);
		if (status) {
			goto unlock;
		}

		memset((void *) (kaddr + offset), 0, bytes);
		flush_dcache_page(page);
		status = mapping->a_ops->commit_write(NULL, page, offset,
							offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
			if (pos > i_size_read(ip))
				i_size_write(ip, pos < end_size ? pos : end_size);
		}

unlock:
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

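	/*
	 * status is zero or a negative errno at this point; return the
	 * negated value so callers such as xfs_zero_last_block() see the
	 * positive error convention they assert on.
	 */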
	return (-status);
}

ssize_t			/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

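	/*
	 * Direct I/O must be sector aligned for the target device; an
	 * unaligned request that starts exactly at EOF reads nothing
	 * rather than failing.
	 */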
	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->pbr_smask) ||
		    (size & target->pbr_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		return -EIO;
	}

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_isem;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC);

unlock_isem:
	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_unlock(&inode->i_mutex);
	return ret;
}

ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (count == 0))
		return 0;

	if (n < count)
		count = n;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
				      *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
		   (void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC);

	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_fsize_t	isize,
	xfs_fsize_t	end_size)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		isize_fsb_offset;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
	ASSERT(offset > isize);

	mp = io->io_mount;

	isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (isize_fsb_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	loff = XFS_FSB_TO_B(mp, last_fsb);
	lsize = XFS_FSB_TO_B(mp, 1);

	zero_offset = isize_fsb_offset;
	zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;

	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int					/* error (positive) */
xfs_zero_eof(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = LINVFS_GET_IP(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	prev_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_extlen_t	buf_len_fsb;
	xfs_extlen_t	prev_zero_count;
	xfs_mount_t	*mp;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));

	mp = io->io_mount;

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, offset, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	prev_zero_fsb = NULLFILEOFF;
	prev_zero_count = 0;
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			prev_zero_fsb = NULLFILEOFF;
			prev_zero_count = 0;
			start_zero_fsb = imap.br_startoff +
					 imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks in the range requested.
		 * Zero them a single write at a time.  We actually
		 * don't zero the entire range returned if it is
		 * too big and simply loop around to get the rest.
		 * That is not the most efficient thing to do, but it
		 * is simple and this path should not be exercised often.
		 */
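		/*
		 * Each pass is capped at m_writeio_blocks << 8 blocks,
		 * i.e. 256 times the preferred write I/O size.
		 */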
		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
					      mp->m_writeio_blocks << 8);
		/*
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);

		error = xfs_iozero(ip, loff, lsize, end_size);

		if (error) {
			goto out_lock;
		}

		prev_zero_fsb = start_zero_fsb;
		prev_zero_count = buf_len_fsb;
		start_zero_fsb = imap.br_startoff + buf_len_fsb;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	vnode_t			*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_isem = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

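	/*
	 * For direct I/O, i_mutex can be skipped (and a shared iolock
	 * used) only when there are no cached pages to invalidate and
	 * the write starts below the current file size; any cached
	 * pages must be flushed before the transfer is issued.
	 */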
	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->pbr_smask) || (count & target->pbr_smask))
			return XFS_ERROR(-EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_isem = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

relock:
	if (need_isem) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		mutex_lock(&inode->i_mutex);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_isem;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_isem)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count,
				      dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	if (likely(!(ioflags & IO_INVIS))) {
		inode_update_time(inode, 1);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */

	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
				     isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_isem;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */

	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_isem) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_isem = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_isem = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

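	/*
	 * If the write failed with ENOSPC and a DMAPI application has
	 * registered for the NOSPACE event, drop our locks, send the
	 * event so space can be freed, and retry the write from the
	 * current on-disk file size.
	 */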
	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_nounlocks;
		if (need_isem)
			mutex_lock(&inode->i_mutex);
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		ret = 0;
		goto retry;
	}

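	/*
	 * If this write extended the file, update the in-core and on-disk
	 * sizes.  The check is repeated under the ilock in case another
	 * writer raced with us.
	 */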
	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		/*
		 * If we're treating this as O_DSYNC and we have not updated the
		 * size, force the log.
		 */
		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
		    !(xip->i_update_size)) {
			xfs_inode_log_item_t	*iip = xip->i_itemp;

			/*
			 * If an allocation transaction occurred
			 * without extending the size, then we have to force
			 * the log up the proper point to ensure that the
			 * allocation is permanent.  We can't count on
			 * the fact that buffered writes lock out direct I/O
			 * writes - the direct I/O write could have extended
			 * the size nontransactionally, then finished before
			 * we started.  xfs_write_file will think that the file
			 * didn't grow but the update isn't safe unless the
			 * size change is logged.
			 *
			 * Force the log if we've committed a transaction
			 * against the inode or if someone else has and
			 * the commit record hasn't gone to disk (e.g.
			 * the inode is pinned).  This guarantees that
			 * all changes affecting the inode are permanent
			 * when we return.
			 */
			if (iip && iip->ili_last_lsn) {
				xfs_log_force(mp, iip->ili_last_lsn,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			} else if (xfs_ipincount(xip) > 0) {
				xfs_log_force(mp, (xfs_lsn_t)0,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			}

		} else {
			xfs_trans_t	*tp;

			/*
			 * O_SYNC or O_DSYNC _with_ a size update are handled
			 * the same way.
			 *
			 * If the write was synchronous then we need to make
			 * sure that the inode modification time is permanent.
			 * We'll have updated the timestamp above, so here
			 * we use a synchronous transaction to log the inode.
			 * It's not fast, but it's necessary.
			 *
			 * If this is a dsync write and the size got changed
			 * non-transactionally, then we need to ensure that
			 * the size change gets logged in a synchronous
			 * transaction.
			 */

			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
			if ((error = xfs_trans_reserve(tp, 0,
						      XFS_SWRITE_LOG_RES(mp),
						      0, 0, 0))) {
				/* Transaction reserve failed */
				xfs_trans_cancel(tp, 0);
			} else {
				/* Transaction reserve successful */
				xfs_ilock(xip, XFS_ILOCK_EXCL);
				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
				xfs_trans_ihold(tp, xip);
				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
				xfs_trans_set_sync(tp);
				error = xfs_trans_commit(tp, 0, NULL);
				xfs_iunlock(xip, XFS_ILOCK_EXCL);
			}
			if (error)
				goto out_unlock_internal;
		}

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			mutex_unlock(&inode->i_mutex);

		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}

out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
out_unlock_isem:
	if (need_isem)
		mutex_unlock(&inode->i_mutex);
out_nounlocks:
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shut down the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		pagebuf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}
}

int
xfs_bmap(bhv_desc_t	*bdp,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes through this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 */

		pagebuf_iorequest(bp);
		return 0;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}