/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>


#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int			tag,
	xfs_iocore_t		*io,
	void			*data,
	size_t			segs,
	loff_t			offset,
	int			ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
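	/*
	 * ktrace slots are pointer sized, so the 64-bit quantities below
	 * (di_size, offset, io_new_size) are split into high and low
	 * 32-bit halves before the cast; a single slot could not hold
	 * them on a 32-bit kernel.
	 */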
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
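/*
 * Worked example (assuming PAGE_CACHE_SIZE == 4096): a call with
 * pos == 5000 and count == 6000 touches pages 1 and 2: first 3192 bytes
 * starting at offset 904 within page 1, then the remaining 2808 bytes
 * from the start of page 2, with each page prepared, zeroed and
 * committed in turn by the loop below.
 */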
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count,	/* size of data to zero		*/
	loff_t			end_size)	/* max file size to set	*/
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	char			*kaddr;
	int			status;

	mapping = ip->i_mapping;
	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		kaddr = kmap(page);
		status = mapping->a_ops->prepare_write(NULL, page, offset,
							offset + bytes);
		if (status) {
			goto unlock;
		}

		memset((void *) (kaddr + offset), 0, bytes);
		flush_dcache_page(page);
		status = mapping->a_ops->commit_write(NULL, page, offset,
							offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
			if (pos > i_size_read(ip))
				i_size_write(ip, pos < end_size ? pos : end_size);
		}

unlock:
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

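	/*
	 * ->prepare_write()/->commit_write() hand back negative errnos;
	 * flip the sign so callers see the positive error convention
	 * used elsewhere in this file (xfs_zero_last_block asserts
	 * error >= 0 after calling us).
	 */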
	return (-status);
}

ssize_t			/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */
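	/*
	 * The (size|iov_len) cast above does double duty: a single
	 * negative iov_len or a cumulative length that wraps past
	 * SSIZE_MAX both set the sign bit, so one signed comparison
	 * catches both failure modes.
	 */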

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
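		/*
		 * Direct I/O must be sector aligned on the chosen device:
		 * bt_smask is the sector size minus one (0x1ff for
		 * 512-byte sectors, for example), and both the file
		 * offset and the total length must be multiples of it.
		 */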
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

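	/*
	 * DMAPI hook: if an HSM has expressed interest in reads of this
	 * file, send DM_EVENT_READ so it can stage the data back in
	 * before we touch it. The same pattern guards the sendfile,
	 * splice and write paths below.
	 */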
	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_mutex;
		}
	}

	if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
		VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)),
						-1, FI_REMAPF_LOCKED);

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

unlock_mutex:
	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_unlock(&inode->i_mutex);
	return ret;
}

ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
				      *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
		   (void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_read(
	bhv_desc_t		*bdp,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(infilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_write(
	bhv_desc_t		*bdp,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_WRITE;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(outfilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
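/*
 * Worked example (assuming a 4096-byte block size): with an on-disk
 * size of 10000 bytes, zero_offset is 10000 & 4095 = 1808, so zero_len
 * is 4096 - 1808 = 2288 and bytes 10000..12287 (the tail of block 2)
 * are zeroed, provided that block is actually allocated.
 */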
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_fsize_t	isize,
	xfs_fsize_t	end_size)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);

	loff = XFS_FSB_TO_B(mp, last_fsb);
	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int					/* error (positive) */
xfs_zero_eof(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = vn_to_inode(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_extlen_t	buf_len_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
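	/*
	 * Worked example (4096-byte blocks): growing from isize 10000 to
	 * offset 50000 gives last_fsb = (10000 - 1) >> 12 = 2,
	 * start_zero_fsb = 3 (10000 rounded up to a block boundary) and
	 * end_zero_fsb = (50000 - 1) >> 12 = 12, so blocks 3..12 are the
	 * candidates for zeroing in the loop below.
	 */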
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks in the range requested.
		 * Zero them a single write at a time.  We actually
		 * don't zero the entire range returned if it is
		 * too big and simply loop around to get the rest.
		 * That is not the most efficient thing to do, but it
		 * is simple and this path should not be exercised often.
		 */
		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
					      mp->m_writeio_blocks << 8);
		/*
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		error = xfs_iozero(ip,
				   XFS_FSB_TO_B(mp, start_zero_fsb),
				   XFS_FSB_TO_B(mp, buf_len_fsb),
				   end_size);

		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + buf_len_fsb;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	vnode_t			*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask))
			return XFS_ERROR(-EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_i_mutex = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

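	/*
	 * Lock strategy: a direct write below EOF with no cached pages
	 * can run with a shared iolock and without i_mutex; everything
	 * else (buffered I/O, cached pages, size-extending writes) takes
	 * i_mutex and the exclusive iolock first.
	 */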
relock:
	if (need_i_mutex) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		mutex_lock(&inode->i_mutex);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count,
				      dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_mutex;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	if (likely(!(ioflags & IO_INVIS))) {
		file_update_time(file);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */

	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
				     isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_mutex;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */

	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_mutex;
		}
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_i_mutex = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_i_mutex = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

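	/*
	 * ENOSPC with DMAPI enabled: drop the locks, give the space
	 * manager a chance to free something up via DM_EVENT_NOSPACE,
	 * then retake the locks and retry the write from the current
	 * on-disk size.
	 */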
	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_nounlocks;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		ret = 0;
		goto retry;
	}

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		/*
		 * If we're treating this as O_DSYNC and we have not updated the
		 * size, force the log.
		 */
		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
		    !(xip->i_update_size)) {
			xfs_inode_log_item_t	*iip = xip->i_itemp;

			/*
			 * If an allocation transaction occurred
			 * without extending the size, then we have to force
			 * the log up the proper point to ensure that the
			 * allocation is permanent.  We can't count on
			 * the fact that buffered writes lock out direct I/O
			 * writes - the direct I/O write could have extended
			 * the size nontransactionally, then finished before
			 * we started.  xfs_write_file will think that the file
			 * didn't grow but the update isn't safe unless the
			 * size change is logged.
			 *
			 * Force the log if we've committed a transaction
			 * against the inode or if someone else has and
			 * the commit record hasn't gone to disk (e.g.
			 * the inode is pinned).  This guarantees that
			 * all changes affecting the inode are permanent
			 * when we return.
			 */
			if (iip && iip->ili_last_lsn) {
				xfs_log_force(mp, iip->ili_last_lsn,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			} else if (xfs_ipincount(xip) > 0) {
				xfs_log_force(mp, (xfs_lsn_t)0,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			}

		} else {
			xfs_trans_t	*tp;

			/*
			 * O_SYNC or O_DSYNC _with_ a size update are handled
			 * the same way.
			 *
			 * If the write was synchronous then we need to make
			 * sure that the inode modification time is permanent.
			 * We'll have updated the timestamp above, so here
			 * we use a synchronous transaction to log the inode.
			 * It's not fast, but it's necessary.
			 *
			 * If this a dsync write and the size got changed
			 * non-transactionally, then we need to ensure that
			 * the size change gets logged in a synchronous
			 * transaction.
			 */

			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
			if ((error = xfs_trans_reserve(tp, 0,
						      XFS_SWRITE_LOG_RES(mp),
						      0, 0, 0))) {
				/* Transaction reserve failed */
				xfs_trans_cancel(tp, 0);
			} else {
				/* Transaction reserve successful */
				xfs_ilock(xip, XFS_ILOCK_EXCL);
				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
				xfs_trans_ihold(tp, xip);
				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
				xfs_trans_set_sync(tp);
				error = xfs_trans_commit(tp, 0, NULL);
				xfs_iunlock(xip, XFS_ILOCK_EXCL);
			}
			if (error)
				goto out_unlock_internal;
		}

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);

		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}

out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
out_nounlocks:
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}
}

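/*
 * Behaviour-layer bmap entry point: sanity-check that this is a regular
 * file whose realtime flag matches the iocore state, then let
 * xfs_iomap() build the requested mapping(s).
 */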
int
xfs_bmap(bhv_desc_t	*bdp,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes thru this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 */
		xfs_buf_iorequest(bp);
		return 0;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}