/* fs/xfs/linux-2.6/xfs_lrw.c */
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>


#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
        int             tag,
        xfs_iocore_t    *io,
        void            *data,
        size_t          segs,
        loff_t          offset,
        int             ioflags)
{
        xfs_inode_t     *ip = XFS_IO_INODE(io);

        if (ip->i_rwtrace == NULL)
                return;
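        /*
         * Each ktrace slot holds one pointer, so the 64-bit quantities
         * (di_size, offset, io_new_size) are packed as separate
         * high/low 32-bit words.
         */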
        ktrace_enter(ip->i_rwtrace,
                (void *)(unsigned long)tag,
                (void *)ip,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)data,
                (void *)((unsigned long)segs),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)ioflags),
                (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(io->io_new_size & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}

void
xfs_inval_cached_trace(
        xfs_iocore_t    *io,
        xfs_off_t       offset,
        xfs_off_t       len,
        xfs_off_t       first,
        xfs_off_t       last)
{
        xfs_inode_t     *ip = XFS_IO_INODE(io);

        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(__psint_t)XFS_INVAL_CACHED,
                (void *)ip,
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)((len >> 32) & 0xffffffff)),
                (void *)((unsigned long)(len & 0xffffffff)),
                (void *)((unsigned long)((first >> 32) & 0xffffffff)),
                (void *)((unsigned long)(first & 0xffffffff)),
                (void *)((unsigned long)((last >> 32) & 0xffffffff)),
                (void *)((unsigned long)(last & 0xffffffff)),
                (void *)((unsigned long)current_pid()),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}
#endif

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
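/*
 * Example: with 4k pages, zeroing pos = 4094, count = 100 takes two
 * iterations of the loop below -- 2 bytes at the end of the first page
 * (offset 4094), then 98 bytes at the start of the next (offset 0).
 */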
STATIC int
xfs_iozero(
        struct inode    *ip,    /* inode */
        loff_t          pos,    /* offset in file */
        size_t          count)  /* size of data to zero */
{
        struct page     *page;
        struct address_space *mapping;
        int             status;

        mapping = ip->i_mapping;
        do {
                unsigned offset, bytes;
                void *fsdata;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

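                /*
                 * Go through the generic pagecache write_begin/write_end
                 * aops pair, so the filesystem's own address_space ops
                 * allocate blocks and read in any partially overwritten
                 * page before we zero it.
                 */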
                status = pagecache_write_begin(NULL, mapping, pos, bytes,
                                        AOP_FLAG_UNINTERRUPTIBLE,
                                        &page, &fsdata);
                if (status)
                        break;

                zero_user_page(page, offset, bytes, KM_USER0);

                status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
                                        page, fsdata);
                WARN_ON(status <= 0); /* can't return less than zero! */
                pos += bytes;
                count -= bytes;
                status = 0;
        } while (count);

        return (-status);
}

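/*
 * xfs_read: vectored read path for the bhv vnode interface.  Validates
 * the iovec, enforces sector alignment for direct I/O, sends a DMAPI
 * read event when one is armed, then hands off to
 * generic_file_aio_read() under the shared iolock.
 */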
ssize_t                         /* bytes read, or (-) error */
xfs_read(
        bhv_desc_t      *bdp,
        struct kiocb    *iocb,
        const struct iovec *iovp,
        unsigned int    segs,
        loff_t          *offset,
        int             ioflags,
        cred_t          *credp)
{
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
        size_t          size = 0;
        ssize_t         ret = 0;
        xfs_fsize_t     n;
        xfs_inode_t     *ip;
        xfs_mount_t     *mp;
        bhv_vnode_t     *vp;
        unsigned long   seg;

        ip = XFS_BHVTOI(bdp);
        vp = BHV_TO_VNODE(bdp);
        mp = ip->i_mount;

        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (*offset == ip->i_size) {
                                return (0);
                        }
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
            !(ioflags & IO_INVIS)) {
                bhv_vrwlock_t locktype = VRWLOCK_READ;
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
                                        BHV_TO_VNODE(bdp), *offset, size,
                                        dmflags, &locktype);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        if (unlikely(ioflags & IO_ISDIRECT))
                                mutex_unlock(&inode->i_mutex);
                        return ret;
                }
        }

        if (unlikely(ioflags & IO_ISDIRECT)) {
                if (VN_CACHED(vp))
                        ret = bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)),
                                                        -1, FI_REMAPF_LOCKED);
                mutex_unlock(&inode->i_mutex);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return ret;
                }
        }

        xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
                                (void *)iovp, segs, *offset, ioflags);

        iocb->ki_pos = *offset;
        ret = generic_file_aio_read(iocb, iovp, segs, *offset);
        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

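/*
 * xfs_splice_read: splice data from the file into a pipe.  Takes the
 * shared iolock, fires any armed DMAPI read event, then delegates to
 * generic_file_splice_read().
 */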
ssize_t
xfs_splice_read(
        bhv_desc_t      *bdp,
        struct file     *infilp,
        loff_t          *ppos,
        struct pipe_inode_info *pipe,
        size_t          count,
        int             flags,
        int             ioflags,
        cred_t          *credp)
{
        xfs_inode_t     *ip = XFS_BHVTOI(bdp);
        xfs_mount_t     *mp = ip->i_mount;
        ssize_t         ret;

        XFS_STATS_INC(xs_read_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
            (!(ioflags & IO_INVIS))) {
                bhv_vrwlock_t locktype = VRWLOCK_READ;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
                                        *ppos, count,
                                        FILP_DELAY_FLAG(infilp), &locktype);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }
        xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return ret;
}

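/*
 * xfs_splice_write: splice data from a pipe into the file.  Takes the
 * exclusive iolock, fires any armed DMAPI write event, records the
 * anticipated EOF in io_new_size, then delegates to
 * generic_file_splice_write() and folds the result back into the
 * in-core and on-disk inode sizes.
 */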
ssize_t
xfs_splice_write(
        bhv_desc_t      *bdp,
        struct pipe_inode_info *pipe,
        struct file     *outfilp,
        loff_t          *ppos,
        size_t          count,
        int             flags,
        int             ioflags,
        cred_t          *credp)
{
        xfs_inode_t     *ip = XFS_BHVTOI(bdp);
        xfs_mount_t     *mp = ip->i_mount;
        xfs_iocore_t    *io = &ip->i_iocore;
        ssize_t         ret;
        struct inode    *inode = outfilp->f_mapping->host;
        xfs_fsize_t     isize, new_size;

        XFS_STATS_INC(xs_write_calls);
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
            (!(ioflags & IO_INVIS))) {
                bhv_vrwlock_t locktype = VRWLOCK_WRITE;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
                                        *ppos, count,
                                        FILP_DELAY_FLAG(outfilp), &locktype);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return -error;
                }
        }

        new_size = *ppos + count;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (new_size > ip->i_size)
                io->io_new_size = new_size;
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
                           pipe, count, *ppos, ioflags);
        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
        if (ret > 0)
                XFS_STATS_ADD(xs_write_bytes, ret);

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
                *ppos = isize;

        if (*ppos > ip->i_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                if (*ppos > ip->i_size)
                        ip->i_size = *ppos;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

        if (io->io_new_size) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                io->io_new_size = 0;
                if (ip->i_d.di_size > ip->i_size)
                        ip->i_d.di_size = ip->i_size;
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                      /* error (positive) */
xfs_zero_last_block(
        struct inode    *ip,
        xfs_iocore_t    *io,
        xfs_fsize_t     offset,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp = io->io_mount;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);

        XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes in the
 * range are left alone as holes; only allocated, written blocks are
 * zeroed.
 */

int                             /* error (positive) */
xfs_zero_eof(
        bhv_vnode_t     *vp,
        xfs_iocore_t    *io,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize)          /* current inode size */
{
        struct inode    *ip = vn_to_inode(vp);
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_fileoff_t   zero_off;
        xfs_fsize_t     zero_len;
        xfs_mount_t     *mp = io->io_mount;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
        ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
        ASSERT(offset > isize);

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, io, offset, isize);
        if (error) {
                ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
                ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                if (error) {
                        ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
                        ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * Holes and unwritten extents already read back
                         * as zeroes, so there is nothing to do here:
                         * skip to the end of this mapping and keep
                         * scanning.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks we need to zero.
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

                if ((zero_off + zero_len) > offset)
                        zero_len = offset - zero_off;

                error = xfs_iozero(ip, zero_off, zero_len);
                if (error) {
                        goto out_lock;
                }

                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        }

        return 0;

out_lock:
        XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

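/*
 * xfs_write: vectored write path for the bhv vnode interface.  Chooses
 * the locking strategy (shared iolock for direct I/O, exclusive iolock
 * plus i_mutex for buffered), sends DMAPI write events, zeroes any gap
 * between the old EOF and the write offset, then dispatches to the
 * generic direct or buffered write paths, retrying once on ENOSPC if a
 * DMAPI application can free up space.
 */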
ssize_t                         /* bytes written, or (-) error */
xfs_write(
        bhv_desc_t      *bdp,
        struct kiocb    *iocb,
        const struct iovec *iovp,
        unsigned int    nsegs,
        loff_t          *offset,
        int             ioflags,
        cred_t          *credp)
{
        struct file     *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode    *inode = mapping->host;
        unsigned long   segs = nsegs;
        xfs_inode_t     *xip;
        xfs_mount_t     *mp;
        ssize_t         ret = 0, error = 0;
        xfs_fsize_t     isize, new_size;
        xfs_iocore_t    *io;
        bhv_vnode_t     *vp;
        int             iolock;
        int             eventsent = 0;
        bhv_vrwlock_t   locktype;
        size_t          ocount = 0, count;
        loff_t          pos;
        int             need_i_mutex;

        XFS_STATS_INC(xs_write_calls);

        vp = BHV_TO_VNODE(bdp);
        xip = XFS_BHVTOI(bdp);

        error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
        if (error)
                return error;

        count = ocount;
        pos = *offset;

        if (count == 0)
                return 0;

        io = &xip->i_iocore;
        mp = io->io_mount;

        vfs_wait_for_freeze(vp->v_vfsp, SB_FREEZE_WRITE);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

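        /*
         * Direct writes can run under the shared iolock without i_mutex;
         * buffered writes need the exclusive iolock and i_mutex.  We come
         * back here if a direct write has to fall through to buffered.
         */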
relock:
        if (ioflags & IO_ISDIRECT) {
                iolock = XFS_IOLOCK_SHARED;
                locktype = VRWLOCK_WRITE_DIRECT;
                need_i_mutex = 0;
        } else {
                iolock = XFS_IOLOCK_EXCL;
                locktype = VRWLOCK_WRITE;
                need_i_mutex = 1;
                mutex_lock(&inode->i_mutex);
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_mutex;
        }

        if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent)) {
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_i_mutex)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
                                        pos, count,
                                        dmflags, &locktype);
                if (error) {
                        goto out_unlock_internal;
                }
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && pos != xip->i_size)
                        goto start;
        }

        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        return XFS_ERROR(-EINVAL);
                }

                if (!need_i_mutex && (VN_CACHED(vp) || pos > xip->i_size)) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        iolock = XFS_IOLOCK_EXCL;
                        locktype = VRWLOCK_WRITE;
                        need_i_mutex = 1;
                        mutex_lock(&inode->i_mutex);
                        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
                        goto start;
                }
        }

        new_size = pos + count;
        if (new_size > xip->i_size)
                io->io_new_size = new_size;

        if (likely(!(ioflags & IO_INVIS))) {
                file_update_time(file);
                xfs_ichgtime_fast(xip, inode,
                                  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        }

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */

        if (pos > xip->i_size) {
                error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos, xip->i_size);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        goto out_unlock_internal;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */

        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -remove_suid(file->f_path.dentry);
                if (unlikely(error)) {
                        goto out_unlock_internal;
                }
        }

retry:
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        if ((ioflags & IO_ISDIRECT)) {
                if (VN_CACHED(vp)) {
                        WARN_ON(need_i_mutex == 0);
                        xfs_inval_cached_trace(io, pos, -1,
                                        ctooff(offtoct(pos)), -1);
                        error = bhv_vop_flushinval_pages(vp, ctooff(offtoct(pos)),
                                        -1, FI_REMAPF_LOCKED);
                        if (error)
                                goto out_unlock_internal;
                }

                if (need_i_mutex) {
                        /* demote the lock now the cached pages are gone */
                        XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
                        mutex_unlock(&inode->i_mutex);

                        iolock = XFS_IOLOCK_SHARED;
                        locktype = VRWLOCK_WRITE_DIRECT;
                        need_i_mutex = 0;
                }

                xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                        goto relock;
                }
        } else {
                xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
        }

        current->backing_dev_info = NULL;

        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);

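        /*
         * ENOSPC with DMAPI space management armed: drop our locks, ask
         * the DMAPI application to free up space, then retry the write
         * at the file's current EOF.
         */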
        if ((ret == -ENOSPC) &&
            DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
            !(ioflags & IO_INVIS)) {

                xfs_rwunlock(bdp, locktype);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
                                DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_rwlock(bdp, locktype);
                if (error)
                        goto out_unlock_internal;
                pos = xip->i_size;
                ret = 0;
                goto retry;
        }

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
                *offset = isize;

        if (*offset > xip->i_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_size)
                        xip->i_size = *offset;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                error = xfs_write_sync_logforce(mp, xip);
                if (error)
                        goto out_unlock_internal;

                xfs_rwunlock(bdp, locktype);
                if (need_i_mutex)
                        mutex_unlock(&inode->i_mutex);

                error = sync_page_range(inode, mapping, pos, ret);
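                /*
                 * On success, store the negated byte count: the common
                 * exit path returns -error, which turns this back into
                 * the positive number of bytes written.
                 */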
                if (!error)
                        error = -ret;
                if (need_i_mutex)
                        mutex_lock(&inode->i_mutex);
                xfs_rwlock(bdp, locktype);
        }

out_unlock_internal:
        if (io->io_new_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                io->io_new_size = 0;
                /*
                 * If this was a direct or synchronous I/O that failed (such
                 * as ENOSPC) then part of the I/O may have been written to
                 * disk before the error occurred.  In this case the on-disk
                 * file size may have been adjusted beyond the in-memory file
                 * size and now needs to be truncated back.
                 */
                if (xip->i_d.di_size > xip->i_size)
                        xip->i_d.di_size = xip->i_size;
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }
        xfs_rwunlock(bdp, locktype);
out_unlock_mutex:
        if (need_i_mutex)
                mutex_unlock(&inode->i_mutex);
        return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        xfs_mount_t     *mp;

        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return 0;
        } else {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }
}

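/*
 * xfs_bmap: thin wrapper that forwards block mapping requests for a
 * regular file to xfs_iomap(), after checking that the inode's realtime
 * flag agrees with its iocore state.
 */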
int
xfs_bmap(bhv_desc_t     *bdp,
        xfs_off_t       offset,
        ssize_t         count,
        int             flags,
        xfs_iomap_t     *iomapp,
        int             *niomaps)
{
        xfs_inode_t     *ip = XFS_BHVTOI(bdp);
        xfs_iocore_t    *io = &ip->i_iocore;

        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
        ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
               ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

        return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk
 * in case we are shutting down the filesystem.  Typically user data
 * goes through this path; one of the exceptions is the superblock.
 */
int
xfsbdstrat(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
{
        ASSERT(mp);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                /* Grio redirection would go here
                 * if (XFS_BUF_IS_GRIO(bp)) {
                 */

                xfs_buf_iorequest(bp);
                return 0;
        }

        xfs_buftrace("XFSBDSTRAT IOERROR", bp);
        return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        xfs_mount_t     *mp,
        char            *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                cmn_err(CE_NOTE,
                        "XFS: %s required on read-only device.", message);
                cmn_err(CE_NOTE,
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;
}