[mirror_ubuntu-bionic-kernel.git] / fs / sync.c  (blob at commit "get rid of restarts in sync_filesystems()")
/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include "internal.h"

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * Do the filesystem syncing work. For simple filesystems
 * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
 * submit IO for these buffers via __sync_blockdev(). This also speeds up the
 * wait == 1 case since in that case write_inode() functions do
 * sync_dirty_buffer() and thus effectively write one block at a time.
 */
static int __sync_filesystem(struct super_block *sb, int wait)
{
	/*
	 * This should be safe, as we require bdi backing to actually
	 * write out data in the first place
	 */
	if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
		return 0;

	if (sb->s_qcop && sb->s_qcop->quota_sync)
		sb->s_qcop->quota_sync(sb, -1, wait);

	if (wait)
		sync_inodes_sb(sb);
	else
		writeback_inodes_sb(sb);

	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
	return __sync_blockdev(sb->s_bdev, wait);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  The caller must already hold sb->s_umount; this function
 * only asserts that it is held.
 */
int sync_filesystem(struct super_block *sb)
{
	int ret;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	/*
	 * No point in syncing out anything if the filesystem is read-only.
	 */
	if (sb->s_flags & MS_RDONLY)
		return 0;

	ret = __sync_filesystem(sb, 0);
	if (ret < 0)
		return ret;
	return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL_GPL(sync_filesystem);
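/*
 * Example (sketch, not part of the original file): the locking contract
 * asserted by the WARN_ON above is that the caller already holds
 * sb->s_umount around the call, roughly:
 *
 *	down_write(&sb->s_umount);
 *	sync_filesystem(sb);
 *	...
 *	up_write(&sb->s_umount);
 */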

/*
 * Sync all the data for all the filesystems (called by sys_sync() and
 * emergency sync)
 */
static void sync_filesystems(int wait)
{
	struct super_block *sb, *n;

	spin_lock(&sb_lock);
	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
			__sync_filesystem(sb, wait);
		up_read(&sb->s_umount);

		/* restart only when sb is no longer on the list */
		spin_lock(&sb_lock);
		__put_super(sb);
	}
	spin_unlock(&sb_lock);
}

/*
 * Sync everything.  Start out by waking the per-bdi flusher threads, because
 * they write back all queues in parallel.
 */
SYSCALL_DEFINE0(sync)
{
	wakeup_flusher_threads(0);
	sync_filesystems(0);
	sync_filesystems(1);
	if (unlikely(laptop_mode))
		laptop_sync_completion();
	return 0;
}

static void do_sync_work(struct work_struct *work)
{
	/*
	 * Sync twice to reduce the possibility we skipped some inodes / pages
	 * because they were temporarily locked
	 */
	sync_filesystems(0);
	sync_filesystems(0);
	printk("Emergency Sync complete\n");
	kfree(work);
}

void emergency_sync(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_sync_work);
		schedule_work(work);
	}
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(file_fsync);
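/*
 * Example (sketch, not part of the original file): simple block-based
 * filesystems can point their fsync method at this helper.  The ops table
 * name below is hypothetical:
 *
 *	const struct file_operations foofs_file_operations = {
 *		...
 *		.fsync	= file_fsync,
 *	};
 */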

/**
 * vfs_fsync_range - helper to sync a range of data & metadata to disk
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @start:		offset in bytes of the beginning of data range to sync
 * @end:		offset in bytes of the end of data range (inclusive)
 * @datasync:		perform only datasync
 *
 * Write back data in range @start..@end and metadata for @file to disk.  If
 * @datasync is set only metadata needed to access modified file data is
 * written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set. This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync_range(struct file *file, struct dentry *dentry, loff_t start,
		    loff_t end, int datasync)
{
	const struct file_operations *fop;
	struct address_space *mapping;
	int err, ret;

	/*
	 * Get mapping and operations from the file in case we have
	 * a file, or get the default values for them in case we
	 * don't have a struct file available.  Damn nfsd..
	 */
	if (file) {
		mapping = file->f_mapping;
		fop = file->f_op;
	} else {
		mapping = dentry->d_inode->i_mapping;
		fop = dentry->d_inode->i_fop;
	}

	if (!fop || !fop->fsync) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = fop->fsync(file, dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);

out:
	return ret;
}
EXPORT_SYMBOL(vfs_fsync_range);

/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @datasync:		only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set. This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	return vfs_fsync_range(file, dentry, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);

static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

	file = fget(fd);
	if (file) {
		ret = vfs_fsync(file, file->f_path.dentry, datasync);
		fput(file);
	}
	return ret;
}

SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}

SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}
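/*
 * Example (sketch, userspace, not part of the original file): fdatasync()
 * reaches do_fsync(fd, 1) above and may skip metadata that is not needed
 * to retrieve the data later (e.g. timestamps), so it can be cheaper than
 * fsync().  The file name is a placeholder:
 *
 *	int fd = open("journal.dat", O_WRONLY | O_APPEND);
 *	write(fd, buf, len);
 *	fdatasync(fd);
 */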

/**
 * generic_write_sync - perform syncing after a write if file / inode is sync
 * @file:		file to which the write happened
 * @pos:		offset where the write started
 * @count:		length of the write
 *
 * This is just a simple wrapper around our general syncing function.
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
		return 0;
	return vfs_fsync_range(file, file->f_path.dentry, pos,
			       pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
}
EXPORT_SYMBOL(generic_write_sync);
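/*
 * Example (sketch, not part of the original file): a buffered write path
 * would typically call generic_write_sync() after a successful write so
 * that O_SYNC / O_DSYNC semantics are honoured; roughly:
 *
 *	written = perform_buffered_write(...);	(hypothetical helper)
 *	if (written > 0) {
 *		ssize_t err = generic_write_sync(file, pos, written);
 *		if (err < 0)
 *			written = err;
 *	}
 */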

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback.  Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout.  This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout.  This is an asynchronous flush-to-disk
 * operation.  Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range.  This will be used after an
 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
 * for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation.  This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * sys_sync_file_range() are committed to disk.
 *
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata.  So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
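/*
 * Example (sketch, userspace, not part of the original file): a common
 * write-behind pattern kicks off writeout of a chunk asynchronously and
 * waits for it on a later pass.  "fd", "off" and "chunk" are placeholders:
 *
 *	sync_file_range(fd, off, chunk, SYNC_FILE_RANGE_WRITE);
 *	...
 *	sync_file_range(fd, off, chunk,
 *			SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE |
 *			SYNC_FILE_RANGE_WAIT_AFTER);
 */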
SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
				unsigned int flags)
{
	int ret;
	struct file *file;
	struct address_space *mapping;
	loff_t endbyte;			/* inclusive */
	int fput_needed;
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities.  Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto out;

	i_mode = file->f_path.dentry->d_inode->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	mapping = file->f_mapping;
	if (!mapping) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = filemap_fdatawait_range(mapping, offset, endbyte);
		if (ret < 0)
			goto out_put;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		ret = filemap_fdatawrite_range(mapping, offset, endbyte);
		if (ret < 0)
			goto out_put;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
		ret = filemap_fdatawait_range(mapping, offset, endbyte);

out_put:
	fput_light(file, fput_needed);
out:
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
				    long flags)
{
	return SYSC_sync_file_range((int) fd, offset, nbytes,
				    (unsigned int) flags);
}
SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
#endif

/* It would be nice if people remembered that not all the world's an i386
   when they introduce new system calls */
SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
				 loff_t offset, loff_t nbytes)
{
	return sys_sync_file_range(fd, offset, nbytes, flags);
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_sync_file_range2(long fd, long flags,
				     loff_t offset, loff_t nbytes)
{
	return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
				     offset, nbytes);
}
SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
#endif