]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - ubuntu/vbox/vboxsf/regops.c
2a9f5c4a921dc9674499bc23b63c88a0dd34c4bc
[mirror_ubuntu-zesty-kernel.git] / ubuntu / vbox / vboxsf / regops.c
1 /* $Id: regops.c $ */
2 /** @file
3 * vboxsf - VBox Linux Shared Folders, Regular file inode and file operations.
4 */
5
6 /*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18 /*
19 * Limitations: only COW memory mapping is supported
20 */
21
22 #include "vfsmod.h"
23
24 static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t
25 xfer_size, const char *caller)
26 {
27 size_t tmp_size;
28 void *tmp;
29
30 /* try for big first. */
31 tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
32 if (tmp_size > 16U*_1K)
33 tmp_size = 16U*_1K;
34 tmp = kmalloc(tmp_size, GFP_KERNEL);
35 if (!tmp)
36 {
37 /* fall back on a page sized buffer. */
38 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
39 if (!tmp)
40 {
41 LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size));
42 return NULL;
43 }
44 tmp_size = PAGE_SIZE;
45 }
46
47 *tmp_sizep = tmp_size;
48 *physp = virt_to_phys(tmp);
49 return tmp;
50 }
51
/** Release a buffer obtained from alloc_bounce_buffer(). */
static void free_bounce_buffer(void *tmp)
{
    kfree(tmp);
}
56
57
58 /* fops */
59 static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
60 struct sf_reg_info *sf_r, void *buf,
61 uint32_t *nread, uint64_t pos)
62 {
63 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
64 * contiguous in physical memory (kmalloc or single page), we should
65 * use a physical address here to speed things up. */
66 int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
67 pos, nread, buf, false /* already locked? */);
68 if (RT_FAILURE(rc))
69 {
70 LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller, rc));
71 return -EPROTO;
72 }
73 return 0;
74 }
75
76 static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
77 struct sf_reg_info *sf_r, void *buf,
78 uint32_t *nwritten, uint64_t pos)
79 {
80 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
81 * contiguous in physical memory (kmalloc or single page), we should
82 * use a physical address here to speed things up. */
83 int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle,
84 pos, nwritten, buf, false /* already locked? */);
85 if (RT_FAILURE(rc))
86 {
87 LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n",
88 caller, rc));
89 return -EPROTO;
90 }
91 return 0;
92 }
93
/**
 * Read from a regular file.
 *
 * Data moves through a kmalloc'ed bounce buffer: each iteration reads a
 * chunk from the host into the bounce buffer, then copies it out to the
 * user buffer.  A short read from the host terminates the loop.
 *
 * @param file the file
 * @param buf the buffer (user space)
 * @param size length of the buffer
 * @param off offset within the file; advanced by the number of bytes read
 * @returns the number of read bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_read(struct file *file, char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;                 /* bytes still to transfer */
    ssize_t total_bytes_read = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE();
    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_read, nread;

        /* Clamp each chunk to the bounce buffer size. */
        to_read = tmp_size;
        if (to_read > left)
            to_read = (uint32_t) left;

        nread = to_read;

        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user(buf, tmp, nread))
        {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
        /* Short read from the host: stop (presumably end of file). */
        if (nread != to_read)
            break;
    }

    *off += total_bytes_read;
    free_bounce_buffer(tmp);
    return total_bytes_read;

fail:
    free_bounce_buffer(tmp);
    return err;
}
168
/**
 * Write to a regular file.
 *
 * Data moves through a kmalloc'ed bounce buffer: each iteration copies a
 * chunk in from user space and sends it to the host, using the buffer's
 * physical address when the host supports physical page lists.  A short
 * write from the host terminates the loop.
 *
 * @param file the file
 * @param buf the buffer (user space)
 * @param size length of the buffer
 * @param off offset within the file; advanced by the number of bytes written
 * @returns the number of written bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;                 /* bytes still to transfer */
    ssize_t total_bytes_written = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE();
    BUG_ON(!sf_i);
    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("write to non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    if (file->f_flags & O_APPEND)
    {
        /* O_APPEND: write at the cached end of file.
         * NOTE(review): inode->i_size may be stale if the host changed the
         * file behind our back -- confirm whether a restat is needed here. */
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_write, nwritten;

        /* Clamp each chunk to the bounce buffer size. */
        to_write = tmp_size;
        if (to_write > left)
            to_write = (uint32_t) left;

        nwritten = to_write;

        if (copy_from_user(tmp, buf, to_write))
        {
            err = -EFAULT;
            goto fail;
        }

#if 1
        if (VbglR0CanUsePhysPageList())
        {
            /* Fast path: hand the host the bounce buffer's physical address
             * instead of a virtual address it would have to translate. */
            err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map, sf_r->handle,
                                        pos, &nwritten, tmp_phys);
            err = RT_FAILURE(err) ? -EPROTO : 0;
        }
        else
#endif
            err = sf_reg_write_aux(__func__, sf_g, sf_r, tmp, &nwritten, pos);
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        /* Short write from the host: stop. */
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    /* Keep the cached inode size in sync with what we just wrote. */
    if (*off > inode->i_size)
        inode->i_size = *off;

    /* Force a restat on the next attribute lookup so cached attributes
     * (size, timestamps) are refreshed from the host. */
    sf_i->force_restat = 1;
    free_bounce_buffer(tmp);
    return total_bytes_written;

fail:
    free_bounce_buffer(tmp);
    return err;
}
268
/**
 * Open a regular file.
 *
 * Translates the Linux open flags (O_CREAT, O_TRUNC, O_APPEND and the
 * access mode) into SHFL_CF_* create flags and opens the file on the host
 * via VbglR0SfCreate().  The host handle is stored in a freshly allocated
 * sf_reg_info attached to file->private_data.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r)
    {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL)
    {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
         * about the access flags (SHFL_CF_ACCESS_*).
         */
        /* Take over the handle created by sf_create_aux() instead of
         * opening the file a second time on the host. */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter */

    if (file->f_flags & O_CREAT)
    {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
        else
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    }
    else
    {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
    }

    /* Map the Linux access mode to SHFL access flags unless full
     * read/write access was already requested above. */
    if (!(params.CreateFlags & SHFL_CF_ACCESS_READWRITE))
    {
        switch (file->f_flags & O_ACCMODE)
        {
            case O_RDONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_READ;
                break;

            case O_WRONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
                break;

            case O_RDWR:
                params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
                break;

            default:
                BUG ();
        }
    }

    if (file->f_flags & O_APPEND)
    {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8 , file->f_flags, params.CreateFlags));
    rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_FAILURE(rc))
    {
        LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    /* A NIL handle with a successful rc means the host reported an
     * informational result; translate it to a Linux errno. */
    if (SHFL_HANDLE_NIL == params.Handle)
    {
        switch (params.Result)
        {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}
409
/**
 * Close a regular file.
 *
 * Flushes any dirty pages first (mmap can dirty pages that would otherwise
 * be written back after the host handle is gone), then closes the host
 * handle and frees the per-open sf_reg_info.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    /* See the smbfs source (file.c). mmap in particular can cause data to be
     * written to the file after it is closed, which we can't cope with. We
     * copy and paste the body of filemap_write_and_wait() here as it was not
     * defined before 2.6.6 and not exported until quite a bit later. */
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));

    /* A close failure is only logged; cleanup proceeds regardless. */
    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}
451
/*
 * Page fault handler for COW (private) memory mappings of shared folder
 * files.  Allocates a fresh page, fills it by reading from the host file
 * at the faulting offset and zero-fills whatever the host did not supply.
 * Built as ->fault on kernels > 2.6.25 and as ->nopage (two signatures)
 * on older kernels.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    /* NOTE(review): this compares a page index (pgoff) against an end
     * address (vm_end), so the bounds check looks ineffective for typical
     * mappings -- confirm the intended check. */
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
     * which works on virtual addresses. On Linux cannot reliably determine the
     * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
    /* Compute the file offset of the faulting page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread)
    {
        /* Host returned no data (e.g. fault past EOF): hand back a
         * zeroed page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        /* Zero the tail of the page the host did not fill. */
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}
541
/* VM operations for private (COW) mappings: demand paging is backed by
 * reads from the host file via sf_reg_fault()/sf_reg_nopage(). */
static struct vm_operations_struct sf_vma_ops =
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};
550
551 static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
552 {
553 TRACE();
554 if (vma->vm_flags & VM_SHARED)
555 {
556 LogFunc(("shared mmapping not available\n"));
557 return -EINVAL;
558 }
559
560 vma->vm_ops = &sf_vma_ops;
561 return 0;
562 }
563
/* File operations for regular shared-folder files.  The read/write/open/
 * release/mmap entries are implemented above; the remaining entries fall
 * back to generic kernel helpers, selected per kernel version. */
struct file_operations sf_reg_fops =
{
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    .splice_read = generic_file_splice_read,
# else
    .sendfile = generic_file_sendfile,
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    .read_iter = generic_file_read_iter,
    .write_iter = generic_file_write_iter,
# else
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    .fsync = noop_fsync,
# else
    .fsync = simple_sync_file,
# endif
    .llseek = generic_file_llseek,
#endif
};
592
593
/* Inode operations for regular shared-folder files: attribute get/set
 * (or revalidation on pre-2.6 kernels) is delegated to the sf_* helpers
 * defined elsewhere in this module. */
struct inode_operations sf_reg_iops =
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};
603
604
605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
/**
 * ->readpage: fill a page-cache page from the host file.
 *
 * Reads up to PAGE_SIZE bytes at the page's file offset, zero-fills the
 * remainder and marks the page up to date.  The page is unlocked on all
 * paths, as the readpage contract requires.
 *
 * @param file the file the page belongs to
 * @param page the (locked) page to fill
 * @returns 0 on success, negative errno from the host read otherwise
 */
static int sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;  /* page's file offset */
    int ret;

    TRACE();

    buf = kmap(page);
    ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (ret)
    {
        kunmap(page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON(nread > PAGE_SIZE);
    /* Zero the tail of the page the host did not fill. */
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page(page);
    kunmap(page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}
635
/**
 * ->writepage: write a dirty page-cache page back to the host file.
 *
 * For the last (partial) page, only the bytes up to the cached end of file
 * are written.  On failure the page's uptodate flag is cleared; the page is
 * unlocked on all paths.
 *
 * @param page the (locked) dirty page
 * @param wbc  writeback control (unused here)
 * @returns 0 on success, negative errno from the host write otherwise
 */
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;        /* index of the EOF page */
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;  /* page's file offset */
    int err;

    TRACE();

    /* Last page: only write up to EOF.
     * NOTE(review): when i_size is an exact multiple of PAGE_SIZE this
     * yields nwritten == 0 -- confirm this case cannot be reached with a
     * dirty page at/after end_index. */
    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0)
    {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

out:
    kunmap(page);

    unlock_page(page);
    return err;
}
678
679 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
680 int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
681 unsigned len, unsigned flags, struct page **pagep, void **fsdata)
682 {
683 TRACE();
684
685 return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
686 }
687
688 int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
689 unsigned len, unsigned copied, struct page *page, void *fsdata)
690 {
691 struct inode *inode = mapping->host;
692 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
693 struct sf_reg_info *sf_r = file->private_data;
694 void *buf;
695 unsigned from = pos & (PAGE_SIZE - 1);
696 uint32_t nwritten = len;
697 int err;
698
699 TRACE();
700
701 buf = kmap(page);
702 err = sf_reg_write_aux(__func__, sf_g, sf_r, buf+from, &nwritten, pos);
703 kunmap(page);
704
705 if (!PageUptodate(page) && err == PAGE_SIZE)
706 SetPageUptodate(page);
707
708 if (err >= 0) {
709 pos += nwritten;
710 if (pos > inode->i_size)
711 inode->i_size = pos;
712 }
713
714 unlock_page(page);
715 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
716 put_page(page);
717 #else
718 page_cache_release(page);
719 #endif
720
721 return nwritten;
722 }
723
724 # endif /* KERNEL_VERSION >= 2.6.24 */
725
/* Address space operations for regular shared-folder files.  readpage and
 * writepage go straight to the host; write_begin/write_end (or the older
 * prepare_write/commit_write pair) handle buffered writes through the
 * page cache. */
struct address_space_operations sf_reg_aops =
{
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
738 #endif