/* NOTE(review): the lines that stood here were a git-blame table header
 * (commit hashes / column captions) left over from extraction; they are
 * not part of the source file. */
1 | /* $Id: regops.c $ */ |
2 | /** @file | |
3 | * vboxsf - VBox Linux Shared Folders, Regular file inode and file operations. | |
4 | */ | |
5 | ||
6 | /* | |
7 | * Copyright (C) 2006-2016 Oracle Corporation | |
8 | * | |
9 | * This file is part of VirtualBox Open Source Edition (OSE), as | |
10 | * available from http://www.virtualbox.org. This file is free software; | |
11 | * you can redistribute it and/or modify it under the terms of the GNU | |
12 | * General Public License (GPL) as published by the Free Software | |
13 | * Foundation, in version 2 as it comes in the "COPYING" file of the | |
14 | * VirtualBox OSE distribution. VirtualBox OSE is distributed in the | |
15 | * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. | |
16 | */ | |
17 | ||
18 | /* | |
19 | * Limitations: only COW memory mapping is supported | |
20 | */ | |
21 | ||
22 | #include "vfsmod.h" | |
23 | ||
24 | static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t | |
25 | xfer_size, const char *caller) | |
26 | { | |
27 | size_t tmp_size; | |
28 | void *tmp; | |
29 | ||
30 | /* try for big first. */ | |
31 | tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE); | |
32 | if (tmp_size > 16U*_1K) | |
33 | tmp_size = 16U*_1K; | |
34 | tmp = kmalloc(tmp_size, GFP_KERNEL); | |
35 | if (!tmp) | |
36 | { | |
37 | /* fall back on a page sized buffer. */ | |
38 | tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); | |
39 | if (!tmp) | |
40 | { | |
41 | LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size)); | |
42 | return NULL; | |
43 | } | |
44 | tmp_size = PAGE_SIZE; | |
45 | } | |
46 | ||
47 | *tmp_sizep = tmp_size; | |
48 | *physp = virt_to_phys(tmp); | |
49 | return tmp; | |
50 | } | |
51 | ||
/** Release a buffer obtained from alloc_bounce_buffer(). */
static void free_bounce_buffer(void *tmp)
{
    kfree(tmp);
}
56 | ||
57 | ||
58 | /* fops */ | |
/**
 * Read up to *nread bytes at @a pos from the host file into a kernel buffer.
 *
 * @param caller  name of the calling function, for logging only
 * @param sf_g    global (per-mount) info, provides the folder mapping
 * @param sf_r    per-open-file info, provides the host file handle
 * @param buf     kernel buffer to read into (must be lockable/contiguous)
 * @param nread   in: bytes requested; out: bytes actually read by the host
 * @param pos     file offset to read at
 * @returns 0 on success, -EPROTO if the host call failed
 */
static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
                           struct sf_reg_info *sf_r, void *buf,
                           uint32_t *nread, uint64_t pos)
{
    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
     * contiguous in physical memory (kmalloc or single page), we should
     * use a physical address here to speed things up. */
    int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
                          pos, nread, buf, false /* already locked? */);
    if (RT_FAILURE(rc))
    {
        LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller, rc));
        return -EPROTO;
    }
    return 0;
}
75 | ||
76 | static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g, | |
77 | struct sf_reg_info *sf_r, void *buf, | |
78 | uint32_t *nwritten, uint64_t pos) | |
79 | { | |
80 | /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is | |
81 | * contiguous in physical memory (kmalloc or single page), we should | |
82 | * use a physical address here to speed things up. */ | |
83 | int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle, | |
84 | pos, nwritten, buf, false /* already locked? */); | |
85 | if (RT_FAILURE(rc)) | |
86 | { | |
87 | LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n", | |
88 | caller, rc)); | |
89 | return -EPROTO; | |
90 | } | |
91 | return 0; | |
92 | } | |
93 | ||
/**
 * Read from a regular file.
 *
 * The host API cannot transfer into user memory directly, so data is
 * staged through a physically contiguous bounce buffer and copied out to
 * userspace chunk by chunk.
 *
 * @param file          the file
 * @param buf           the (userspace) buffer
 * @param size          length of the buffer
 * @param off           offset within the file; advanced by the bytes read
 * @returns the number of read bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_read(struct file *file, char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_read = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE();
    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_read, nread;

        /* The bounce buffer may be smaller than the request (16KB cap);
         * transfer in tmp_size-d chunks. */
        to_read = tmp_size;
        if (to_read > left)
            to_read = (uint32_t) left;

        nread = to_read;

        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user(buf, tmp, nread))
        {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
        /* A short read from the host means end of file: stop. */
        if (nread != to_read)
            break;
    }

    *off += total_bytes_read;
    free_bounce_buffer(tmp);
    return total_bytes_read;

fail:
    free_bounce_buffer(tmp);
    return err;
}
168 | ||
/**
 * Write to a regular file.
 *
 * Userspace data is staged through a physically contiguous bounce buffer
 * and sent to the host in chunks.  When the host supports physical page
 * lists the faster physical-address write path is used.
 *
 * @param file          the file
 * @param buf           the (userspace) buffer
 * @param size          length of the buffer
 * @param off           offset within the file; advanced by the bytes written
 * @returns the number of written bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_written = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE();
    BUG_ON(!sf_i);
    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("write to non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    /* O_APPEND: start writing at the current end of file.  NOTE(review):
     * uses the cached inode->i_size, which may be stale if the host side
     * changed the file -- confirm this is acceptable here. */
    if (file->f_flags & O_APPEND)
    {
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_write, nwritten;

        /* Transfer in bounce-buffer sized chunks. */
        to_write = tmp_size;
        if (to_write > left)
            to_write = (uint32_t) left;

        nwritten = to_write;

        if (copy_from_user(tmp, buf, to_write))
        {
            err = -EFAULT;
            goto fail;
        }

#if 1
        /* Prefer the physical-address write when the host supports page
         * lists; the bounce buffer is physically contiguous by design. */
        if (VbglR0CanUsePhysPageList())
        {
            err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map, sf_r->handle,
                                        pos, &nwritten, tmp_phys);
            err = RT_FAILURE(err) ? -EPROTO : 0;
        }
        else
#endif
            err = sf_reg_write_aux(__func__, sf_g, sf_r, tmp, &nwritten, pos);
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        /* Short write from the host: stop. */
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    if (*off > inode->i_size)
        inode->i_size = *off;

    /* Size/timestamps changed on the host; force a re-stat on next lookup. */
    sf_i->force_restat = 1;
    free_bounce_buffer(tmp);
    return total_bytes_written;

fail:
    free_bounce_buffer(tmp);
    return err;
}
268 | ||
/**
 * Open a regular file.
 *
 * Translates the Linux open flags (O_CREAT/O_TRUNC/O_ACCMODE/O_APPEND)
 * into SHFL_CF_* create flags and calls the host to obtain a shared-folder
 * handle, which is stored in file->private_data.
 *
 * @param inode         the inode
 * @param file          the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r)
    {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open?  sf_create_aux() parks the handle it obtained on the
     * inode; adopt it instead of opening the file a second time. */
    if (sf_i->handle != SHFL_HANDLE_NIL)
    {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
         * about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter */

    if (file->f_flags & O_CREAT)
    {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }
        else
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    }
    else
    {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }
    }

    /* Map the Linux access mode onto the SHFL access flags. */
    switch (file->f_flags & O_ACCMODE)
    {
        case O_RDONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_READ;
            break;

        case O_WRONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
            break;

        case O_RDWR:
            params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
            break;

        default:
            /* O_ACCMODE admits no other values. */
            BUG ();
    }

    if (file->f_flags & O_APPEND)
    {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8 , file->f_flags, params.CreateFlags));
    rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_FAILURE(rc))
    {
        LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    /* The call may "succeed" yet yield no handle; translate the informational
     * result code into a Linux errno in that case. */
    if (SHFL_HANDLE_NIL == params.Handle)
    {
        switch (params.Result)
        {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}
404 | ||
/**
 * Close a regular file.
 *
 * Flushes any dirty page-cache pages (mmap can dirty pages that would
 * otherwise be written after the host handle is gone), closes the host
 * handle and frees the per-open-file info.
 *
 * @param inode         the inode
 * @param file          the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    /* See the smbfs source (file.c). mmap in particular can cause data to be
     * written to the file after it is closed, which we can't cope with. We
     * copy and paste the body of filemap_write_and_wait() here as it was not
     * defined before 2.6.6 and not exported until quite a bit later. */
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    /* A failed host close is only logged: the handle is gone either way and
     * close(2) reports success to userspace. */
    rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}
446 | ||
/**
 * Page fault handler for private (COW) mappings of shared-folder files:
 * allocates a fresh page, fills it from the host file and hands it to the
 * memory manager.
 *
 * The entry-point signature changed several times across kernel versions,
 * hence the #if ladder: vmf-only fault() (>= 4.11), (vma, vmf) fault()
 * (> 2.6.25), and the older nopage() interface before that.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static int sf_reg_fault(struct vm_fault *vmf)
#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    struct vm_area_struct *vma = vmf->vma;
#endif
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    /* NOTE(review): this compares a page offset against an end *address*;
     * it looks suspicious but is preserved as-is -- confirm upstream. */
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
     * which works on virtual addresses. On Linux cannot reliably determine the
     * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
    /* Compute the file offset this fault corresponds to. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread)
    {
        /* Nothing read (past EOF): hand out a zeroed page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        /* Zero the tail beyond what the host returned. */
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}
541 | ||
/** VM operations for mmap'd vboxsf files: demand paging via sf_reg_fault()
 * on kernels > 2.6.25, or the legacy sf_reg_nopage() interface before. */
static struct vm_operations_struct sf_vma_ops =
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};
550 | ||
551 | static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma) | |
552 | { | |
553 | TRACE(); | |
554 | if (vma->vm_flags & VM_SHARED) | |
555 | { | |
556 | LogFunc(("shared mmapping not available\n")); | |
557 | return -EINVAL; | |
558 | } | |
559 | ||
560 | vma->vm_ops = &sf_vma_ops; | |
561 | return 0; | |
562 | } | |
563 | ||
/** File operations for regular files on a vboxsf mount.  The generic
 * splice/aio/iter helpers route through the address-space operations
 * (sf_readpage/sf_writepage) below. */
struct file_operations sf_reg_fops =
{
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    .splice_read = generic_file_splice_read,
# else
    .sendfile = generic_file_sendfile,
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    .read_iter = generic_file_read_iter,
    .write_iter = generic_file_write_iter,
# else
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    .fsync = noop_fsync,
# else
    .fsync = simple_sync_file,
# endif
    .llseek = generic_file_llseek,
#endif
};
592 | ||
593 | ||
/** Inode operations for regular files: attribute get/set on modern
 * kernels, revalidate on pre-2.6 ones. */
struct inode_operations sf_reg_iops =
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};
603 | ||
604 | ||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
/**
 * address_space::readpage -- fill a page-cache page from the host file.
 *
 * A short host read is zero-padded; on success the page is marked uptodate
 * and unlocked.
 *
 * @param file  the file the page belongs to
 * @param page  the locked page to fill
 * @returns 0 on success, negative errno from the host read otherwise
 */
static int sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
    int ret;

    TRACE();

    buf = kmap(page);
    ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (ret)
    {
        kunmap(page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON(nread > PAGE_SIZE);
    /* Zero the tail the host didn't supply (EOF inside the page). */
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page(page);
    kunmap(page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}
635 | ||
/**
 * address_space::writepage -- flush one dirty page-cache page to the host.
 *
 * For the last page of the file only the bytes up to i_size are written.
 * The page is always unlocked before returning.
 *
 * @param page  the locked, dirty page
 * @param wbc   writeback control (unused here)
 * @returns 0 on success, negative errno from the host write otherwise
 */
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE();

    /* Last (partial) page: only write the valid bytes. */
    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0)
    {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

out:
    kunmap(page);

    unlock_page(page);
    return err;
}
678 | ||
679 | # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) | |
680 | int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos, | |
681 | unsigned len, unsigned flags, struct page **pagep, void **fsdata) | |
682 | { | |
683 | TRACE(); | |
684 | ||
685 | return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata); | |
686 | } | |
687 | ||
688 | int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos, | |
689 | unsigned len, unsigned copied, struct page *page, void *fsdata) | |
690 | { | |
691 | struct inode *inode = mapping->host; | |
692 | struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); | |
693 | struct sf_reg_info *sf_r = file->private_data; | |
694 | void *buf; | |
695 | unsigned from = pos & (PAGE_SIZE - 1); | |
696 | uint32_t nwritten = len; | |
697 | int err; | |
698 | ||
699 | TRACE(); | |
700 | ||
701 | buf = kmap(page); | |
702 | err = sf_reg_write_aux(__func__, sf_g, sf_r, buf+from, &nwritten, pos); | |
703 | kunmap(page); | |
704 | ||
705 | if (!PageUptodate(page) && err == PAGE_SIZE) | |
706 | SetPageUptodate(page); | |
707 | ||
708 | if (err >= 0) { | |
709 | pos += nwritten; | |
710 | if (pos > inode->i_size) | |
711 | inode->i_size = pos; | |
712 | } | |
713 | ||
714 | unlock_page(page); | |
715 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) | |
716 | put_page(page); | |
717 | #else | |
718 | page_cache_release(page); | |
719 | #endif | |
720 | ||
721 | return nwritten; | |
722 | } | |
723 | ||
724 | # endif /* KERNEL_VERSION >= 2.6.24 */ | |
725 | ||
/** Address-space operations wiring the page cache to the host file:
 * read/write whole pages via sf_readpage/sf_writepage, with the generic
 * simple_* helpers handling the begin/end bookkeeping. */
struct address_space_operations sf_reg_aops =
{
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
738 | #endif |