1 // SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * Copyright 2018 Omnibond Systems, L.L.C.
 *
 * See COPYING in top-level directory.
 *
 * Linux VFS file operations.
 */
14 #include "orangefs-kernel.h"
15 #include "orangefs-bufmap.h"
17 #include <linux/pagemap.h>
19 static int flush_racache(struct inode
*inode
)
21 struct orangefs_inode_s
*orangefs_inode
= ORANGEFS_I(inode
);
22 struct orangefs_kernel_op_s
*new_op
;
25 gossip_debug(GOSSIP_UTILS_DEBUG
,
26 "%s: %pU: Handle is %pU | fs_id %d\n", __func__
,
27 get_khandle_from_ino(inode
), &orangefs_inode
->refn
.khandle
,
28 orangefs_inode
->refn
.fs_id
);
30 new_op
= op_alloc(ORANGEFS_VFS_OP_RA_FLUSH
);
33 new_op
->upcall
.req
.ra_cache_flush
.refn
= orangefs_inode
->refn
;
35 ret
= service_operation(new_op
, "orangefs_flush_racache",
36 get_interruptible_flag(inode
));
38 gossip_debug(GOSSIP_UTILS_DEBUG
, "%s: got return value of %d\n",
46 * Post and wait for the I/O upcall to finish
48 ssize_t
wait_for_direct_io(enum ORANGEFS_io_type type
, struct inode
*inode
,
49 loff_t
*offset
, struct iov_iter
*iter
, size_t total_size
,
50 loff_t readahead_size
, struct orangefs_write_range
*wr
,
51 int *index_return
, struct file
*file
)
53 struct orangefs_inode_s
*orangefs_inode
= ORANGEFS_I(inode
);
54 struct orangefs_khandle
*handle
= &orangefs_inode
->refn
.khandle
;
55 struct orangefs_kernel_op_s
*new_op
= NULL
;
62 new_op
= op_alloc(ORANGEFS_VFS_OP_FILE_IO
);
67 new_op
->upcall
.req
.io
.readahead_size
= readahead_size
;
68 new_op
->upcall
.req
.io
.io_type
= type
;
69 new_op
->upcall
.req
.io
.refn
= orangefs_inode
->refn
;
71 populate_shared_memory
:
72 /* get a shared buffer index */
73 buffer_index
= orangefs_bufmap_get();
74 if (buffer_index
< 0) {
76 gossip_debug(GOSSIP_FILE_DEBUG
,
77 "%s: orangefs_bufmap_get failure (%zd)\n",
81 gossip_debug(GOSSIP_FILE_DEBUG
,
82 "%s(%pU): GET op %p -> buffer_index %d\n",
88 new_op
->uses_shared_memory
= 1;
89 new_op
->upcall
.req
.io
.buf_index
= buffer_index
;
90 new_op
->upcall
.req
.io
.count
= total_size
;
91 new_op
->upcall
.req
.io
.offset
= *offset
;
92 if (type
== ORANGEFS_IO_WRITE
&& wr
) {
93 new_op
->upcall
.uid
= from_kuid(&init_user_ns
, wr
->uid
);
94 new_op
->upcall
.gid
= from_kgid(&init_user_ns
, wr
->gid
);
97 * Orangefs has no open, and orangefs checks file permissions
98 * on each file access. Posix requires that file permissions
99 * be checked on open and nowhere else. Orangefs-through-the-kernel
100 * needs to seem posix compliant.
102 * The VFS opens files, even if the filesystem provides no
103 * method. We can see if a file was successfully opened for
104 * read and or for write by looking at file->f_mode.
106 * When writes are flowing from the page cache, file is no
107 * longer available. We can trust the VFS to have checked
108 * file->f_mode before writing to the page cache.
110 * The mode of a file might change between when it is opened
111 * and IO commences, or it might be created with an arbitrary mode.
113 * We'll make sure we don't hit EACCES during the IO stage by
114 * using UID 0. Some of the time we have access without changing
115 * to UID 0 - how to check?
118 open_for_write
= file
->f_mode
& FMODE_WRITE
;
119 open_for_read
= file
->f_mode
& FMODE_READ
;
122 open_for_read
= 0; /* not relevant? */
124 if ((type
== ORANGEFS_IO_WRITE
) && open_for_write
)
125 new_op
->upcall
.uid
= 0;
126 if ((type
== ORANGEFS_IO_READ
) && open_for_read
)
127 new_op
->upcall
.uid
= 0;
129 gossip_debug(GOSSIP_FILE_DEBUG
,
130 "%s(%pU): offset: %llu total_size: %zd\n",
136 * Stage 1: copy the buffers into client-core's address space
138 if (type
== ORANGEFS_IO_WRITE
&& total_size
) {
139 ret
= orangefs_bufmap_copy_from_iovec(iter
, buffer_index
,
142 gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
143 __func__
, (long)ret
);
148 gossip_debug(GOSSIP_FILE_DEBUG
,
149 "%s(%pU): Calling post_io_request with tag (%llu)\n",
154 /* Stage 2: Service the I/O operation */
155 ret
= service_operation(new_op
,
156 type
== ORANGEFS_IO_WRITE
?
159 get_interruptible_flag(inode
));
162 * If service_operation() returns -EAGAIN #and# the operation was
163 * purged from orangefs_request_list or htable_ops_in_progress, then
164 * we know that the client was restarted, causing the shared memory
165 * area to be wiped clean. To restart a write operation in this
166 * case, we must re-copy the data from the user's iovec to a NEW
167 * shared memory location. To restart a read operation, we must get
168 * a new shared memory location.
170 if (ret
== -EAGAIN
&& op_state_purged(new_op
)) {
171 orangefs_bufmap_put(buffer_index
);
172 if (type
== ORANGEFS_IO_WRITE
)
173 iov_iter_revert(iter
, total_size
);
174 gossip_debug(GOSSIP_FILE_DEBUG
,
175 "%s:going to repopulate_shared_memory.\n",
177 goto populate_shared_memory
;
183 * We can't return EINTR if any data was written,
184 * it's not POSIX. It is minimally acceptable
185 * to give a partial write, the way NFS does.
187 * It would be optimal to return all or nothing,
188 * but if a userspace write is bigger than
189 * an IO buffer, and the interrupt occurs
190 * between buffer writes, that would not be
193 switch (new_op
->op_state
- OP_VFS_STATE_GIVEN_UP
) {
195 * If the op was waiting when the interrupt
196 * occurred, then the client-core did not
199 case OP_VFS_STATE_WAITING
:
206 * If the op was in progress when the interrupt
207 * occurred, then the client-core was able to
210 case OP_VFS_STATE_INPROGR
:
211 if (type
== ORANGEFS_IO_READ
)
217 gossip_err("%s: unexpected op state :%d:.\n",
223 gossip_debug(GOSSIP_FILE_DEBUG
,
224 "%s: got EINTR, state:%d: %p\n",
229 gossip_err("%s: error in %s handle %pU, returning %zd\n",
231 type
== ORANGEFS_IO_READ
?
232 "read from" : "write to",
235 if (orangefs_cancel_op_in_progress(new_op
))
242 * Stage 3: Post copy buffers from client-core's address space
244 if (type
== ORANGEFS_IO_READ
&& new_op
->downcall
.resp
.io
.amt_complete
) {
246 * NOTE: the iovector can either contain addresses which
247 * can futher be kernel-space or user-space addresses.
248 * or it can pointers to struct page's
251 copy_amount
= new_op
->downcall
.resp
.io
.amt_complete
;
253 ret
= orangefs_bufmap_copy_to_iovec(iter
, buffer_index
,
256 gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
257 __func__
, (long)ret
);
261 gossip_debug(GOSSIP_FILE_DEBUG
,
262 "%s(%pU): Amount %s, returned by the sys-io call:%d\n",
265 type
== ORANGEFS_IO_READ
? "read" : "written",
266 (int)new_op
->downcall
.resp
.io
.amt_complete
);
268 ret
= new_op
->downcall
.resp
.io
.amt_complete
;
271 if (buffer_index
>= 0) {
272 orangefs_bufmap_put(buffer_index
);
273 gossip_debug(GOSSIP_FILE_DEBUG
,
274 "%s(%pU): PUT buffer_index %d\n",
275 __func__
, handle
, buffer_index
);
282 int orangefs_revalidate_mapping(struct inode
*inode
)
284 struct orangefs_inode_s
*orangefs_inode
= ORANGEFS_I(inode
);
285 struct address_space
*mapping
= inode
->i_mapping
;
286 unsigned long *bitlock
= &orangefs_inode
->bitlock
;
290 ret
= wait_on_bit(bitlock
, 1, TASK_KILLABLE
);
293 spin_lock(&inode
->i_lock
);
294 if (test_bit(1, bitlock
)) {
295 spin_unlock(&inode
->i_lock
);
298 if (!time_before(jiffies
, orangefs_inode
->mapping_time
))
300 spin_unlock(&inode
->i_lock
);
306 spin_unlock(&inode
->i_lock
);
308 unmap_mapping_range(mapping
, 0, 0, 0);
309 ret
= filemap_write_and_wait(mapping
);
311 ret
= invalidate_inode_pages2(mapping
);
313 orangefs_inode
->mapping_time
= jiffies
+
314 orangefs_cache_timeout_msecs
*HZ
/1000;
316 clear_bit(1, bitlock
);
317 smp_mb__after_atomic();
318 wake_up_bit(bitlock
, 1);
323 static ssize_t
orangefs_file_read_iter(struct kiocb
*iocb
,
324 struct iov_iter
*iter
)
327 orangefs_stats
.reads
++;
329 down_read(&file_inode(iocb
->ki_filp
)->i_rwsem
);
330 ret
= orangefs_revalidate_mapping(file_inode(iocb
->ki_filp
));
334 ret
= generic_file_read_iter(iocb
, iter
);
336 up_read(&file_inode(iocb
->ki_filp
)->i_rwsem
);
340 static ssize_t
orangefs_file_write_iter(struct kiocb
*iocb
,
341 struct iov_iter
*iter
)
344 orangefs_stats
.writes
++;
346 if (iocb
->ki_pos
> i_size_read(file_inode(iocb
->ki_filp
))) {
347 ret
= orangefs_revalidate_mapping(file_inode(iocb
->ki_filp
));
352 ret
= generic_file_write_iter(iocb
, iter
);
356 static vm_fault_t
orangefs_fault(struct vm_fault
*vmf
)
358 struct file
*file
= vmf
->vma
->vm_file
;
360 ret
= orangefs_inode_getattr(file
->f_mapping
->host
,
361 ORANGEFS_GETATTR_SIZE
);
365 gossip_err("%s: orangefs_inode_getattr failed, "
366 "ret:%d:.\n", __func__
, ret
);
367 return VM_FAULT_SIGBUS
;
369 return filemap_fault(vmf
);
372 static const struct vm_operations_struct orangefs_file_vm_ops
= {
373 .fault
= orangefs_fault
,
374 .map_pages
= filemap_map_pages
,
375 .page_mkwrite
= orangefs_page_mkwrite
,
379 * Memory map a region of a file.
381 static int orangefs_file_mmap(struct file
*file
, struct vm_area_struct
*vma
)
385 ret
= orangefs_revalidate_mapping(file_inode(file
));
389 gossip_debug(GOSSIP_FILE_DEBUG
,
390 "orangefs_file_mmap: called on %pD\n", file
);
392 /* set the sequential readahead hint */
393 vma
->vm_flags
|= VM_SEQ_READ
;
394 vma
->vm_flags
&= ~VM_RAND_READ
;
397 vma
->vm_ops
= &orangefs_file_vm_ops
;
401 #define mapping_nrpages(idata) ((idata)->nrpages)
404 * Called to notify the module that there are no more references to
405 * this file (i.e. no processes have it open).
407 * \note Not called when each file is closed.
409 static int orangefs_file_release(struct inode
*inode
, struct file
*file
)
411 gossip_debug(GOSSIP_FILE_DEBUG
,
412 "orangefs_file_release: called on %pD\n",
416 * remove all associated inode pages from the page cache and
417 * readahead cache (if any); this forces an expensive refresh of
418 * data for the next caller of mmap (or 'get_block' accesses)
420 if (file_inode(file
) &&
421 file_inode(file
)->i_mapping
&&
422 mapping_nrpages(&file_inode(file
)->i_data
)) {
423 if (orangefs_features
& ORANGEFS_FEATURE_READAHEAD
) {
424 gossip_debug(GOSSIP_INODE_DEBUG
,
425 "calling flush_racache on %pU\n",
426 get_khandle_from_ino(inode
));
427 flush_racache(inode
);
428 gossip_debug(GOSSIP_INODE_DEBUG
,
429 "flush_racache finished\n");
437 * Push all data for a specific file onto permanent storage.
439 static int orangefs_fsync(struct file
*file
,
445 struct orangefs_inode_s
*orangefs_inode
=
446 ORANGEFS_I(file_inode(file
));
447 struct orangefs_kernel_op_s
*new_op
= NULL
;
449 ret
= filemap_write_and_wait_range(file_inode(file
)->i_mapping
,
454 new_op
= op_alloc(ORANGEFS_VFS_OP_FSYNC
);
457 new_op
->upcall
.req
.fsync
.refn
= orangefs_inode
->refn
;
459 ret
= service_operation(new_op
,
461 get_interruptible_flag(file_inode(file
)));
463 gossip_debug(GOSSIP_FILE_DEBUG
,
464 "orangefs_fsync got return value of %d\n",
472 * Change the file pointer position for an instance of an open file.
474 * \note If .llseek is overriden, we must acquire lock as described in
475 * Documentation/filesystems/locking.rst.
477 * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
478 * require much changes to the FS
480 static loff_t
orangefs_file_llseek(struct file
*file
, loff_t offset
, int origin
)
483 struct inode
*inode
= file_inode(file
);
485 if (origin
== SEEK_END
) {
487 * revalidate the inode's file size.
488 * NOTE: We are only interested in file size here,
489 * so we set mask accordingly.
491 ret
= orangefs_inode_getattr(file
->f_mapping
->host
,
492 ORANGEFS_GETATTR_SIZE
);
496 gossip_debug(GOSSIP_FILE_DEBUG
,
497 "%s:%s:%d calling make bad inode\n",
505 gossip_debug(GOSSIP_FILE_DEBUG
,
506 "orangefs_file_llseek: offset is %ld | origin is %d"
507 " | inode size is %lu\n",
510 (unsigned long)i_size_read(inode
));
512 return generic_file_llseek(file
, offset
, origin
);
516 * Support local locks (locks that only this kernel knows about)
517 * if Orangefs was mounted -o local_lock.
519 static int orangefs_lock(struct file
*filp
, int cmd
, struct file_lock
*fl
)
523 if (ORANGEFS_SB(file_inode(filp
)->i_sb
)->flags
& ORANGEFS_OPT_LOCAL_LOCK
) {
524 if (cmd
== F_GETLK
) {
526 posix_test_lock(filp
, fl
);
528 rc
= posix_lock_file(filp
, fl
, NULL
);
535 static int orangefs_flush(struct file
*file
, fl_owner_t id
)
538 * This is vfs_fsync_range(file, 0, LLONG_MAX, 0) without the
539 * service_operation in orangefs_fsync.
541 * Do not send fsync to OrangeFS server on a close. Do send fsync
542 * on an explicit fsync call. This duplicates historical OrangeFS
547 r
= filemap_write_and_wait_range(file
->f_mapping
, 0, LLONG_MAX
);
554 /** ORANGEFS implementation of VFS file operations */
555 const struct file_operations orangefs_file_operations
= {
556 .llseek
= orangefs_file_llseek
,
557 .read_iter
= orangefs_file_read_iter
,
558 .write_iter
= orangefs_file_write_iter
,
559 .lock
= orangefs_lock
,
560 .mmap
= orangefs_file_mmap
,
561 .open
= generic_file_open
,
562 .splice_read
= generic_file_splice_read
,
563 .splice_write
= iter_file_splice_write
,
564 .flush
= orangefs_flush
,
565 .release
= orangefs_file_release
,
566 .fsync
= orangefs_fsync
,