/*
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
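/*
 * Example (illustrative): the mapping above yields
 *   cifs_convert_flags(O_RDONLY) == GENERIC_READ
 *   cifs_convert_flags(O_WRONLY) == GENERIC_WRITE
 *   cifs_convert_flags(O_RDWR)   == GENERIC_READ | GENERIC_WRITE
 * The final return is only reached for an invalid O_ACCMODE value and
 * requests a conservative attribute/data mask rather than GENERIC_ALL.
 */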
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
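/*
 * Example (illustrative): O_CREAT | O_EXCL | O_TRUNC maps to
 * SMB_O_CREAT | SMB_O_EXCL | SMB_O_TRUNC, while O_EXCL without O_CREAT
 * is dropped with only a debug message, since POSIX leaves that
 * combination undefined.
 */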
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
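/*
 * Example (illustrative): typical open(2) flag combinations and the
 * disposition chosen by cifs_get_disposition():
 *
 *   O_RDONLY                  -> FILE_OPEN
 *   O_CREAT                   -> FILE_OPEN_IF
 *   O_CREAT | O_EXCL          -> FILE_CREATE
 *   O_CREAT | O_TRUNC         -> FILE_OVERWRITE_IF
 *   O_TRUNC                   -> FILE_OVERWRITE
 */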
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	cifs_dbg(FYI, "Reopen persistent handles");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		cifs_reopen_file(open_file, false /* do not flush */);
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
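/*
 * Example (illustrative): two byte ranges conflict only if they overlap.
 * Given an existing lock with li->offset = 100 and li->length = 50
 * (bytes 100..149), a request for offset = 150, length = 10 is skipped
 * (150 >= 100 + 50), while offset = 120, length = 10 overlaps and is a
 * conflict unless one of the shared-lock or same-fid exceptions above
 * applies.
 */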
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
static u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}
static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
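/*
 * Example (illustrative): after a 512-byte write at offset 4096,
 * end_of_write is 4608; server_eof is advanced only if it was smaller,
 * so out-of-order completions never shrink the cached server EOF.
 */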
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
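/*
 * Worked example (illustrative): sending 3 pages (PAGE_SIZE = 4096) of
 * a file with i_size = 10000, the last page starting at offset 8192:
 * tailsz = min(10000 - 8192, 4096) = 1808 and
 * bytes = (3 - 1) * 4096 + 1808 = 10000, so nothing past EOF is sent.
 */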
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
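/*
 * Example (illustrative): with wsize = 57344 and PAGE_SIZE = 4096, each
 * loop iteration above asks for
 * tofind = min((57344 / 4096) - 1, end - index) + 1 = 14 dirty pages,
 * i.e. one full wsize-sized write per network round trip.
 */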
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
	/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
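/*
 * Note: in the !PageUptodate() path above, the rest of the page may be
 * stale, so the page cannot simply be marked dirty; only the copied
 * bytes are pushed to the server synchronously via cifs_write().
 */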
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
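/*
 * Unlike cifs_strict_fsync() above, plain cifs_fsync() never invalidates
 * the page cache: both flush dirty pages and then ask the server to
 * flush via server->ops->flush (unless the NOSSYNC mount flag is set),
 * but only the strict variant zaps cached pages when no read oplock is
 * held and cached data may therefore be stale.
 */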
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
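/*
 * Worked example: with wsize = 65536 and len = 102400 on a 4096-byte
 * page system, clen = min(102400, 65536) = 65536 and num_pages =
 * DIV_ROUND_UP(65536, 4096) = 16; the caller then loops again for the
 * remaining 36864 bytes.
 */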
static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
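/*
 * Example of the short-copy case: if the third page's copy returns only
 * 100 of the expected 4096 bytes (an unmapped user address, say), the
 * loop breaks with i == 2, *num_pages becomes 3 and *len shrinks to the
 * 8292 bytes actually buffered.
 */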
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		if (!wdata->cfile->invalidHandle ||
		    !cifs_reopen_file(wdata->cfile, false))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
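/*
 * Note: on -EAGAIN from async_writev() the iterator is rewound to the
 * saved copy and re-advanced to the failed offset, so the same byte
 * range is re-collected on the next pass rather than being lost.
 */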
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	struct iov_iter saved_from = *from;
	ssize_t rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = saved_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}
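/*
 * Note: total_written accumulates strictly in list order, i.e. in order
 * of increasing file offset, so a failure partway through still yields
 * a correct short-write count for everything that completed before it.
 */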
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	inode_lock(inode);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	up_read(&cinode->lock_sem);
	return rc;
}
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP &
		     le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}
static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/* release only the pages that were actually allocated */
		unsigned int nr_allocated = i;

		for (i = 0; i < nr_allocated; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
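/*
 * Worked example: with got_bytes == 6000, the first page contributes
 * copy = min(6000, 4096) = 4096 bytes and the second the remaining
 * 1904; remaining reaches 0, so the function returns 0 (success).
 */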
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}
		n = len;
		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			n = PAGE_SIZE;
			len -= n;
		} else {
			zero_user(page, len, PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		}
		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
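/*
 * Note: reads larger than rsize are split here, e.g. a 1 MiB request
 * with rsize = 64 KiB becomes 16 async reads, each with its own credit
 * reservation and its own readdata queued on rdata_list.
 */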
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
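/*
 * Summary of the policy above: no level II oplock -> always read from
 * the server (cifs_user_readv); POSIX extensions with POSIX byte-range
 * locks -> trust the page cache; otherwise go through the page cache
 * only after checking for conflicting mandatory locks under lock_sem.
 */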
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid,
						    &io_parms, &bytes_read,
						    &cur_offset, &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

		put_page(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;

		if (len >= PAGE_SIZE) {
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
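/*
 * Worked example: a page_list holding indexes 11,9,8,7 (declining
 * order) with rsize = 64 KiB yields one request covering pages 7-9
 * (12288 bytes); the discontinuity before index 11 ends the batch,
 * and 11 is picked up by the next iteration in cifs_readpages().
 */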
static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned int credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/*
	 * Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
/*
 * cifs_readpage_worker must be called with the page pinned
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
				loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime = current_time(file_inode(file));

	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon =
		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&tcon->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&tcon->open_file_lock);
	return 0;
}
/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
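/*
 * Note: cifs_write_begin() skips the read-modify-write read in three
 * cases visible above: the page is already uptodate, the write covers a
 * full page, or (with a read oplock) the write lies at or beyond EOF so
 * none of the existing data would be used.
 */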
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}
static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	cifs_done_oplock_break(cinode);
}
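/*
 * Note the ordering above: wait out pending writers, downgrade the
 * cached oplock state, flush/invalidate the page cache as needed, push
 * cached byte-range locks to the server, and only then acknowledge the
 * break (unless it was cancelled, e.g. by a reconnect).
 */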
/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};