]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - fs/cifs/file.c
CIFS: Fix DFS handling in cifs_get_file_info
[mirror_ubuntu-artful-kernel.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
/*
 * Open a file via the SMB POSIX extensions (CIFSPOSIXCreate) and, when
 * requested, instantiate or refresh the matching inode.
 *
 * @full_path:	path of the file relative to the share root
 * @pinode:	in/out inode pointer, or NULL if the caller needs no inode
 *		info.  If *pinode is NULL a new inode is obtained via
 *		cifs_iget(); otherwise the existing inode is refreshed with
 *		the attributes returned by the server.
 * @sb:		superblock of the mount
 * @mode:	create mode; masked below with the caller's umask
 * @f_flags:	POSIX open flags, translated to SMB_O_* for the wire
 * @poplock:	out: oplock level granted by the server
 * @pnetfid:	out: server file handle
 * @xid:	transaction id for request tracking
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
	     struct super_block *sb, int mode, unsigned int f_flags,
	     __u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* a Type of -1 means the server sent no usable file metadata */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
eeb910a6
PS
/*
 * Open a file the "NT way" (used when the POSIX open path is unavailable
 * or failed): issue CIFSSMBOpen, or SMBLegacyOpen for pre-NT servers,
 * then refresh the inode metadata from the server.
 *
 * @full_path:	path of the file relative to the share root
 * @inode:	inode to refresh after a successful open
 * @cifs_sb:	per-superblock CIFS data (charset, mount flags)
 * @tcon:	tree connection to issue the open on
 * @f_flags:	POSIX open flags, mapped to access/disposition below
 * @poplock:	out: oplock level granted by the server
 * @pnetfid:	out: server file handle
 * @xid:	transaction id for request tracking
 *
 * Returns 0 on success or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buf receives FILE_ALL_INFO from the open reply, fed to
	   cifs_get_inode_info below */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* NOTE(review): the legacy path passes CREATE_NOT_DIR directly and so
	   never carries CREATE_OPEN_BACKUP_INTENT — presumably intentional for
	   old servers; confirm before changing */
	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
243
15ecb436
JL
/*
 * Allocate and initialize the per-open-handle cifsFileInfo: take
 * references on the dentry and tcon link, link the entry onto the tcon
 * and inode open-file lists, record the granted oplock level and attach
 * the result to file->private_data.
 *
 * Returns the new cifsFileInfo, or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	/* initial reference; dropped via cifsFileInfo_put */
	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}
282
cdff08e7
SF
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	/* not the last reference: just drop it and return */
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* make sure no oplock break work is pending before tearing down */
	cancel_work_sync(&cifs_file->oplock_break);

	/* close the handle on the server unless it is already gone;
	   the return code is deliberately ignored */
	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
346
1da177e4
LT
/*
 * ->open for CIFS regular files.  Tries the SMB POSIX open first when
 * the server advertises the POSIX path-operations capability, and falls
 * back to the NT open path (cifs_nt_open) on servers or errors where
 * POSIX open cannot be used.  On success the file gets a cifsFileInfo
 * attached as private_data.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims the capability but rejects the call:
			   disable POSIX open for this tcon from now on */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* no local state to track it: close the server handle again */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
				       pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
449
/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.  Currently a stub that always succeeds.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
460
/*
 * Re-open a file whose server handle was invalidated (e.g. after a
 * reconnect).  Tries the POSIX reopen path first when available, then
 * the NT open path.  When @can_flush is set, dirty pages are written
 * back and the inode metadata is refreshed from the server; callers in
 * the writeback path pass false to avoid deadlocking on their own data.
 *
 * Returns 0 on success (or if the handle was already valid), otherwise
 * a negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	/* fh_mutex serializes handle revalidation for this open instance */
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* someone else already reopened it: nothing to do */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
	     inode, pCifsFile->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inod
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
586
587int cifs_close(struct inode *inode, struct file *file)
588{
77970693
JL
589 if (file->private_data != NULL) {
590 cifsFileInfo_put(file->private_data);
591 file->private_data = NULL;
592 }
7ee1af76 593
cdff08e7
SF
594 /* return code from the ->release op is always ignored */
595 return 0;
1da177e4
LT
596}
597
/*
 * ->release for CIFS directories: close any still-running FindFirst/
 * FindNext search on the server, free the cached search response buffer
 * and release the private cifsFileInfo.
 *
 * Returns 0 (close failures are logged but otherwise ignored).
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			/* lock dropped before the network round trip */
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
642
d59dad2b 643static int store_file_lock(struct cifsInodeInfo *cinode, __u64 len,
03776f45 644 __u64 offset, __u8 type, __u16 netfid)
7ee1af76 645{
fb8c4b14
SF
646 struct cifsLockInfo *li =
647 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
648 if (li == NULL)
649 return -ENOMEM;
d59dad2b 650 li->netfid = netfid;
7ee1af76
JA
651 li->offset = offset;
652 li->length = len;
03776f45
PS
653 li->type = type;
654 li->pid = current->tgid;
d59dad2b
PS
655 mutex_lock(&cinode->lock_mutex);
656 list_add_tail(&li->llist, &cinode->llist);
657 mutex_unlock(&cinode->lock_mutex);
7ee1af76
JA
658 return 0;
659}
660
03776f45
PS
661static void
662cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
663 bool *wait_flag)
1da177e4 664{
03776f45 665 if (flock->fl_flags & FL_POSIX)
b6b38f70 666 cFYI(1, "Posix");
03776f45 667 if (flock->fl_flags & FL_FLOCK)
b6b38f70 668 cFYI(1, "Flock");
03776f45 669 if (flock->fl_flags & FL_SLEEP) {
b6b38f70 670 cFYI(1, "Blocking lock");
03776f45 671 *wait_flag = true;
1da177e4 672 }
03776f45 673 if (flock->fl_flags & FL_ACCESS)
b6b38f70 674 cFYI(1, "Process suspended by mandatory locking - "
03776f45
PS
675 "not implemented yet");
676 if (flock->fl_flags & FL_LEASE)
b6b38f70 677 cFYI(1, "Lease on file - not implemented yet");
03776f45 678 if (flock->fl_flags &
1da177e4 679 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
03776f45 680 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1da177e4 681
03776f45
PS
682 *type = LOCKING_ANDX_LARGE_FILES;
683 if (flock->fl_type == F_WRLCK) {
b6b38f70 684 cFYI(1, "F_WRLCK ");
03776f45
PS
685 *lock = 1;
686 } else if (flock->fl_type == F_UNLCK) {
b6b38f70 687 cFYI(1, "F_UNLCK");
03776f45
PS
688 *unlock = 1;
689 /* Check if unlock includes more than one lock range */
690 } else if (flock->fl_type == F_RDLCK) {
b6b38f70 691 cFYI(1, "F_RDLCK");
03776f45
PS
692 *type |= LOCKING_ANDX_SHARED_LOCK;
693 *lock = 1;
694 } else if (flock->fl_type == F_EXLCK) {
b6b38f70 695 cFYI(1, "F_EXLCK");
03776f45
PS
696 *lock = 1;
697 } else if (flock->fl_type == F_SHLCK) {
b6b38f70 698 cFYI(1, "F_SHLCK");
03776f45
PS
699 *type |= LOCKING_ANDX_SHARED_LOCK;
700 *lock = 1;
1da177e4 701 } else
b6b38f70 702 cFYI(1, "Unknown type of lock");
03776f45 703}
1da177e4 704
03776f45
PS
/*
 * Handle F_GETLK: probe whether the range described by @flock could be
 * locked.  On the POSIX path this is a single query; on the Windows
 * path the probe works by trying to take (and immediately release) the
 * lock.  The result is reported back through flock->fl_type: F_UNLCK if
 * the range is free, otherwise the conflicting lock type.
 *
 * Returns 0 in the non-POSIX cases; the POSIX path returns whatever
 * CIFSSMBPosixLock returns.
 */
static int
cifs_getlk(struct cifsFileInfo *cfile, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	__u16 netfid = cfile->netfid;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (posix_lck) {
		int posix_lock_type;
		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
				      length, flock, posix_lock_type,
				      wait_flag);
		return rc;
	}

	/* BB we could chain these into one lock request BB */
	/* try to take the requested lock; success means no conflict */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1, type, 0, 0);
	if (rc == 0) {
		/* release the probe lock again */
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type, 0, 0);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		rc = 0;
		return rc;
	}

	/* a shared probe failed: report an exclusive conflict */
	if (type & LOCKING_ANDX_SHARED_LOCK) {
		flock->fl_type = F_WRLCK;
		rc = 0;
		return rc;
	}

	/* exclusive probe failed: retry shared to tell whether the
	   conflicting lock is shared or exclusive */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1,
			 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type | LOCKING_ANDX_SHARED_LOCK,
				 0, 0);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	rc = 0;
	return rc;
}
765
/*
 * Apply a lock or unlock request to the server.  POSIX-capable mounts
 * use a single CIFSSMBPosixLock call; otherwise Windows-style locks are
 * sent with CIFSSMBLock and tracked locally (store_file_lock) so that a
 * later unlock can find and release every fully-overlapped stored lock.
 * Finally the VFS-level posix lock state is updated for FL_POSIX locks.
 *
 * Returns 0 on success or the (last) failing rc.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;
		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, length,
				      flock, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
				 flock->fl_start, 0, lock, type, wait_flag, 0);
		if (rc == 0) {
			/* For Windows locks we must store them. */
			rc = store_file_lock(cinode, length, flock->fl_start,
					     type, netfid);
		}
	} else if (unlock) {
		/*
		 * For each stored lock that this unlock overlaps completely,
		 * unlock it.
		 */
		int stored_rc = 0;
		struct cifsLockInfo *li, *tmp;

		mutex_lock(&cinode->lock_mutex);
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			/* skip stored locks not fully inside the range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (cfile->netfid != li->netfid)
				continue;

			stored_rc = CIFSSMBLock(xid, tcon, netfid,
						current->tgid, li->length,
						li->offset, 1, 0, li->type,
						0, 0);
			if (stored_rc)
				rc = stored_rc;
			else {
				/* server released it: drop the record */
				list_del(&li->llist);
				kfree(li);
			}
		}
		mutex_unlock(&cinode->lock_mutex);
	}
out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
837
/*
 * ->lock entry point for CIFS files.  Decodes the request via
 * cifs_read_flock, decides whether the POSIX byte-range-lock protocol
 * can be used, and dispatches to cifs_getlk (F_GETLK) or cifs_setlk
 * (lock/unlock).  Requests that are neither a lock nor an unlock are
 * rejected with -EOPNOTSUPP.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u8 type;

	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);
	netfid = cfile->netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	/* POSIX brl usable only when the server supports FCNTL locks and
	   the mount did not disable them */
	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(cfile, flock, type, wait_flag, posix_lck, xid);
		FreeXid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	FreeXid(xid);
	return rc;
}
894
fbec9ab9 895/* update the file size (if needed) after a write */
72432ffc 896void
fbec9ab9
JL
897cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
898 unsigned int bytes_written)
899{
900 loff_t end_of_write = offset + bytes_written;
901
902 if (end_of_write > cifsi->server_eof)
903 cifsi->server_eof = end_of_write;
904}
905
/*
 * Write @write_size bytes from @write_data to the server starting at
 * *@poffset, in chunks capped by the negotiated wsize.  Each chunk is
 * retried on -EAGAIN, reopening an invalidated handle first.  On
 * progress, *poffset is advanced, the cached server EOF and (if grown)
 * i_size are updated, and the total bytes written is returned.  If the
 * very first chunk fails, the negative rc is returned instead.
 */
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the negotiated write size */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			/* partial progress is still returned as success */
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
984
/*
 * Find an open handle on this inode that can be used for reading,
 * preferring the first valid FMODE_READ entry in openFileList. When
 * fsuid_only is set (honored only on multiuser mounts) the handle must
 * belong to the current fsuid. Returns the handle with its refcount
 * bumped (caller must cifsFileInfo_put it), or NULL if none is found.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
630f3f0c 1018
/*
 * Find an open handle on this inode usable for writing. First tries
 * handles opened by the current tgid, then any handle. An invalidated
 * handle is reopened (the list spinlock must be dropped around the
 * blocking reopen, which is why the loop restarts via refind_writable).
 * Returns the handle with its refcount bumped (caller must
 * cifsFileInfo_put it), or NULL if none can be found or reopened.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			/* take a reference before dropping the lock so the
			   entry cannot be freed under us */
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
1089
/*
 * Write the byte range [from, to) of a page cache page back to the
 * server using any writable handle on the inode. The range is clamped
 * so the file is never extended here. Returns 0 on success, 0 if the
 * page is past EOF (truncate race -- nothing to do), or a negative
 * error (-EIO if the range is bad or no writable handle exists).
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	/* every early return below must kunmap() this mapping */
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1143
/*
 * Write back dirty pages for this mapping. Gathers runs of consecutive
 * dirty pages (up to wsize worth) into a cifs_writedata and submits
 * them with cifs_async_writev(), retrying -EAGAIN when the writeback
 * mode is WB_SYNC_ALL. Falls back to generic_writepages() (one page at
 * a time via cifs_writepage) when wsize is smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* second pass: lock pages and keep only a consecutive,
		   still-dirty prefix of the batch */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
1da177e4 1342
/*
 * Write back a single (already locked) page via cifs_partialpagewrite.
 * Retries forever on -EAGAIN under WB_SYNC_ALL; otherwise redirties the
 * page on -EAGAIN so writeback will come back to it. The caller keeps
 * the page lock; an extra page reference is held across the write.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
1381
/* ->writepage: write the page out, then drop the page lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return retval;
}
1388
/*
 * ->write_end: finish a buffered write begun by ->write_begin. If the
 * page is fully uptodate the data is just marked dirty for later
 * writeback; otherwise the copied range is pushed to the server
 * synchronously via cifs_write() (avoids reading the rest of the page
 * first). Extends i_size when the write goes past it, then releases
 * the page lock and reference taken by write_begin.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	/* PageChecked set by write_begin means "treat as uptodate only
	   if the whole requested range was copied" */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
1449
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], then,
 * when we do not hold a read oplock, invalidate the page cache (an
 * invalidate failure is logged but deliberately ignored), and finally
 * ask the server to flush the handle unless the mount disabled it
 * (CIFS_MOUNT_NOSSYNC).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
1486
/*
 * fsync: flush dirty pages in [start, end] and then request a server-
 * side flush of the handle unless the mount disabled it
 * (CIFS_MOUNT_NOSSYNC). Unlike cifs_strict_fsync, no page cache
 * invalidation is performed.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
1514
1da177e4
LT
1515/*
1516 * As file closes, flush all cached write data for this inode checking
1517 * for write behind errors.
1518 */
75e1fcc0 1519int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1520{
fb8c4b14 1521 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1522 int rc = 0;
1523
eb4b756b 1524 if (file->f_mode & FMODE_WRITE)
d3f1322a 1525 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1526
b6b38f70 1527 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1528
1529 return rc;
1530}
1531
72432ffc
PS
1532static int
1533cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1534{
1535 int rc = 0;
1536 unsigned long i;
1537
1538 for (i = 0; i < num_pages; i++) {
1539 pages[i] = alloc_page(__GFP_HIGHMEM);
1540 if (!pages[i]) {
1541 /*
1542 * save number of pages we have already allocated and
1543 * return with ENOMEM error
1544 */
1545 num_pages = i;
1546 rc = -ENOMEM;
1547 goto error;
1548 }
1549 }
1550
1551 return rc;
1552
1553error:
1554 for (i = 0; i < num_pages; i++)
1555 put_page(pages[i]);
1556 return rc;
1557}
1558
1559static inline
1560size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1561{
1562 size_t num_pages;
1563 size_t clen;
1564
1565 clen = min_t(const size_t, len, wsize);
1566 num_pages = clen / PAGE_CACHE_SIZE;
1567 if (clen % PAGE_CACHE_SIZE)
1568 num_pages++;
1569
1570 if (cur_len)
1571 *cur_len = clen;
1572
1573 return num_pages;
1574}
1575
1576static ssize_t
1577cifs_iovec_write(struct file *file, const struct iovec *iov,
1578 unsigned long nr_segs, loff_t *poffset)
1579{
76429c14
PS
1580 unsigned int written;
1581 unsigned long num_pages, npages, i;
1582 size_t copied, len, cur_len;
1583 ssize_t total_written = 0;
72432ffc
PS
1584 struct kvec *to_send;
1585 struct page **pages;
1586 struct iov_iter it;
1587 struct inode *inode;
1588 struct cifsFileInfo *open_file;
96daf2b0 1589 struct cifs_tcon *pTcon;
72432ffc 1590 struct cifs_sb_info *cifs_sb;
fa2989f4 1591 struct cifs_io_parms io_parms;
72432ffc 1592 int xid, rc;
d4ffff1f 1593 __u32 pid;
72432ffc
PS
1594
1595 len = iov_length(iov, nr_segs);
1596 if (!len)
1597 return 0;
1598
1599 rc = generic_write_checks(file, poffset, &len, 0);
1600 if (rc)
1601 return rc;
1602
1603 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1604 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1605
1606 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1607 if (!pages)
1608 return -ENOMEM;
1609
1610 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1611 if (!to_send) {
1612 kfree(pages);
1613 return -ENOMEM;
1614 }
1615
1616 rc = cifs_write_allocate_pages(pages, num_pages);
1617 if (rc) {
1618 kfree(pages);
1619 kfree(to_send);
1620 return rc;
1621 }
1622
1623 xid = GetXid();
1624 open_file = file->private_data;
d4ffff1f
PS
1625
1626 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1627 pid = open_file->pid;
1628 else
1629 pid = current->tgid;
1630
72432ffc
PS
1631 pTcon = tlink_tcon(open_file->tlink);
1632 inode = file->f_path.dentry->d_inode;
1633
1634 iov_iter_init(&it, iov, nr_segs, len, 0);
1635 npages = num_pages;
1636
1637 do {
1638 size_t save_len = cur_len;
1639 for (i = 0; i < npages; i++) {
1640 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1641 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1642 copied);
1643 cur_len -= copied;
1644 iov_iter_advance(&it, copied);
1645 to_send[i+1].iov_base = kmap(pages[i]);
1646 to_send[i+1].iov_len = copied;
1647 }
1648
1649 cur_len = save_len - cur_len;
1650
1651 do {
1652 if (open_file->invalidHandle) {
1653 rc = cifs_reopen_file(open_file, false);
1654 if (rc != 0)
1655 break;
1656 }
fa2989f4 1657 io_parms.netfid = open_file->netfid;
d4ffff1f 1658 io_parms.pid = pid;
fa2989f4
PS
1659 io_parms.tcon = pTcon;
1660 io_parms.offset = *poffset;
1661 io_parms.length = cur_len;
1662 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1663 npages, 0);
72432ffc
PS
1664 } while (rc == -EAGAIN);
1665
1666 for (i = 0; i < npages; i++)
1667 kunmap(pages[i]);
1668
1669 if (written) {
1670 len -= written;
1671 total_written += written;
1672 cifs_update_eof(CIFS_I(inode), *poffset, written);
1673 *poffset += written;
1674 } else if (rc < 0) {
1675 if (!total_written)
1676 total_written = rc;
1677 break;
1678 }
1679
1680 /* get length and number of kvecs of the next write */
1681 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1682 } while (len > 0);
1683
1684 if (total_written > 0) {
1685 spin_lock(&inode->i_lock);
1686 if (*poffset > inode->i_size)
1687 i_size_write(inode, *poffset);
1688 spin_unlock(&inode->i_lock);
1689 }
1690
1691 cifs_stats_bytes_written(pTcon, total_written);
1692 mark_inode_dirty_sync(inode);
1693
1694 for (i = 0; i < num_pages; i++)
1695 put_page(pages[i]);
1696 kfree(to_send);
1697 kfree(pages);
1698 FreeXid(xid);
1699 return total_written;
1700}
1701
0b81c1c4 1702ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
1703 unsigned long nr_segs, loff_t pos)
1704{
1705 ssize_t written;
1706 struct inode *inode;
1707
1708 inode = iocb->ki_filp->f_path.dentry->d_inode;
1709
1710 /*
1711 * BB - optimize the way when signing is disabled. We can drop this
1712 * extra memory-to-memory copying and use iovec buffers for constructing
1713 * write request.
1714 */
1715
1716 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1717 if (written > 0) {
1718 CIFS_I(inode)->invalid_mapping = true;
1719 iocb->ki_pos = pos;
1720 }
1721
1722 return written;
1723}
1724
1725ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1726 unsigned long nr_segs, loff_t pos)
1727{
1728 struct inode *inode;
1729
1730 inode = iocb->ki_filp->f_path.dentry->d_inode;
1731
1732 if (CIFS_I(inode)->clientCanCacheAll)
1733 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1734
1735 /*
1736 * In strict cache mode we need to write the data to the server exactly
1737 * from the pos to pos+len-1 rather than flush all affected pages
1738 * because it may cause a error with mandatory locks on these pages but
1739 * not on the region from pos to ppos+len-1.
1740 */
1741
1742 return cifs_user_writev(iocb, iov, nr_segs, pos);
1743}
1744
/*
 * Read up to iov_length(iov, nr_segs) bytes from the server at *poffset
 * directly into the user iovec, in rsize-sized chunks, reopening
 * invalidated handles and retrying on -EAGAIN. Advances *poffset by the
 * bytes read. Returns the total read, or the error code if nothing was
 * read at all.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				/* payload starts after the 4-byte RFC1001
				   length plus the response's DataOffset */
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				/* release whichever buffer type the
				   transport handed back */
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
1839
0b81c1c4 1840ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
1841 unsigned long nr_segs, loff_t pos)
1842{
1843 ssize_t read;
1844
1845 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1846 if (read > 0)
1847 iocb->ki_pos = pos;
1848
1849 return read;
1850}
1851
1852ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1853 unsigned long nr_segs, loff_t pos)
1854{
1855 struct inode *inode;
1856
1857 inode = iocb->ki_filp->f_path.dentry->d_inode;
1858
1859 if (CIFS_I(inode)->clientCanCacheRead)
1860 return generic_file_aio_read(iocb, iov, nr_segs, pos);
1861
1862 /*
1863 * In strict cache mode we need to read from the server all the time
1864 * if we don't have level II oplock because the server can delay mtime
1865 * change - so we can't make a decision about inode invalidating.
1866 * And we can also fail with pagereading if there are mandatory locks
1867 * on pages affected by this read but not on the region from pos to
1868 * pos+len-1.
1869 */
1870
1871 return cifs_user_readv(iocb, iov, nr_segs, pos);
1872}
1da177e4
LT
1873
1874static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 1875 loff_t *poffset)
1da177e4
LT
1876{
1877 int rc = -EACCES;
1878 unsigned int bytes_read = 0;
1879 unsigned int total_read;
1880 unsigned int current_read_size;
5eba8ab3 1881 unsigned int rsize;
1da177e4 1882 struct cifs_sb_info *cifs_sb;
96daf2b0 1883 struct cifs_tcon *pTcon;
1da177e4
LT
1884 int xid;
1885 char *current_offset;
1886 struct cifsFileInfo *open_file;
d4ffff1f 1887 struct cifs_io_parms io_parms;
ec637e3f 1888 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 1889 __u32 pid;
1da177e4
LT
1890
1891 xid = GetXid();
e6a00296 1892 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1893
5eba8ab3
JL
1894 /* FIXME: set up handlers for larger reads and/or convert to async */
1895 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
1896
1da177e4 1897 if (file->private_data == NULL) {
0f3bc09e 1898 rc = -EBADF;
1da177e4 1899 FreeXid(xid);
0f3bc09e 1900 return rc;
1da177e4 1901 }
c21dfb69 1902 open_file = file->private_data;
13cfb733 1903 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1904
d4ffff1f
PS
1905 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1906 pid = open_file->pid;
1907 else
1908 pid = current->tgid;
1909
1da177e4 1910 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1911 cFYI(1, "attempting read on write only file instance");
1da177e4 1912
fb8c4b14 1913 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1914 read_size > total_read;
1915 total_read += bytes_read, current_offset += bytes_read) {
5eba8ab3
JL
1916 current_read_size = min_t(uint, read_size - total_read, rsize);
1917
f9f5c817
SF
1918 /* For windows me and 9x we do not want to request more
1919 than it negotiated since it will refuse the read then */
fb8c4b14 1920 if ((pTcon->ses) &&
f9f5c817 1921 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
7748dd6e 1922 current_read_size = min_t(uint, current_read_size,
c974befa 1923 CIFSMaxBufSize);
f9f5c817 1924 }
1da177e4
LT
1925 rc = -EAGAIN;
1926 while (rc == -EAGAIN) {
cdff08e7 1927 if (open_file->invalidHandle) {
15886177 1928 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1929 if (rc != 0)
1930 break;
1931 }
d4ffff1f
PS
1932 io_parms.netfid = open_file->netfid;
1933 io_parms.pid = pid;
1934 io_parms.tcon = pTcon;
1935 io_parms.offset = *poffset;
1936 io_parms.length = current_read_size;
1937 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1938 &current_offset, &buf_type);
1da177e4
LT
1939 }
1940 if (rc || (bytes_read == 0)) {
1941 if (total_read) {
1942 break;
1943 } else {
1944 FreeXid(xid);
1945 return rc;
1946 }
1947 } else {
a4544347 1948 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1949 *poffset += bytes_read;
1950 }
1951 }
1952 FreeXid(xid);
1953 return total_read;
1954}
1955
ca83ce3d
JL
1956/*
1957 * If the page is mmap'ed into a process' page tables, then we need to make
1958 * sure that it doesn't change while being written back.
1959 */
1960static int
1961cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1962{
1963 struct page *page = vmf->page;
1964
1965 lock_page(page);
1966 return VM_FAULT_LOCKED;
1967}
1968
1969static struct vm_operations_struct cifs_file_vm_ops = {
1970 .fault = filemap_fault,
1971 .page_mkwrite = cifs_page_mkwrite,
1972};
1973
7a6a19b1
PS
1974int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1975{
1976 int rc, xid;
1977 struct inode *inode = file->f_path.dentry->d_inode;
1978
1979 xid = GetXid();
1980
6feb9891
PS
1981 if (!CIFS_I(inode)->clientCanCacheRead) {
1982 rc = cifs_invalidate_mapping(inode);
1983 if (rc)
1984 return rc;
1985 }
7a6a19b1
PS
1986
1987 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
1988 if (rc == 0)
1989 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
1990 FreeXid(xid);
1991 return rc;
1992}
1993
1da177e4
LT
1994int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1995{
1da177e4
LT
1996 int rc, xid;
1997
1998 xid = GetXid();
abab095d 1999 rc = cifs_revalidate_file(file);
1da177e4 2000 if (rc) {
b6b38f70 2001 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
2002 FreeXid(xid);
2003 return rc;
2004 }
2005 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2006 if (rc == 0)
2007 vma->vm_ops = &cifs_file_vm_ops;
1da177e4
LT
2008 FreeXid(xid);
2009 return rc;
2010}
2011
1da177e4
LT
/*
 * ->readpages: batch-fill a set of pagecache pages via async reads.
 *
 * @file:      open file the read is on behalf of
 * @mapping:   address space the pages belong to
 * @page_list: pages to fill, in order of DECLINING index
 * @num_pages: number of pages on @page_list
 *
 * Pages are pulled off the tail of @page_list (lowest index first),
 * batched into contiguous, rsize-bounded runs, and handed to
 * cifs_async_readv().  Returns 0 on success or the first error hit;
 * pages not yet locked into the cache are left for the VFS to retry
 * via ->readpage.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the caller's pid to the server when mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		/* tail of page_list == lowest remaining index */
		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/* take a reference on the open file for the async completion */
		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		/* reissue on -EAGAIN, reopening a stale handle first if needed */
		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			/* put the batched pages back on the LRU, unlocked */
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
2161
/*
 * Fill one pagecache page synchronously, trying fscache first and
 * falling back to a cifs_read() from the server.
 *
 * @file:    file the page belongs to
 * @page:    locked pagecache page to fill
 * @poffset: file offset to read at; advanced by cifs_read() on success
 *
 * Returns 0 on success (page marked uptodate) or a negative errno.
 * The caller retains ownership of the page lock.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	/* hold an extra page ref across the kmap window */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero-fill the tail on a short read so no stale data leaks */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2205
2206static int cifs_readpage(struct file *file, struct page *page)
2207{
2208 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2209 int rc = -EACCES;
2210 int xid;
2211
2212 xid = GetXid();
2213
2214 if (file->private_data == NULL) {
0f3bc09e 2215 rc = -EBADF;
1da177e4 2216 FreeXid(xid);
0f3bc09e 2217 return rc;
1da177e4
LT
2218 }
2219
b6b38f70
JP
2220 cFYI(1, "readpage %p at offset %d 0x%x\n",
2221 page, (int)offset, (int)offset);
1da177e4
LT
2222
2223 rc = cifs_readpage_worker(file, page, &offset);
2224
2225 unlock_page(page);
2226
2227 FreeXid(xid);
2228 return rc;
2229}
2230
a403a0a3
SF
2231static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2232{
2233 struct cifsFileInfo *open_file;
2234
4477288a 2235 spin_lock(&cifs_file_list_lock);
a403a0a3 2236 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2237 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2238 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2239 return 1;
2240 }
2241 }
4477288a 2242 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2243 return 0;
2244}
2245
1da177e4
LT
2246/* We do not want to update the file size from server for inodes
2247 open for write - to avoid races with writepage extending
2248 the file - in the future we could consider allowing
fb8c4b14 2249 refreshing the inode only on increases in the file size
1da177e4
LT
2250 but this is tricky to do without racing with writebehind
2251 page caching in the current Linux kernel design */
4b18f2a9 2252bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2253{
a403a0a3 2254 if (!cifsInode)
4b18f2a9 2255 return true;
50c2f753 2256
a403a0a3
SF
2257 if (is_inode_writable(cifsInode)) {
2258 /* This inode is open for write at least once */
c32a0b68
SF
2259 struct cifs_sb_info *cifs_sb;
2260
c32a0b68 2261 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2262 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2263 /* since no page cache to corrupt on directio
c32a0b68 2264 we can change size safely */
4b18f2a9 2265 return true;
c32a0b68
SF
2266 }
2267
fb8c4b14 2268 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2269 return true;
7ba52631 2270
4b18f2a9 2271 return false;
23e7dd7d 2272 } else
4b18f2a9 2273 return true;
1da177e4
LT
2274}
2275
d9414774
NP
/*
 * ->write_begin: obtain and prepare a locked pagecache page for a write
 * of @len bytes at @pos.  Several fast paths avoid a read from the
 * server; otherwise the page is filled via cifs_readpage_worker().
 * The page is returned (locked) through @pagep; write_end finishes the
 * operation.  Returns 0 or -ENOMEM.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;	/* *pagep is still set (to NULL) below */
	}

	/* already valid in the cache: nothing to prepare */
	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2347
85f2d6b4
SJ
2348static int cifs_release_page(struct page *page, gfp_t gfp)
2349{
2350 if (PagePrivate(page))
2351 return 0;
2352
2353 return cifs_fscache_release_page(page, gfp);
2354}
2355
2356static void cifs_invalidate_page(struct page *page, unsigned long offset)
2357{
2358 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2359
2360 if (offset == 0)
2361 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2362}
2363
9ad1506b
PS
2364static int cifs_launder_page(struct page *page)
2365{
2366 int rc = 0;
2367 loff_t range_start = page_offset(page);
2368 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2369 struct writeback_control wbc = {
2370 .sync_mode = WB_SYNC_ALL,
2371 .nr_to_write = 0,
2372 .range_start = range_start,
2373 .range_end = range_end,
2374 };
2375
2376 cFYI(1, "Launder page: %p", page);
2377
2378 if (clear_page_dirty_for_io(page))
2379 rc = cifs_writepage_locked(page, &wbc);
2380
2381 cifs_fscache_invalidate_page(page, page->mapping->host);
2382 return rc;
2383}
2384
/*
 * Deferred-work handler run when the server breaks an oplock on a file.
 * Flushes (and, when losing read caching, invalidates) cached data, then
 * acknowledges the break to the server unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* propagate the break to any local leases on the inode */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* losing read caching: wait out writeback and drop
			   the now-untrustworthy cached pages */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
2421
/*
 * Address-space operations used when the server supports buffers large
 * enough for a full page of data (enables ->readpages batching).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
273d81d6
DK
2434
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,	/* no .readpages — see comment above */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};