]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - fs/cifs/file.c
CIFS: Implement caching mechanism for posix brlocks
[mirror_ubuntu-bionic-kernel.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
/*
 * Open/create a file via the POSIX create call of the CIFS Unix
 * extensions.  On success *pnetfid and *poplock are filled in; if
 * pinode is non-NULL and the server returned inode data, the inode is
 * looked up (or refreshed) from the returned FILE_UNIX_BASIC_INFO.
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means the server returned no inode data */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: just refresh its attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
eeb910a6
PS
/*
 * Open a file using the "NT" (non-POSIX) SMB open calls, then refresh
 * the inode metadata from the server.  Fills in *pnetfid and *poplock
 * on success; returns 0 or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		/*
		 * NOTE(review): the legacy path passes CREATE_NOT_DIR rather
		 * than create_options, so CREATE_OPEN_BACKUP_INTENT is never
		 * sent to pre-NT servers — confirm whether SMBLegacyOpen
		 * supports it before changing.
		 */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh inode attributes from the data the open returned */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
243
15ecb436
JL
/*
 * Allocate and initialize a cifsFileInfo for an open server handle,
 * link it onto the tcon's and inode's open-file lists, record the
 * oplock level, and attach it to file->private_data.  Returns the new
 * structure (with an initial reference count of 1) or NULL on ENOMEM.
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	/* initial reference; dropped by cifsFileInfo_put() */
	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	/* byte-range locks may be cached locally only with a full oplock */
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}
283
85160e03
PS
284static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
285
cdff08e7
SF
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other references remain; nothing more to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* make sure no oplock-break work is still queued for this handle */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
350
1da177e4
LT
/*
 * ->open for regular files.  Tries a POSIX open first when the server
 * supports the Unix extensions, falling back to the NT open path on
 * errors that indicate the POSIX call is unsupported or unreliable.
 * On success attaches a cifsFileInfo to file->private_data.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* POSIX open is worth trying only when the tcon advertises the
	   Unix extensions and a previous attempt has not marked it broken */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server rejected the call outright: disable POSIX
			   opens on this tcon for all future opens */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* don't leak the server handle if we can't track it */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
453
/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.  Currently a stub: no lock list is replayed.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
464
/*
 * Re-open a file whose server handle was invalidated (e.g. after a
 * reconnect).  Takes fh_mutex to serialize against concurrent reopens;
 * if can_flush is set, dirty pages are written back and the inode
 * metadata refreshed from the server after the reopen succeeds.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* another thread already reopened the handle */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
	     inode, pCifsFile->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
590
591int cifs_close(struct inode *inode, struct file *file)
592{
77970693
JL
593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
596 }
7ee1af76 597
cdff08e7
SF
598 /* return code from the ->release op is always ignored */
599 return 0;
1da177e4
LT
600}
601
/*
 * ->release for directories: close any uncompleted FindFirst/FindNext
 * search on the server, free the buffered search results, and release
 * the private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			/* mark invalid under the lock, then drop it before
			   the (potentially slow) network close call */
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
646
85160e03
PS
647static struct cifsLockInfo *
648cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
7ee1af76 649{
fb8c4b14
SF
650 struct cifsLockInfo *li =
651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
85160e03
PS
652 if (!li)
653 return li;
d59dad2b 654 li->netfid = netfid;
7ee1af76
JA
655 li->offset = offset;
656 li->length = len;
03776f45
PS
657 li->type = type;
658 li->pid = current->tgid;
85160e03
PS
659 INIT_LIST_HEAD(&li->blist);
660 init_waitqueue_head(&li->block_q);
661 return li;
662}
663
664static void
665cifs_del_lock_waiters(struct cifsLockInfo *lock)
666{
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
671 }
672}
673
/*
 * Scan the inode's cached lock list for a lock that conflicts with the
 * range [offset, offset+length).  Overlapping locks do not conflict
 * when the requested lock is shared and either (a) the existing lock
 * belongs to the same handle and thread group, or (b) it is of the same
 * (shared) type.  On conflict, *conf_lock points at the blocking entry
 * and true is returned.  Caller must hold cinode->lock_mutex (all
 * callers in this file do).
 */
static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
			__u64 length, __u8 type, __u16 netfid,
			struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;	/* no byte-range overlap */
		else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
			 ((netfid == li->netfid && current->tgid == li->pid) ||
			  type == li->type))
			continue;	/* compatible shared lock */
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}
696
/*
 * Test a mandatory byte-range lock against the locally cached lock
 * list.  If a conflicting lock exists, describe it in *flock (F_RDLCK
 * for shared, F_WRLCK for exclusive).  Returns 0 when the answer is
 * authoritative locally, or 1 when locks are not cached and the caller
 * must ask the server.
 */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
	       __u8 type, __u16 netfid, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					&conf_lock);
	if (exist) {
		/* report the conflicting lock back to the caller */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;	/* cache not authoritative: ask the server */
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
725
726static int
727cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
728 __u8 type, __u16 netfid)
729{
730 struct cifsLockInfo *li;
731
732 li = cifs_lock_init(len, offset, type, netfid);
733 if (!li)
734 return -ENOMEM;
735
d59dad2b
PS
736 mutex_lock(&cinode->lock_mutex);
737 list_add_tail(&li->llist, &cinode->llist);
738 mutex_unlock(&cinode->lock_mutex);
7ee1af76
JA
739 return 0;
740}
741
85160e03
PS
742static int
743cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
744 __u8 type, __u16 netfid, bool wait)
745{
746 struct cifsLockInfo *lock, *conf_lock;
747 bool exist;
748 int rc = 0;
749
750 lock = cifs_lock_init(length, offset, type, netfid);
751 if (!lock)
752 return -ENOMEM;
753
754try_again:
755 exist = false;
756 mutex_lock(&cinode->lock_mutex);
757
758 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
759 &conf_lock);
760 if (!exist && cinode->can_cache_brlcks) {
761 list_add_tail(&lock->llist, &cinode->llist);
762 mutex_unlock(&cinode->lock_mutex);
763 return rc;
764 }
765
766 if (!exist)
767 rc = 1;
768 else if (!wait)
769 rc = -EACCES;
770 else {
771 list_add_tail(&lock->blist, &conf_lock->blist);
772 mutex_unlock(&cinode->lock_mutex);
773 rc = wait_event_interruptible(lock->block_q,
774 (lock->blist.prev == &lock->blist) &&
775 (lock->blist.next == &lock->blist));
776 if (!rc)
777 goto try_again;
778 else {
779 mutex_lock(&cinode->lock_mutex);
780 list_del_init(&lock->blist);
781 mutex_unlock(&cinode->lock_mutex);
782 }
783 }
784
785 kfree(lock);
786 mutex_unlock(&cinode->lock_mutex);
787 return rc;
788}
789
/*
 * Test a POSIX byte-range lock against the locally cached VFS lock
 * state.  Returns 0 when the local answer is authoritative; returns 1
 * (restoring the caller's requested type into flock->fl_type) when no
 * local conflict was found but locks are not cached, so the caller must
 * query the server.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	/* posix_test_lock() overwrites fl_type; remember what was asked */
	unsigned char saved_type = flock->fl_type;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
808
809static int
810cifs_posix_lock_set(struct file *file, struct file_lock *flock)
811{
812 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
813 int rc;
814
815 mutex_lock(&cinode->lock_mutex);
816 if (!cinode->can_cache_brlcks) {
817 mutex_unlock(&cinode->lock_mutex);
818 return 1;
819 }
820 rc = posix_lock_file_wait(file, flock);
821 mutex_unlock(&cinode->lock_mutex);
822 return rc;
823}
824
/*
 * Flush all locally cached mandatory byte-range locks for this file to
 * the server, then mark the inode as no longer caching locks.  Returns
 * 0, or the last non-zero status from CIFSSMBLock() (the push continues
 * past individual failures).
 */
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached: another push already ran */
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		stored_rc = CIFSSMBLock(xid, tcon, cfile->netfid,
					li->pid, li->length, li->offset,
					0, 1, li->type, 0, 0);
		if (stored_rc)
			rc = stored_rc;	/* remember failure, keep going */
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
}
857
4f6bcec9
PS
/* copied from fs/locks.c with a name change */
/*
 * Iterate over every file_lock chained off inode->i_flock; @lockp is a
 * struct file_lock ** so entries can be examined in place.
 */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)
862
/*
 * Flush all locally cached POSIX byte-range locks for this file to the
 * server.  The VFS lock list is snapshotted into a private list under
 * lock_flocks() (the lock records cannot be sent while that spinlock is
 * held), then each record is sent and freed.  On -ENOMEM mid-snapshot,
 * whatever was collected so far is still pushed.  Caching is disabled
 * afterwards.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	struct cifsLockInfo *lck, *tmp;
	int rc = 0, xid, type;
	__u64 length;
	struct list_head locks_to_send;

	xid = GetXid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached: another push already ran */
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	INIT_LIST_HEAD(&locks_to_send);

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;

		lck = cifs_lock_init(length, flock->fl_start, type,
				     cfile->netfid);
		if (!lck) {
			rc = -ENOMEM;
			goto send_locks;
		}
		lck->pid = flock->fl_pid;

		list_add_tail(&lck->llist, &locks_to_send);
	}

send_locks:
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		struct file_lock tmp_lock;
		int stored_rc;

		tmp_lock.fl_start = lck->offset;
		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     0, lck->length, &tmp_lock,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;	/* remember failure, keep going */
		list_del(&lck->llist);
		kfree(lck);
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
}
928
929static int
930cifs_push_locks(struct cifsFileInfo *cfile)
931{
932 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
933 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
934
935 if ((tcon->ses->capabilities & CAP_UNIX) &&
936 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
937 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
938 return cifs_push_posix_locks(cfile);
939
940 return cifs_push_mandatory_locks(cfile);
941}
942
03776f45
PS
/*
 * Decode a struct file_lock into the pieces the lock code needs: the
 * SMB lock type (*type), whether this is a lock or unlock request
 * (*lock / *unlock), and whether the caller may block (*wait_flag).
 * Unsupported flag combinations are only logged.
 */
static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
		bool *wait_flag)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	/* always use 64-bit offsets on the wire */
	*type = LOCKING_ANDX_LARGE_FILES;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
1da177e4 986
03776f45 987static int
4f6bcec9 988cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
03776f45
PS
989 bool wait_flag, bool posix_lck, int xid)
990{
991 int rc = 0;
992 __u64 length = 1 + flock->fl_end - flock->fl_start;
4f6bcec9
PS
993 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
994 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
85160e03 995 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
03776f45 996 __u16 netfid = cfile->netfid;
f05337c6 997
03776f45
PS
998 if (posix_lck) {
999 int posix_lock_type;
4f6bcec9
PS
1000
1001 rc = cifs_posix_lock_test(file, flock);
1002 if (!rc)
1003 return rc;
1004
03776f45
PS
1005 if (type & LOCKING_ANDX_SHARED_LOCK)
1006 posix_lock_type = CIFS_RDLCK;
1007 else
1008 posix_lock_type = CIFS_WRLCK;
4f6bcec9
PS
1009 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1010 1 /* get */, length, flock,
1011 posix_lock_type, wait_flag);
03776f45
PS
1012 return rc;
1013 }
1da177e4 1014
85160e03
PS
1015 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
1016 flock);
1017 if (!rc)
1018 return rc;
1019
03776f45
PS
1020 /* BB we could chain these into one lock request BB */
1021 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1022 flock->fl_start, 0, 1, type, 0, 0);
1023 if (rc == 0) {
1024 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1025 length, flock->fl_start, 1, 0,
1026 type, 0, 0);
1027 flock->fl_type = F_UNLCK;
1028 if (rc != 0)
1029 cERROR(1, "Error unlocking previously locked "
1030 "range %d during test of lock", rc);
1031 rc = 0;
1da177e4
LT
1032 return rc;
1033 }
7ee1af76 1034
03776f45
PS
1035 if (type & LOCKING_ANDX_SHARED_LOCK) {
1036 flock->fl_type = F_WRLCK;
1037 rc = 0;
1038 return rc;
7ee1af76
JA
1039 }
1040
03776f45
PS
1041 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1042 flock->fl_start, 0, 1,
1043 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
1044 if (rc == 0) {
1045 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1046 length, flock->fl_start, 1, 0,
1047 type | LOCKING_ANDX_SHARED_LOCK,
1048 0, 0);
1049 flock->fl_type = F_RDLCK;
1050 if (rc != 0)
1051 cERROR(1, "Error unlocking previously locked "
1052 "range %d during test of lock", rc);
1053 } else
1054 flock->fl_type = F_WRLCK;
1055
1056 rc = 0;
1057 return rc;
1058}
1059
/*
 * Handle F_SETLK/F_SETLKW: take (@lock) or drop (@unlock) a byte-range lock.
 *
 * Posix path: try to set the lock locally first (cifs_posix_lock_set); if
 * the range cannot be cached, send a CIFSSMBPosixLock "set" to the server.
 *
 * Windows path: for a lock, cifs_lock_add_if() may satisfy/queue it locally
 * (rc == 0 with nothing more to do); otherwise send the lock to the server
 * and, on success, record it in the inode's lock list so it can be replayed
 * on reconnect.  For an unlock, walk the stored lock list and release every
 * stored lock that the unlock range completely covers.
 *
 * On success the VFS-level lock state is updated via posix_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      0 /* set */, length, flock,
				      posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* rc == 0 here means the lock was handled locally */
		rc = cifs_lock_add_if(cinode, flock->fl_start, length,
				      type, netfid, wait_flag);
		if (rc < 0)
			return rc;
		else if (!rc)
			goto out;

		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
				 flock->fl_start, 0, 1, type, wait_flag, 0);
		if (rc == 0) {
			/* For Windows locks we must store them. */
			rc = cifs_lock_add(cinode, length, flock->fl_start,
					   type, netfid);
		}
	} else if (unlock) {
		/*
		 * For each stored lock that this unlock overlaps completely,
		 * unlock it.
		 */
		int stored_rc = 0;
		struct cifsLockInfo *li, *tmp;

		mutex_lock(&cinode->lock_mutex);
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			/* skip stored locks not fully inside unlock range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (cfile->netfid != li->netfid)
				continue;

			/*
			 * If brlocks are cached (e.g. under an oplock) no
			 * wire unlock is needed; otherwise tell the server.
			 */
			if (!cinode->can_cache_brlcks)
				stored_rc = CIFSSMBLock(xid, tcon, netfid,
							current->tgid,
							li->length, li->offset,
							1, 0, li->type, 0, 0);
			else
				stored_rc = 0;

			if (stored_rc)
				rc = stored_rc;
			else {
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
			}
		}
		mutex_unlock(&cinode->lock_mutex);
	}
out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
1149
1150int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1151{
1152 int rc, xid;
1153 int lock = 0, unlock = 0;
1154 bool wait_flag = false;
1155 bool posix_lck = false;
1156 struct cifs_sb_info *cifs_sb;
1157 struct cifs_tcon *tcon;
1158 struct cifsInodeInfo *cinode;
1159 struct cifsFileInfo *cfile;
1160 __u16 netfid;
1161 __u8 type;
1162
1163 rc = -EACCES;
1164 xid = GetXid();
1165
1166 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1167 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1168 flock->fl_start, flock->fl_end);
1169
1170 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1171
1172 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1173 cfile = (struct cifsFileInfo *)file->private_data;
1174 tcon = tlink_tcon(cfile->tlink);
1175 netfid = cfile->netfid;
1176 cinode = CIFS_I(file->f_path.dentry->d_inode);
1177
1178 if ((tcon->ses->capabilities & CAP_UNIX) &&
1179 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1180 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1181 posix_lck = true;
1182 /*
1183 * BB add code here to normalize offset and length to account for
1184 * negative length which we can not accept over the wire.
1185 */
1186 if (IS_GETLK(cmd)) {
4f6bcec9 1187 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
03776f45
PS
1188 FreeXid(xid);
1189 return rc;
1190 }
1191
1192 if (!lock && !unlock) {
1193 /*
1194 * if no lock or unlock then nothing to do since we do not
1195 * know what it is
1196 */
1197 FreeXid(xid);
1198 return -EOPNOTSUPP;
7ee1af76
JA
1199 }
1200
03776f45
PS
1201 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1202 xid);
1da177e4
LT
1203 FreeXid(xid);
1204 return rc;
1205}
1206
fbec9ab9 1207/* update the file size (if needed) after a write */
72432ffc 1208void
fbec9ab9
JL
1209cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1210 unsigned int bytes_written)
1211{
1212 loff_t end_of_write = offset + bytes_written;
1213
1214 if (end_of_write > cifsi->server_eof)
1215 cifsi->server_eof = end_of_write;
1216}
1217
/*
 * Synchronously write @write_size bytes from @write_data to the server at
 * *@poffset, on behalf of @pid, chunking by the mount's wsize and retrying
 * transparently on -EAGAIN (reopening an invalidated handle as needed).
 *
 * On success returns the number of bytes written and advances *@poffset;
 * also updates the cached server EOF and, if the file grew, i_size.  If
 * nothing at all was written, returns the error from the first attempt.
 */
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each wire request at the negotiated wsize */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			/* partial success: report bytes written so far */
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		/* i_size is protected by the inode spinlock */
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
1296
/*
 * Find an open handle on @cifs_inode usable for reading and take a reference
 * on it (caller must drop with cifsFileInfo_put).  When @fsuid_only is set
 * on a multiuser mount, only handles opened by the current fsuid qualify.
 * Returns NULL if no valid readable handle exists.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
630f3f0c 1330
/*
 * Find an open handle on @cifs_inode usable for writing and take a reference
 * on it (caller must drop with cifsFileInfo_put).  Prefers handles opened by
 * the current thread group; if none is found the list is rescanned accepting
 * any pid.  An invalidated handle is reopened here, which requires dropping
 * cifs_file_list_lock around the (blocking) reopen call -- reopen failures
 * just advance to the next candidate rather than restarting the scan.
 * Returns NULL if no writable handle could be found or revived.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			/* pin the handle before we may drop the list lock */
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
1401
/*
 * Write the byte range [@from, @to) of a page cache page back to the server
 * using any writable handle found for the inode.
 *
 * The range is clamped so the write never extends the file, and a page that
 * now lies entirely past i_size (racing truncate) is silently ignored.
 * Returns 0 on success, a negative error (-EFAULT/-EIO or write error)
 * otherwise.  The page is kmapped for the duration of the write.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* reject out-of-range or inverted byte ranges */
	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1455
/*
 * ->writepages for cifs: gather runs of consecutive dirty pages (up to
 * wsize worth) into a cifs_writedata and submit them with a single async
 * write (cifs_async_writev), honoring wbc's range/sync constraints.
 *
 * Falls back to generic_writepages() (one page at a time) when wsize is
 * smaller than a page.  On send failure pages are either redirtied
 * (-EAGAIN) or marked with an error; under WB_SYNC_ALL an -EAGAIN send is
 * retried with a freshly looked-up writable handle.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most one wsize worth of pages per batch */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* lock and validate pages; keep only a consecutive run */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
1da177e4 1654
/*
 * Write back a single locked page via cifs_partialpagewrite().
 *
 * Follows the writepage protocol: set the writeback flag (which also clears
 * the dirty tag in the radix tree) before issuing the write, then either
 * retry (-EAGAIN under WB_SYNC_ALL), redirty, mark an error, or mark the
 * page uptodate depending on the result.  The caller holds the page lock;
 * this function does not unlock it (see cifs_writepage for that).
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
1693
/*
 * ->writepage entry point: write the page while it is still locked, then
 * release the page lock (the locked variant deliberately leaves it held).
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return retval;
}
1700
/*
 * ->write_end: commit @copied bytes previously staged into @page at @pos.
 *
 * If the page is not fully uptodate (and the copy did not make it so), the
 * data is pushed to the server synchronously via cifs_write() with the
 * handle we already hold; otherwise the page is simply marked dirty for
 * later writeback.  Extends i_size under the inode spinlock when the write
 * ends past it.  Returns the number of bytes committed or a negative error.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* with "forcemand"-style pid forwarding, reuse the opener's pid */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
1761
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], then, if
 * we do not hold a read oplock, invalidate the page cache mapping so stale
 * cached data cannot be served afterwards, and finally ask the server to
 * flush its write cache for the handle (unless disabled by "nossync").
 * Invalidate-phase errors are logged but deliberately ignored.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
1798
/*
 * Plain fsync: flush dirty pages in [start, end] to the server, then issue
 * an SMB Flush for the handle (unless disabled by the "nossync" mount flag).
 * Unlike cifs_strict_fsync() this never invalidates the page cache.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
1826
1da177e4
LT
1827/*
1828 * As file closes, flush all cached write data for this inode checking
1829 * for write behind errors.
1830 */
75e1fcc0 1831int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1832{
fb8c4b14 1833 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1834 int rc = 0;
1835
eb4b756b 1836 if (file->f_mode & FMODE_WRITE)
d3f1322a 1837 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1838
b6b38f70 1839 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1840
1841 return rc;
1842}
1843
72432ffc
PS
1844static int
1845cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1846{
1847 int rc = 0;
1848 unsigned long i;
1849
1850 for (i = 0; i < num_pages; i++) {
1851 pages[i] = alloc_page(__GFP_HIGHMEM);
1852 if (!pages[i]) {
1853 /*
1854 * save number of pages we have already allocated and
1855 * return with ENOMEM error
1856 */
1857 num_pages = i;
1858 rc = -ENOMEM;
1859 goto error;
1860 }
1861 }
1862
1863 return rc;
1864
1865error:
1866 for (i = 0; i < num_pages; i++)
1867 put_page(pages[i]);
1868 return rc;
1869}
1870
1871static inline
1872size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1873{
1874 size_t num_pages;
1875 size_t clen;
1876
1877 clen = min_t(const size_t, len, wsize);
1878 num_pages = clen / PAGE_CACHE_SIZE;
1879 if (clen % PAGE_CACHE_SIZE)
1880 num_pages++;
1881
1882 if (cur_len)
1883 *cur_len = clen;
1884
1885 return num_pages;
1886}
1887
1888static ssize_t
1889cifs_iovec_write(struct file *file, const struct iovec *iov,
1890 unsigned long nr_segs, loff_t *poffset)
1891{
76429c14
PS
1892 unsigned int written;
1893 unsigned long num_pages, npages, i;
1894 size_t copied, len, cur_len;
1895 ssize_t total_written = 0;
72432ffc
PS
1896 struct kvec *to_send;
1897 struct page **pages;
1898 struct iov_iter it;
1899 struct inode *inode;
1900 struct cifsFileInfo *open_file;
96daf2b0 1901 struct cifs_tcon *pTcon;
72432ffc 1902 struct cifs_sb_info *cifs_sb;
fa2989f4 1903 struct cifs_io_parms io_parms;
72432ffc 1904 int xid, rc;
d4ffff1f 1905 __u32 pid;
72432ffc
PS
1906
1907 len = iov_length(iov, nr_segs);
1908 if (!len)
1909 return 0;
1910
1911 rc = generic_write_checks(file, poffset, &len, 0);
1912 if (rc)
1913 return rc;
1914
1915 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1916 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1917
1918 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1919 if (!pages)
1920 return -ENOMEM;
1921
1922 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1923 if (!to_send) {
1924 kfree(pages);
1925 return -ENOMEM;
1926 }
1927
1928 rc = cifs_write_allocate_pages(pages, num_pages);
1929 if (rc) {
1930 kfree(pages);
1931 kfree(to_send);
1932 return rc;
1933 }
1934
1935 xid = GetXid();
1936 open_file = file->private_data;
d4ffff1f
PS
1937
1938 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1939 pid = open_file->pid;
1940 else
1941 pid = current->tgid;
1942
72432ffc
PS
1943 pTcon = tlink_tcon(open_file->tlink);
1944 inode = file->f_path.dentry->d_inode;
1945
1946 iov_iter_init(&it, iov, nr_segs, len, 0);
1947 npages = num_pages;
1948
1949 do {
1950 size_t save_len = cur_len;
1951 for (i = 0; i < npages; i++) {
1952 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1953 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1954 copied);
1955 cur_len -= copied;
1956 iov_iter_advance(&it, copied);
1957 to_send[i+1].iov_base = kmap(pages[i]);
1958 to_send[i+1].iov_len = copied;
1959 }
1960
1961 cur_len = save_len - cur_len;
1962
1963 do {
1964 if (open_file->invalidHandle) {
1965 rc = cifs_reopen_file(open_file, false);
1966 if (rc != 0)
1967 break;
1968 }
fa2989f4 1969 io_parms.netfid = open_file->netfid;
d4ffff1f 1970 io_parms.pid = pid;
fa2989f4
PS
1971 io_parms.tcon = pTcon;
1972 io_parms.offset = *poffset;
1973 io_parms.length = cur_len;
1974 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1975 npages, 0);
72432ffc
PS
1976 } while (rc == -EAGAIN);
1977
1978 for (i = 0; i < npages; i++)
1979 kunmap(pages[i]);
1980
1981 if (written) {
1982 len -= written;
1983 total_written += written;
1984 cifs_update_eof(CIFS_I(inode), *poffset, written);
1985 *poffset += written;
1986 } else if (rc < 0) {
1987 if (!total_written)
1988 total_written = rc;
1989 break;
1990 }
1991
1992 /* get length and number of kvecs of the next write */
1993 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1994 } while (len > 0);
1995
1996 if (total_written > 0) {
1997 spin_lock(&inode->i_lock);
1998 if (*poffset > inode->i_size)
1999 i_size_write(inode, *poffset);
2000 spin_unlock(&inode->i_lock);
2001 }
2002
2003 cifs_stats_bytes_written(pTcon, total_written);
2004 mark_inode_dirty_sync(inode);
2005
2006 for (i = 0; i < num_pages; i++)
2007 put_page(pages[i]);
2008 kfree(to_send);
2009 kfree(pages);
2010 FreeXid(xid);
2011 return total_written;
2012}
2013
0b81c1c4 2014ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
2015 unsigned long nr_segs, loff_t pos)
2016{
2017 ssize_t written;
2018 struct inode *inode;
2019
2020 inode = iocb->ki_filp->f_path.dentry->d_inode;
2021
2022 /*
2023 * BB - optimize the way when signing is disabled. We can drop this
2024 * extra memory-to-memory copying and use iovec buffers for constructing
2025 * write request.
2026 */
2027
2028 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2029 if (written > 0) {
2030 CIFS_I(inode)->invalid_mapping = true;
2031 iocb->ki_pos = pos;
2032 }
2033
2034 return written;
2035}
2036
2037ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2038 unsigned long nr_segs, loff_t pos)
2039{
2040 struct inode *inode;
2041
2042 inode = iocb->ki_filp->f_path.dentry->d_inode;
2043
2044 if (CIFS_I(inode)->clientCanCacheAll)
2045 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2046
2047 /*
2048 * In strict cache mode we need to write the data to the server exactly
2049 * from the pos to pos+len-1 rather than flush all affected pages
2050 * because it may cause a error with mandatory locks on these pages but
2051 * not on the region from pos to ppos+len-1.
2052 */
2053
2054 return cifs_user_writev(iocb, iov, nr_segs, pos);
2055}
2056
a70307ee
PS
/*
 * Uncached read into a user iovec.
 *
 * Issues synchronous CIFSSMBRead calls, at most 'rsize' bytes at a time,
 * copying each response payload into @iov at the running @iov_offset and
 * advancing *@poffset.  Returns the total number of bytes read, or a
 * negative error if nothing was read at all; a short read after partial
 * progress returns the partial count.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	/* nothing requested - trivially done */
	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	/* forward the opener's pid to the server if the mount asked for it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		/* -EAGAIN means a stale handle; reopen and retry the read */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				/*
				 * Payload starts DataOffset bytes past the
				 * 4-byte RFC1001 length header of the SMB.
				 */
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				/* return the response buffer to its pool */
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				/* partial success: report what we got */
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
2151
0b81c1c4 2152ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2153 unsigned long nr_segs, loff_t pos)
2154{
2155 ssize_t read;
2156
2157 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2158 if (read > 0)
2159 iocb->ki_pos = pos;
2160
2161 return read;
2162}
2163
2164ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2165 unsigned long nr_segs, loff_t pos)
2166{
2167 struct inode *inode;
2168
2169 inode = iocb->ki_filp->f_path.dentry->d_inode;
2170
2171 if (CIFS_I(inode)->clientCanCacheRead)
2172 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2173
2174 /*
2175 * In strict cache mode we need to read from the server all the time
2176 * if we don't have level II oplock because the server can delay mtime
2177 * change - so we can't make a decision about inode invalidating.
2178 * And we can also fail with pagereading if there are mandatory locks
2179 * on pages affected by this read but not on the region from pos to
2180 * pos+len-1.
2181 */
2182
2183 return cifs_user_readv(iocb, iov, nr_segs, pos);
2184}
1da177e4
LT
2185
2186static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 2187 loff_t *poffset)
1da177e4
LT
2188{
2189 int rc = -EACCES;
2190 unsigned int bytes_read = 0;
2191 unsigned int total_read;
2192 unsigned int current_read_size;
5eba8ab3 2193 unsigned int rsize;
1da177e4 2194 struct cifs_sb_info *cifs_sb;
96daf2b0 2195 struct cifs_tcon *pTcon;
1da177e4
LT
2196 int xid;
2197 char *current_offset;
2198 struct cifsFileInfo *open_file;
d4ffff1f 2199 struct cifs_io_parms io_parms;
ec637e3f 2200 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 2201 __u32 pid;
1da177e4
LT
2202
2203 xid = GetXid();
e6a00296 2204 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 2205
5eba8ab3
JL
2206 /* FIXME: set up handlers for larger reads and/or convert to async */
2207 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2208
1da177e4 2209 if (file->private_data == NULL) {
0f3bc09e 2210 rc = -EBADF;
1da177e4 2211 FreeXid(xid);
0f3bc09e 2212 return rc;
1da177e4 2213 }
c21dfb69 2214 open_file = file->private_data;
13cfb733 2215 pTcon = tlink_tcon(open_file->tlink);
1da177e4 2216
d4ffff1f
PS
2217 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2218 pid = open_file->pid;
2219 else
2220 pid = current->tgid;
2221
1da177e4 2222 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 2223 cFYI(1, "attempting read on write only file instance");
1da177e4 2224
fb8c4b14 2225 for (total_read = 0, current_offset = read_data;
1da177e4
LT
2226 read_size > total_read;
2227 total_read += bytes_read, current_offset += bytes_read) {
5eba8ab3
JL
2228 current_read_size = min_t(uint, read_size - total_read, rsize);
2229
f9f5c817
SF
2230 /* For windows me and 9x we do not want to request more
2231 than it negotiated since it will refuse the read then */
fb8c4b14 2232 if ((pTcon->ses) &&
f9f5c817 2233 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
7748dd6e 2234 current_read_size = min_t(uint, current_read_size,
c974befa 2235 CIFSMaxBufSize);
f9f5c817 2236 }
1da177e4
LT
2237 rc = -EAGAIN;
2238 while (rc == -EAGAIN) {
cdff08e7 2239 if (open_file->invalidHandle) {
15886177 2240 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
2241 if (rc != 0)
2242 break;
2243 }
d4ffff1f
PS
2244 io_parms.netfid = open_file->netfid;
2245 io_parms.pid = pid;
2246 io_parms.tcon = pTcon;
2247 io_parms.offset = *poffset;
2248 io_parms.length = current_read_size;
2249 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2250 &current_offset, &buf_type);
1da177e4
LT
2251 }
2252 if (rc || (bytes_read == 0)) {
2253 if (total_read) {
2254 break;
2255 } else {
2256 FreeXid(xid);
2257 return rc;
2258 }
2259 } else {
a4544347 2260 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
2261 *poffset += bytes_read;
2262 }
2263 }
2264 FreeXid(xid);
2265 return total_read;
2266}
2267
ca83ce3d
JL
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	/*
	 * Lock the page and tell the VM we kept it locked; the fault path
	 * will unlock it after installing the writable PTE.
	 * NOTE(review): the page is not re-checked against truncation
	 * (page->mapping) after locking - presumably handled by the caller;
	 * confirm before relying on it.
	 */
	lock_page(page);
	return VM_FAULT_LOCKED;
}
2280
/* VMA operations for cifs mmaps: generic fault-in, write-protect via mkwrite */
static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};
2285
7a6a19b1
PS
2286int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2287{
2288 int rc, xid;
2289 struct inode *inode = file->f_path.dentry->d_inode;
2290
2291 xid = GetXid();
2292
6feb9891
PS
2293 if (!CIFS_I(inode)->clientCanCacheRead) {
2294 rc = cifs_invalidate_mapping(inode);
2295 if (rc)
2296 return rc;
2297 }
7a6a19b1
PS
2298
2299 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2300 if (rc == 0)
2301 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
2302 FreeXid(xid);
2303 return rc;
2304}
2305
1da177e4
LT
/*
 * mmap for regular (non-strict) mounts: revalidate the file's cached
 * attributes/data first, then defer to generic_file_mmap and install the
 * cifs VMA operations so writes go through cifs_page_mkwrite.
 */
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}
2323
1da177e4
LT
/*
 * ->readpages: batch contiguous pages from @page_list into rsize-bounded
 * async read requests.  Pages that get submitted are moved into each
 * request's rdata->pages list; pages we fail on are put back on the LRU
 * and released so the VFS can fall back to ->readpage.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the opener's pid to the server if the mount asked for it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/* take a reference on the open file for the async request */
		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		/* -EAGAIN means a stale handle; reopen and resubmit */
		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			/* submit failed: hand the pages back to the VM */
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
2473
/*
 * Fill one locked pagecache page: try fscache first, otherwise do a
 * synchronous cifs_read into the kmapped page, zero the tail, mark the
 * page uptodate and push it back to fscache.  Returns 0 on success or a
 * negative error from the read; the caller is responsible for unlocking
 * the page.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	/* extra ref balanced by page_cache_release() below */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* short read: zero the remainder so no stale data is exposed */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2517
/*
 * ->readpage: read the page at its file offset via cifs_readpage_worker,
 * then unlock the page as the aops contract requires.
 */
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}
2542
a403a0a3
SF
2543static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2544{
2545 struct cifsFileInfo *open_file;
2546
4477288a 2547 spin_lock(&cifs_file_list_lock);
a403a0a3 2548 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2549 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2550 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2551 return 1;
2552 }
2553 }
4477288a 2554 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2555 return 0;
2556}
2557
1da177e4
LT
2558/* We do not want to update the file size from server for inodes
2559 open for write - to avoid races with writepage extending
2560 the file - in the future we could consider allowing
fb8c4b14 2561 refreshing the inode only on increases in the file size
1da177e4
LT
2562 but this is tricky to do without racing with writebehind
2563 page caching in the current Linux kernel design */
4b18f2a9 2564bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2565{
a403a0a3 2566 if (!cifsInode)
4b18f2a9 2567 return true;
50c2f753 2568
a403a0a3
SF
2569 if (is_inode_writable(cifsInode)) {
2570 /* This inode is open for write at least once */
c32a0b68
SF
2571 struct cifs_sb_info *cifs_sb;
2572
c32a0b68 2573 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2574 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2575 /* since no page cache to corrupt on directio
c32a0b68 2576 we can change size safely */
4b18f2a9 2577 return true;
c32a0b68
SF
2578 }
2579
fb8c4b14 2580 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2581 return true;
7ba52631 2582
4b18f2a9 2583 return false;
23e7dd7d 2584 } else
4b18f2a9 2585 return true;
1da177e4
LT
2586}
2587
d9414774
NP
/*
 * ->write_begin: grab and prepare the pagecache page covering [pos, pos+len).
 * The page is returned locked in *pagep.  It may be left not-uptodate when
 * the write is short and no oplock allows faking it - cifs_write_end then
 * falls back to a sync write of just the copied range.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero the parts the write won't cover */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2659
85f2d6b4
SJ
2660static int cifs_release_page(struct page *page, gfp_t gfp)
2661{
2662 if (PagePrivate(page))
2663 return 0;
2664
2665 return cifs_fscache_release_page(page, gfp);
2666}
2667
2668static void cifs_invalidate_page(struct page *page, unsigned long offset)
2669{
2670 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2671
2672 if (offset == 0)
2673 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2674}
2675
9ad1506b
PS
/*
 * ->launder_page: synchronously write back a dirty page that is about to
 * be invalidated, then drop its fscache copy.  Returns the writeback
 * result (0 if the page was already clean).
 */
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	/* WB_SYNC_ALL over exactly this page's byte range */
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
2696
/*
 * Workqueue handler for a server-initiated oplock break: break any local
 * lease, flush dirty data (and, when read caching is also being lost, wait
 * for the flush and drop cached pages), re-push byte-range locks, and
 * finally acknowledge the break to the server unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* downgrade or drop the local lease to match the new state */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* losing read caching too: wait and drop the cache */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* re-establish cached byte-range locks with the server */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
2737
/* Default address-space operations, used when rsize can hold a full page */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
273d81d6
DK
2750
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no .readpages: see comment above */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};