]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - fs/cifs/file.c
Merge branches 'pm-cpufreq-fixes' and 'pm-sleep-fixes'
[mirror_ubuntu-bionic-kernel.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
07b92d0d 46
1da177e4
LT
47static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
e10f7b55
JL
60 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
7fc8f4e9 63}
e10f7b55 64
608712fe 65static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 66{
608712fe 67 u32 posix_flags = 0;
e10f7b55 68
7fc8f4e9 69 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 70 posix_flags = SMB_O_RDONLY;
7fc8f4e9 71 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
72 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
07b92d0d 76 if (flags & O_CREAT) {
608712fe 77 posix_flags |= SMB_O_CREAT;
07b92d0d
SF
78 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
f96637be
JP
81 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
07b92d0d 83
608712fe
JL
84 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 87 if (flags & O_DSYNC)
608712fe 88 posix_flags |= SMB_O_SYNC;
7fc8f4e9 89 if (flags & O_DIRECTORY)
608712fe 90 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 91 if (flags & O_NOFOLLOW)
608712fe 92 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 93 if (flags & O_DIRECT)
608712fe 94 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
95
96 return posix_flags;
1da177e4
LT
97}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
55aa2e09
SF
107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
1da177e4
LT
109 else
110 return FILE_OPEN;
111}
112
/*
 * Open a file using the CIFS POSIX protocol extensions.
 *
 * On success *pnetfid holds the new file handle and *poplock the granted
 * oplock.  If @pinode is non-NULL, the inode is built (or refreshed) from
 * the FILE_UNIX_BASIC_INFO returned by the server and passed back through
 * it; when the server returns Type == -1, the open succeeded but no info
 * was returned, and the caller must do a qpathinfo itself.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* caller already has an inode: refresh its attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173
/*
 * Open a file the "NT way" (no POSIX extensions): convert the open(2)
 * flags to a desired-access mask and create disposition, issue the
 * protocol open through server->ops->open(), then refresh the inode
 * metadata from the server's reply.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the FILE_ALL_INFO the open reply may fill in */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode attributes; unix extensions ignore the reply buf */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
252
63b7d3a4
PS
253static bool
254cifs_has_mand_locks(struct cifsInodeInfo *cinode)
255{
256 struct cifs_fid_locks *cur;
257 bool has_locks = false;
258
259 down_read(&cinode->lock_sem);
260 list_for_each_entry(cur, &cinode->llist, llist) {
261 if (!list_empty(&cur->locks)) {
262 has_locks = true;
263 break;
264 }
265 }
266 up_read(&cinode->lock_sem);
267 return has_locks;
268}
269
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo),
 * link it onto the inode's and tcon's open-file lists, and record the
 * server-granted fid/oplock.  Returns NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	/* attach this fid's (empty) lock list to the inode */
	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;		/* initial reference */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have updated the pending open's oplock level */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	/* set_fid may set fid->purge_cache; checked below outside the lock */
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
344
764a1b1a
JL
345struct cifsFileInfo *
346cifsFileInfo_get(struct cifsFileInfo *cifs_file)
347{
3afca265 348 spin_lock(&cifs_file->file_info_lock);
764a1b1a 349 cifsFileInfo_get_locked(cifs_file);
3afca265 350 spin_unlock(&cifs_file->file_info_lock);
764a1b1a
JL
351 return cifs_file;
352}
353
cdff08e7
SF
354/*
355 * Release a reference on the file private data. This may involve closing
5f6dbc9e 356 * the filehandle out on the server. Must be called without holding
3afca265 357 * tcon->open_file_lock and cifs_file->file_info_lock.
cdff08e7 358 */
b33879aa
JL
359void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
360{
2b0143b5 361 struct inode *inode = d_inode(cifs_file->dentry);
96daf2b0 362 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
233839b1 363 struct TCP_Server_Info *server = tcon->ses->server;
e66673e3 364 struct cifsInodeInfo *cifsi = CIFS_I(inode);
24261fc2
MG
365 struct super_block *sb = inode->i_sb;
366 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
cdff08e7 367 struct cifsLockInfo *li, *tmp;
233839b1
PS
368 struct cifs_fid fid;
369 struct cifs_pending_open open;
ca7df8e0 370 bool oplock_break_cancelled;
cdff08e7 371
3afca265
SF
372 spin_lock(&tcon->open_file_lock);
373
374 spin_lock(&cifs_file->file_info_lock);
5f6dbc9e 375 if (--cifs_file->count > 0) {
3afca265
SF
376 spin_unlock(&cifs_file->file_info_lock);
377 spin_unlock(&tcon->open_file_lock);
cdff08e7
SF
378 return;
379 }
3afca265 380 spin_unlock(&cifs_file->file_info_lock);
cdff08e7 381
233839b1
PS
382 if (server->ops->get_lease_key)
383 server->ops->get_lease_key(inode, &fid);
384
385 /* store open in pending opens to make sure we don't miss lease break */
386 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
387
cdff08e7
SF
388 /* remove it from the lists */
389 list_del(&cifs_file->flist);
390 list_del(&cifs_file->tlist);
391
392 if (list_empty(&cifsi->openFileList)) {
f96637be 393 cifs_dbg(FYI, "closing last open instance for inode %p\n",
2b0143b5 394 d_inode(cifs_file->dentry));
25364138
PS
395 /*
396 * In strict cache mode we need invalidate mapping on the last
397 * close because it may cause a error when we open this file
398 * again and get at least level II oplock.
399 */
4f8ba8a0 400 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
aff8d5ca 401 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
c6723628 402 cifs_set_oplock_level(cifsi, 0);
cdff08e7 403 }
3afca265
SF
404
405 spin_unlock(&tcon->open_file_lock);
cdff08e7 406
ca7df8e0 407 oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
ad635942 408
cdff08e7 409 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
0ff78a22 410 struct TCP_Server_Info *server = tcon->ses->server;
6d5786a3 411 unsigned int xid;
0ff78a22 412
6d5786a3 413 xid = get_xid();
0ff78a22 414 if (server->ops->close)
760ad0ca
PS
415 server->ops->close(xid, tcon, &cifs_file->fid);
416 _free_xid(xid);
cdff08e7
SF
417 }
418
ca7df8e0
SP
419 if (oplock_break_cancelled)
420 cifs_done_oplock_break(cifsi);
421
233839b1
PS
422 cifs_del_pending_open(&open);
423
f45d3416
PS
424 /*
425 * Delete any outstanding lock records. We'll lose them when the file
cdff08e7
SF
426 * is closed anyway.
427 */
1b4b55a1 428 down_write(&cifsi->lock_sem);
f45d3416 429 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
cdff08e7 430 list_del(&li->llist);
85160e03 431 cifs_del_lock_waiters(li);
cdff08e7 432 kfree(li);
b33879aa 433 }
f45d3416
PS
434 list_del(&cifs_file->llist->llist);
435 kfree(cifs_file->llist);
1b4b55a1 436 up_write(&cifsi->lock_sem);
cdff08e7
SF
437
438 cifs_put_tlink(cifs_file->tlink);
439 dput(cifs_file->dentry);
24261fc2 440 cifs_sb_deactive(sb);
cdff08e7 441 kfree(cifs_file);
b33879aa
JL
442}
443
/*
 * VFS ->open() for regular files.  Tries a POSIX-extension open first when
 * the server supports it, otherwise falls back to the NT-style open, then
 * builds the cifsFileInfo private data for the struct file.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* O_DIRECT on a strict-IO mount switches to the direct file ops */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* try the POSIX-extensions open first when supported */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register as pending so a concurrent lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open; the handle has no local owner */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
570
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	/*
	 * Push POSIX-style locks when the server advertises the FCNTL
	 * capability and the mount did not disable POSIX brlocks;
	 * otherwise push mandatory-style locks.
	 */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
602
/*
 * Reopen a file whose handle was invalidated (typically after a session
 * reconnect).  When @can_flush is true, dirty pages are written back and
 * the inode metadata is refreshed from the server after a successful
 * reopen; byte-range locks are reacquired when needed.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened it */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	/* note: ops->open may clear oparms.reconnect on durable-handle paths */
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
760
761int cifs_close(struct inode *inode, struct file *file)
762{
77970693
JL
763 if (file->private_data != NULL) {
764 cifsFileInfo_put(file->private_data);
765 file->private_data = NULL;
766 }
7ee1af76 767
cdff08e7
SF
768 /* return code from the ->release op is always ignored */
769 return 0;
1da177e4
LT
770}
771
52ace1ef
SF
772void
773cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
774{
f2cca6a7 775 struct cifsFileInfo *open_file;
52ace1ef
SF
776 struct list_head *tmp;
777 struct list_head *tmp1;
f2cca6a7
PS
778 struct list_head tmp_list;
779
780 cifs_dbg(FYI, "Reopen persistent handles");
781 INIT_LIST_HEAD(&tmp_list);
52ace1ef
SF
782
783 /* list all files open on tree connection, reopen resilient handles */
784 spin_lock(&tcon->open_file_lock);
f2cca6a7 785 list_for_each(tmp, &tcon->openFileList) {
52ace1ef 786 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
f2cca6a7
PS
787 if (!open_file->invalidHandle)
788 continue;
789 cifsFileInfo_get(open_file);
790 list_add_tail(&open_file->rlist, &tmp_list);
52ace1ef
SF
791 }
792 spin_unlock(&tcon->open_file_lock);
f2cca6a7
PS
793
794 list_for_each_safe(tmp, tmp1, &tmp_list) {
795 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
796 cifs_reopen_file(open_file, false /* do not flush */);
797 list_del_init(&open_file->rlist);
798 cifsFileInfo_put(open_file);
799 }
52ace1ef
SF
800}
801
/*
 * VFS ->release() for directories: close any uncompleted readdir search
 * on the server, release the search buffer and free the private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* release the network buffer held by an in-progress search, if any */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
852
85160e03 853static struct cifsLockInfo *
fbd35aca 854cifs_lock_init(__u64 offset, __u64 length, __u8 type)
7ee1af76 855{
a88b4707 856 struct cifsLockInfo *lock =
fb8c4b14 857 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
858 if (!lock)
859 return lock;
860 lock->offset = offset;
861 lock->length = length;
862 lock->type = type;
a88b4707
PS
863 lock->pid = current->tgid;
864 INIT_LIST_HEAD(&lock->blist);
865 init_waitqueue_head(&lock->block_q);
866 return lock;
85160e03
PS
867}
868
f7ba7fe6 869void
85160e03
PS
870cifs_del_lock_waiters(struct cifsLockInfo *lock)
871{
872 struct cifsLockInfo *li, *tmp;
873 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
874 list_del_init(&li->blist);
875 wake_up(&li->block_q);
876 }
877}
878
/* operation kinds for conflict checking; see rw_check parameters below */
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
/*
 * Scan one fid's lock list for a lock conflicting with the range
 * [offset, offset+length).  On conflict, optionally report the
 * conflicting lock through *conf_lock and return true.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* non-overlapping ranges never conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* a matching shared lock held by the same owner is compatible */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
914
579f9053 915bool
55157dfb 916cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
579f9053 917 __u8 type, struct cifsLockInfo **conf_lock,
081c0414 918 int rw_check)
161ebf9f 919{
fbd35aca 920 bool rc = false;
f45d3416 921 struct cifs_fid_locks *cur;
2b0143b5 922 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
fbd35aca 923
f45d3416
PS
924 list_for_each_entry(cur, &cinode->llist, llist) {
925 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
579f9053 926 cfile, conf_lock, rw_check);
fbd35aca
PS
927 if (rc)
928 break;
929 }
fbd35aca
PS
930
931 return rc;
161ebf9f
PS
932}
933
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read-only walk of the cached lock lists */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner back to the caller */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* no local conflict, but the cache isn't authoritative */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
971
161ebf9f 972static void
fbd35aca 973cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 974{
2b0143b5 975 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1b4b55a1 976 down_write(&cinode->lock_sem);
f45d3416 977 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 978 up_write(&cinode->lock_sem);
7ee1af76
JA
979}
980
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and brlocks are cached - keep the lock local */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* queue ourselves on the conflicting lock's waiter list */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		/*
		 * Sleep until cifs_del_lock_waiters() unhooks us (our blist
		 * entry becomes a singleton again), then retry from scratch;
		 * the conflict may have moved to a different lock meanwhile.
		 */
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: take ourselves off the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1027
9a5101c8
PS
1028/*
1029 * Check if there is another lock that prevents us to set the lock (posix
1030 * style). If such a lock exists, update the flock structure with its
1031 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1032 * or leave it the same if we can't. Returns 0 if we don't need to request to
1033 * the server or 1 otherwise.
1034 */
85160e03 1035static int
4f6bcec9
PS
1036cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1037{
1038 int rc = 0;
496ad9aa 1039 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
4f6bcec9
PS
1040 unsigned char saved_type = flock->fl_type;
1041
50792760
PS
1042 if ((flock->fl_flags & FL_POSIX) == 0)
1043 return 1;
1044
1b4b55a1 1045 down_read(&cinode->lock_sem);
4f6bcec9
PS
1046 posix_test_lock(file, flock);
1047
1048 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1049 flock->fl_type = saved_type;
1050 rc = 1;
1051 }
1052
1b4b55a1 1053 up_read(&cinode->lock_sem);
4f6bcec9
PS
1054 return rc;
1055}
1056
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* cache is not authoritative - caller must ask the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* blocked on another posix lock - wait for it, then retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		/* interrupted: drop off the blocked-locks list */
		posix_unblock_lock(flock);
	}
	return rc;
}
1089
d39a4f71 1090int
4f6bcec9 1091cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
85160e03 1092{
6d5786a3
PS
1093 unsigned int xid;
1094 int rc = 0, stored_rc;
85160e03
PS
1095 struct cifsLockInfo *li, *tmp;
1096 struct cifs_tcon *tcon;
0013fb4c 1097 unsigned int num, max_num, max_buf;
32b9aaf1
PS
1098 LOCKING_ANDX_RANGE *buf, *cur;
1099 int types[] = {LOCKING_ANDX_LARGE_FILES,
1100 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1101 int i;
85160e03 1102
6d5786a3 1103 xid = get_xid();
85160e03
PS
1104 tcon = tlink_tcon(cfile->tlink);
1105
0013fb4c
PS
1106 /*
1107 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1108 * and check it for zero before using.
1109 */
1110 max_buf = tcon->ses->server->maxBuf;
1111 if (!max_buf) {
6d5786a3 1112 free_xid(xid);
0013fb4c
PS
1113 return -EINVAL;
1114 }
1115
1116 max_num = (max_buf - sizeof(struct smb_hdr)) /
1117 sizeof(LOCKING_ANDX_RANGE);
4b99d39b 1118 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
32b9aaf1 1119 if (!buf) {
6d5786a3 1120 free_xid(xid);
e2f2886a 1121 return -ENOMEM;
32b9aaf1
PS
1122 }
1123
1124 for (i = 0; i < 2; i++) {
1125 cur = buf;
1126 num = 0;
f45d3416 1127 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
32b9aaf1
PS
1128 if (li->type != types[i])
1129 continue;
1130 cur->Pid = cpu_to_le16(li->pid);
1131 cur->LengthLow = cpu_to_le32((u32)li->length);
1132 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1133 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1134 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1135 if (++num == max_num) {
4b4de76e
PS
1136 stored_rc = cifs_lockv(xid, tcon,
1137 cfile->fid.netfid,
04a6aa8a
PS
1138 (__u8)li->type, 0, num,
1139 buf);
32b9aaf1
PS
1140 if (stored_rc)
1141 rc = stored_rc;
1142 cur = buf;
1143 num = 0;
1144 } else
1145 cur++;
1146 }
1147
1148 if (num) {
4b4de76e 1149 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
04a6aa8a 1150 (__u8)types[i], 0, num, buf);
32b9aaf1
PS
1151 if (stored_rc)
1152 rc = stored_rc;
1153 }
85160e03
PS
1154 }
1155
32b9aaf1 1156 kfree(buf);
6d5786a3 1157 free_xid(xid);
85160e03
PS
1158 return rc;
1159}
1160
/*
 * Derive a 32-bit lock-owner id from the VFS owner pointer. XORing the
 * hash with cifs_lock_secret keeps the raw kernel pointer value from
 * being sent on the wire as-is.
 */
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
1166
/*
 * Snapshot of one POSIX lock. These are preallocated so they can be
 * filled while holding flc_lock (where no allocation is allowed) and
 * sent to the server afterwards by cifs_push_posix_locks().
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* hashed lock owner, sent as the pid */
	__u16 netfid;		/* file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1175
4f6bcec9 1176static int
b8db928b 1177cifs_push_posix_locks(struct cifsFileInfo *cfile)
4f6bcec9 1178{
2b0143b5 1179 struct inode *inode = d_inode(cfile->dentry);
4f6bcec9 1180 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
bd61e0a9
JL
1181 struct file_lock *flock;
1182 struct file_lock_context *flctx = inode->i_flctx;
e084c1bd 1183 unsigned int count = 0, i;
4f6bcec9 1184 int rc = 0, xid, type;
d5751469
PS
1185 struct list_head locks_to_send, *el;
1186 struct lock_to_push *lck, *tmp;
4f6bcec9 1187 __u64 length;
4f6bcec9 1188
6d5786a3 1189 xid = get_xid();
4f6bcec9 1190
bd61e0a9
JL
1191 if (!flctx)
1192 goto out;
d5751469 1193
e084c1bd
JL
1194 spin_lock(&flctx->flc_lock);
1195 list_for_each(el, &flctx->flc_posix) {
1196 count++;
1197 }
1198 spin_unlock(&flctx->flc_lock);
1199
4f6bcec9
PS
1200 INIT_LIST_HEAD(&locks_to_send);
1201
d5751469 1202 /*
e084c1bd
JL
1203 * Allocating count locks is enough because no FL_POSIX locks can be
1204 * added to the list while we are holding cinode->lock_sem that
ce85852b 1205 * protects locking operations of this inode.
d5751469 1206 */
e084c1bd 1207 for (i = 0; i < count; i++) {
d5751469
PS
1208 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1209 if (!lck) {
1210 rc = -ENOMEM;
1211 goto err_out;
1212 }
1213 list_add_tail(&lck->llist, &locks_to_send);
1214 }
1215
d5751469 1216 el = locks_to_send.next;
6109c850 1217 spin_lock(&flctx->flc_lock);
bd61e0a9 1218 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
d5751469 1219 if (el == &locks_to_send) {
ce85852b
PS
1220 /*
1221 * The list ended. We don't have enough allocated
1222 * structures - something is really wrong.
1223 */
f96637be 1224 cifs_dbg(VFS, "Can't push all brlocks!\n");
d5751469
PS
1225 break;
1226 }
4f6bcec9
PS
1227 length = 1 + flock->fl_end - flock->fl_start;
1228 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1229 type = CIFS_RDLCK;
1230 else
1231 type = CIFS_WRLCK;
d5751469 1232 lck = list_entry(el, struct lock_to_push, llist);
3d22462a 1233 lck->pid = hash_lockowner(flock->fl_owner);
4b4de76e 1234 lck->netfid = cfile->fid.netfid;
d5751469
PS
1235 lck->length = length;
1236 lck->type = type;
1237 lck->offset = flock->fl_start;
4f6bcec9 1238 }
6109c850 1239 spin_unlock(&flctx->flc_lock);
4f6bcec9
PS
1240
1241 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
4f6bcec9
PS
1242 int stored_rc;
1243
4f6bcec9 1244 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
c5fd363d 1245 lck->offset, lck->length, NULL,
4f6bcec9
PS
1246 lck->type, 0);
1247 if (stored_rc)
1248 rc = stored_rc;
1249 list_del(&lck->llist);
1250 kfree(lck);
1251 }
1252
d5751469 1253out:
6d5786a3 1254 free_xid(xid);
4f6bcec9 1255 return rc;
d5751469
PS
1256err_out:
1257 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1258 list_del(&lck->llist);
1259 kfree(lck);
1260 }
1261 goto out;
4f6bcec9
PS
1262}
1263
9ec3c882 1264static int
b8db928b 1265cifs_push_locks(struct cifsFileInfo *cfile)
9ec3c882 1266{
b8db928b 1267 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2b0143b5 1268 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
b8db928b 1269 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
9ec3c882
PS
1270 int rc = 0;
1271
1272 /* we are going to update can_cache_brlcks here - need a write access */
1273 down_write(&cinode->lock_sem);
1274 if (!cinode->can_cache_brlcks) {
1275 up_write(&cinode->lock_sem);
1276 return rc;
1277 }
4f6bcec9 1278
29e20f9c 1279 if (cap_unix(tcon->ses) &&
4f6bcec9
PS
1280 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1281 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
b8db928b
PS
1282 rc = cifs_push_posix_locks(cfile);
1283 else
1284 rc = tcon->ses->server->ops->push_mand_locks(cfile);
4f6bcec9 1285
b8db928b
PS
1286 cinode->can_cache_brlcks = false;
1287 up_write(&cinode->lock_sem);
1288 return rc;
4f6bcec9
PS
1289}
1290
/*
 * Decode a VFS file_lock into CIFS terms: set *wait_flag for blocking
 * requests, build the wire lock *type from the server's lock-type values,
 * and flag via *lock / *unlock whether this is a lock or an unlock.
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	/* trace which VFS lock flavour and flags we were handed */
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	/* start from the server's "large file" lock type, then refine */
	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
1da177e4 1337
/*
 * Handle F_GETLK: report whether the range in @flock could be locked.
 * POSIX-capable mounts ask via CIFSSMBPosixLock; otherwise the cache is
 * consulted and, if inconclusive, a temporary lock/unlock probe is sent
 * to the server to discover any conflicting lock.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* local test first; 0 means no server round trip needed */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* mandatory style: the cached lock list may already answer this */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to take the requested lock, then release it again */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared request that failed means someone holds it exclusively */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive failed - probe with a shared lock to classify the holder */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1406
f7ba7fe6 1407void
9ee305b7
PS
1408cifs_move_llist(struct list_head *source, struct list_head *dest)
1409{
1410 struct list_head *li, *tmp;
1411 list_for_each_safe(li, tmp, source)
1412 list_move(li, dest);
1413}
1414
f7ba7fe6 1415void
9ee305b7
PS
1416cifs_free_llist(struct list_head *llist)
1417{
1418 struct cifsLockInfo *li, *tmp;
1419 list_for_each_entry_safe(li, tmp, llist, llist) {
1420 cifs_del_lock_waiters(li);
1421 list_del(&li->llist);
1422 kfree(li);
1423 }
1424}
1425
d39a4f71 1426int
6d5786a3
PS
1427cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1428 unsigned int xid)
9ee305b7
PS
1429{
1430 int rc = 0, stored_rc;
1431 int types[] = {LOCKING_ANDX_LARGE_FILES,
1432 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1433 unsigned int i;
0013fb4c 1434 unsigned int max_num, num, max_buf;
9ee305b7
PS
1435 LOCKING_ANDX_RANGE *buf, *cur;
1436 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2b0143b5 1437 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
9ee305b7
PS
1438 struct cifsLockInfo *li, *tmp;
1439 __u64 length = 1 + flock->fl_end - flock->fl_start;
1440 struct list_head tmp_llist;
1441
1442 INIT_LIST_HEAD(&tmp_llist);
1443
0013fb4c
PS
1444 /*
1445 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1446 * and check it for zero before using.
1447 */
1448 max_buf = tcon->ses->server->maxBuf;
1449 if (!max_buf)
1450 return -EINVAL;
1451
1452 max_num = (max_buf - sizeof(struct smb_hdr)) /
1453 sizeof(LOCKING_ANDX_RANGE);
4b99d39b 1454 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
9ee305b7
PS
1455 if (!buf)
1456 return -ENOMEM;
1457
1b4b55a1 1458 down_write(&cinode->lock_sem);
9ee305b7
PS
1459 for (i = 0; i < 2; i++) {
1460 cur = buf;
1461 num = 0;
f45d3416 1462 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
9ee305b7
PS
1463 if (flock->fl_start > li->offset ||
1464 (flock->fl_start + length) <
1465 (li->offset + li->length))
1466 continue;
1467 if (current->tgid != li->pid)
1468 continue;
9ee305b7
PS
1469 if (types[i] != li->type)
1470 continue;
ea319d57 1471 if (cinode->can_cache_brlcks) {
9ee305b7
PS
1472 /*
1473 * We can cache brlock requests - simply remove
fbd35aca 1474 * a lock from the file's list.
9ee305b7
PS
1475 */
1476 list_del(&li->llist);
1477 cifs_del_lock_waiters(li);
1478 kfree(li);
ea319d57 1479 continue;
9ee305b7 1480 }
ea319d57
PS
1481 cur->Pid = cpu_to_le16(li->pid);
1482 cur->LengthLow = cpu_to_le32((u32)li->length);
1483 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1484 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1485 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1486 /*
1487 * We need to save a lock here to let us add it again to
1488 * the file's list if the unlock range request fails on
1489 * the server.
1490 */
1491 list_move(&li->llist, &tmp_llist);
1492 if (++num == max_num) {
4b4de76e
PS
1493 stored_rc = cifs_lockv(xid, tcon,
1494 cfile->fid.netfid,
ea319d57
PS
1495 li->type, num, 0, buf);
1496 if (stored_rc) {
1497 /*
1498 * We failed on the unlock range
1499 * request - add all locks from the tmp
1500 * list to the head of the file's list.
1501 */
1502 cifs_move_llist(&tmp_llist,
f45d3416 1503 &cfile->llist->locks);
ea319d57
PS
1504 rc = stored_rc;
1505 } else
1506 /*
1507 * The unlock range request succeed -
1508 * free the tmp list.
1509 */
1510 cifs_free_llist(&tmp_llist);
1511 cur = buf;
1512 num = 0;
1513 } else
1514 cur++;
9ee305b7
PS
1515 }
1516 if (num) {
4b4de76e 1517 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
9ee305b7
PS
1518 types[i], num, 0, buf);
1519 if (stored_rc) {
f45d3416
PS
1520 cifs_move_llist(&tmp_llist,
1521 &cfile->llist->locks);
9ee305b7
PS
1522 rc = stored_rc;
1523 } else
1524 cifs_free_llist(&tmp_llist);
1525 }
1526 }
1527
1b4b55a1 1528 up_write(&cinode->lock_sem);
9ee305b7
PS
1529 kfree(buf);
1530 return rc;
1531}
1532
/*
 * Handle F_SETLK/F_SETLKW: set or clear the byte-range lock described by
 * @flock, using POSIX semantics when posix_lck is set and mandatory SMB
 * locks otherwise. On success of a POSIX-flagged request, the lock is
 * also recorded locally via locks_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/* try to satisfy the request from the local cache first */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/* 0: cached locally; 1: must ask server; <0: conflict/error */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
		    CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted the lock - record it in the local cache */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}
1613
/*
 * VFS ->lock entry point for cifs. Decodes the request, decides whether
 * POSIX byte-range locks are usable on this mount, and dispatches to
 * cifs_getlk() for F_GETLK or cifs_setlk() for set/clear requests.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	/* translate the VFS request into wire lock type and lock/unlock */
	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	/* POSIX brlocks need unix extensions and must not be mount-disabled */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1672
597b027f
JL
1673/*
1674 * update the file size (if needed) after a write. Should be called with
1675 * the inode->i_lock held
1676 */
72432ffc 1677void
fbec9ab9
JL
1678cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1679 unsigned int bytes_written)
1680{
1681 loff_t end_of_write = offset + bytes_written;
1682
1683 if (end_of_write > cifsi->server_eof)
1684 cifsi->server_eof = end_of_write;
1685}
1686
/*
 * Synchronously write @write_size bytes from @write_data to the server
 * at *offset, chunking by the server's retry size and reopening the
 * handle on -EAGAIN. Advances *offset, updates the cached server EOF and
 * i_size as data lands. Returns the number of bytes written, or a
 * negative error if nothing was written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		/* retry each chunk until it is written or fails hard */
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap the chunk at the server's per-write retry size */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* partial progress is still reported as success */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend the cached i_size if the write grew the file */
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1771
/*
 * Find an open handle on this inode that is usable for reading. Returns
 * the first valid FMODE_READ handle with a reference taken (caller must
 * cifsFileInfo_put it), or NULL if none is available. With @fsuid_only on
 * a multiuser mount, only handles owned by the current fsuid qualify.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
630f3f0c 1806
/*
 * Find an open handle on this inode that is usable for writing, preferring
 * one belonging to the current tgid, then any owner. If only invalidated
 * handles exist, attempt to reopen one, retrying up to MAX_REOPEN_ATT
 * times. Returns the handle with a reference taken (caller must
 * cifsFileInfo_put it), or NULL. With @fsuid_only on a multiuser mount,
 * only handles owned by the current fsuid qualify.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		/* give up after too many failed reopen attempts */
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				/* remember one invalid handle as a fallback */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		/* reopened outside the spinlock: the call can block */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* reopen failed: park this handle at the tail and retry */
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
1889
1da177e4
LT
/*
 * Write the byte range [from, to) of @page back to the server using any
 * writable handle cached for the inode.  Returns 0 on success (or when the
 * write raced with truncate), negative errno on failure.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	/* absolute file offset of the first byte to write */
	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1943
90ac1387
PS
/*
 * Allocate a cifs_writedata sized for up to @tofind pages and fill its page
 * array with dirty pages from @mapping starting at *@index (which is
 * advanced).  *@found_pages receives the number of pages collected; the
 * pages come back with references held.  Returns NULL on allocation failure.
 */
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}
1977
7e48ff82
PS
/*
 * From the @found_pages candidates in @wdata, lock and mark for writeback a
 * consecutive run of pages still eligible for this write (still in @mapping,
 * still dirty, within @end and i_size).  Returns how many pages were kept;
 * references on the rejected tail pages are dropped.  *@index, *@next and
 * *@done are updated for the caller's scan loop.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		/* block for the first page, but never mid-run: a contended
		   lock just ends the run */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2057
619aa48e
PS
/*
 * Finish filling in @wdata (offset, sizes, file handle, pid) for the first
 * @nr_pages prepared pages and submit it via the server's async_writev op.
 * All pages are unlocked before return regardless of outcome.  Returns 0 on
 * successful submission, -EBADF if no writable handle exists, or the
 * async_writev error.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* last page may extend past i_size: clamp the tail length */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	/* drop any handle from a previous (retried) attempt */
	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2092
1da177e4 2093static int cifs_writepages(struct address_space *mapping,
37c0eb46 2094 struct writeback_control *wbc)
1da177e4 2095{
c3d17b63 2096 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
cb7e9eab 2097 struct TCP_Server_Info *server;
c3d17b63
JL
2098 bool done = false, scanned = false, range_whole = false;
2099 pgoff_t end, index;
2100 struct cifs_writedata *wdata;
37c0eb46 2101 int rc = 0;
50c2f753 2102
37c0eb46 2103 /*
c3d17b63 2104 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
2105 * one page at a time via cifs_writepage
2106 */
09cbfeaf 2107 if (cifs_sb->wsize < PAGE_SIZE)
37c0eb46
SF
2108 return generic_writepages(mapping, wbc);
2109
111ebb6e 2110 if (wbc->range_cyclic) {
37c0eb46 2111 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
2112 end = -1;
2113 } else {
09cbfeaf
KS
2114 index = wbc->range_start >> PAGE_SHIFT;
2115 end = wbc->range_end >> PAGE_SHIFT;
111ebb6e 2116 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
2117 range_whole = true;
2118 scanned = true;
37c0eb46 2119 }
cb7e9eab 2120 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
37c0eb46 2121retry:
c3d17b63 2122 while (!done && index <= end) {
cb7e9eab 2123 unsigned int i, nr_pages, found_pages, wsize, credits;
66231a47 2124 pgoff_t next = 0, tofind, saved_index = index;
c3d17b63 2125
cb7e9eab
PS
2126 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2127 &wsize, &credits);
2128 if (rc)
2129 break;
c3d17b63 2130
09cbfeaf 2131 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
c3d17b63 2132
90ac1387
PS
2133 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2134 &found_pages);
c3d17b63
JL
2135 if (!wdata) {
2136 rc = -ENOMEM;
cb7e9eab 2137 add_credits_and_wake_if(server, credits, 0);
c3d17b63
JL
2138 break;
2139 }
2140
c3d17b63
JL
2141 if (found_pages == 0) {
2142 kref_put(&wdata->refcount, cifs_writedata_release);
cb7e9eab 2143 add_credits_and_wake_if(server, credits, 0);
c3d17b63
JL
2144 break;
2145 }
2146
7e48ff82
PS
2147 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2148 end, &index, &next, &done);
37c0eb46 2149
c3d17b63
JL
2150 /* nothing to write? */
2151 if (nr_pages == 0) {
2152 kref_put(&wdata->refcount, cifs_writedata_release);
cb7e9eab 2153 add_credits_and_wake_if(server, credits, 0);
c3d17b63 2154 continue;
37c0eb46 2155 }
fbec9ab9 2156
cb7e9eab 2157 wdata->credits = credits;
941b853d 2158
619aa48e 2159 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
f3983c21 2160
c3d17b63
JL
2161 /* send failure -- clean up the mess */
2162 if (rc != 0) {
cb7e9eab 2163 add_credits_and_wake_if(server, wdata->credits, 0);
c3d17b63 2164 for (i = 0; i < nr_pages; ++i) {
941b853d 2165 if (rc == -EAGAIN)
c3d17b63
JL
2166 redirty_page_for_writepage(wbc,
2167 wdata->pages[i]);
2168 else
2169 SetPageError(wdata->pages[i]);
2170 end_page_writeback(wdata->pages[i]);
09cbfeaf 2171 put_page(wdata->pages[i]);
37c0eb46 2172 }
941b853d
JL
2173 if (rc != -EAGAIN)
2174 mapping_set_error(mapping, rc);
c3d17b63
JL
2175 }
2176 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 2177
66231a47
PS
2178 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2179 index = saved_index;
2180 continue;
2181 }
2182
c3d17b63
JL
2183 wbc->nr_to_write -= nr_pages;
2184 if (wbc->nr_to_write <= 0)
2185 done = true;
b066a48c 2186
c3d17b63 2187 index = next;
37c0eb46 2188 }
c3d17b63 2189
37c0eb46
SF
2190 if (!scanned && !done) {
2191 /*
2192 * We hit the last page and there is more work to be done: wrap
2193 * back to the start of the file
2194 */
c3d17b63 2195 scanned = true;
37c0eb46
SF
2196 index = 0;
2197 goto retry;
2198 }
c3d17b63 2199
111ebb6e 2200 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
2201 mapping->writeback_index = index;
2202
1da177e4
LT
2203 return rc;
2204}
1da177e4 2205
9ad1506b
PS
/*
 * Write a single locked page back to the server.  The page is expected to
 * be locked by the caller; this helper does not unlock it (see
 * cifs_writepage for the unlocking wrapper).  Retries indefinitely on
 * -EAGAIN when sync_mode is WB_SYNC_ALL.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2244
9ad1506b
PS
/* ->writepage handler: perform the write with the page still locked,
 * then drop the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}
2251
d9414774
NP
/*
 * ->write_end handler: commit @copied bytes written into @page at @pos.
 * If the page is not uptodate (partial-page write into an uncached page),
 * push the bytes to the server synchronously via cifs_write(); otherwise
 * just dirty the page for later writeback.  Returns the number of bytes
 * committed, or a negative error from the synchronous write path.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the opener's pid when the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* extend cached i_size if this write grew the file */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2312
02c24a82
JB
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], zap the
 * page cache if we no longer hold a read oplock/lease (so stale cached data
 * cannot be served), then ask the server to flush the handle unless the
 * mount disabled server-side sync (nosssync).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2355
02c24a82 2356int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba 2357{
6d5786a3 2358 unsigned int xid;
8be7e6ba 2359 int rc = 0;
96daf2b0 2360 struct cifs_tcon *tcon;
1d8c4c00 2361 struct TCP_Server_Info *server;
8be7e6ba 2362 struct cifsFileInfo *smbfile = file->private_data;
7119e220 2363 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
02c24a82
JB
2364 struct inode *inode = file->f_mapping->host;
2365
2366 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2367 if (rc)
2368 return rc;
5955102c 2369 inode_lock(inode);
8be7e6ba 2370
6d5786a3 2371 xid = get_xid();
8be7e6ba 2372
35c265e0
AV
2373 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2374 file, datasync);
8be7e6ba
PS
2375
2376 tcon = tlink_tcon(smbfile->tlink);
1d8c4c00
PS
2377 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2378 server = tcon->ses->server;
2379 if (server->ops->flush)
2380 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2381 else
2382 rc = -ENOSYS;
2383 }
b298f223 2384
6d5786a3 2385 free_xid(xid);
5955102c 2386 inode_unlock(inode);
1da177e4
LT
2387 return rc;
2388}
2389
1da177e4
LT
2390/*
2391 * As file closes, flush all cached write data for this inode checking
2392 * for write behind errors.
2393 */
75e1fcc0 2394int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2395{
496ad9aa 2396 struct inode *inode = file_inode(file);
1da177e4
LT
2397 int rc = 0;
2398
eb4b756b 2399 if (file->f_mode & FMODE_WRITE)
d3f1322a 2400 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2401
f96637be 2402 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
1da177e4
LT
2403
2404 return rc;
2405}
2406
72432ffc
PS
2407static int
2408cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2409{
2410 int rc = 0;
2411 unsigned long i;
2412
2413 for (i = 0; i < num_pages; i++) {
e94f7ba1 2414 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
72432ffc
PS
2415 if (!pages[i]) {
2416 /*
2417 * save number of pages we have already allocated and
2418 * return with ENOMEM error
2419 */
2420 num_pages = i;
2421 rc = -ENOMEM;
e94f7ba1 2422 break;
72432ffc
PS
2423 }
2424 }
2425
e94f7ba1
JL
2426 if (rc) {
2427 for (i = 0; i < num_pages; i++)
2428 put_page(pages[i]);
2429 }
72432ffc
PS
2430 return rc;
2431}
2432
2433static inline
2434size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2435{
2436 size_t num_pages;
2437 size_t clen;
2438
2439 clen = min_t(const size_t, len, wsize);
a7103b99 2440 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2441
2442 if (cur_len)
2443 *cur_len = clen;
2444
2445 return num_pages;
2446}
2447
da82f7e7 2448static void
4a5c80d7 2449cifs_uncached_writedata_release(struct kref *refcount)
da82f7e7
JL
2450{
2451 int i;
4a5c80d7
SF
2452 struct cifs_writedata *wdata = container_of(refcount,
2453 struct cifs_writedata, refcount);
2454
2455 for (i = 0; i < wdata->nr_pages; i++)
2456 put_page(wdata->pages[i]);
2457 cifs_writedata_release(refcount);
2458}
2459
/*
 * Work-queue completion for an uncached write: advance the cached server
 * EOF / i_size under i_lock, wake any waiter on wdata->done, then drop the
 * request's reference (which frees it once the waiter is also done).
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2478
da82f7e7 2479static int
66386c08
PS
2480wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2481 size_t *len, unsigned long *num_pages)
da82f7e7 2482{
66386c08
PS
2483 size_t save_len, copied, bytes, cur_len = *len;
2484 unsigned long i, nr_pages = *num_pages;
c9de5c80 2485
66386c08
PS
2486 save_len = cur_len;
2487 for (i = 0; i < nr_pages; i++) {
2488 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2489 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2490 cur_len -= copied;
2491 /*
2492 * If we didn't copy as much as we expected, then that
2493 * may mean we trod into an unmapped area. Stop copying
2494 * at that point. On the next pass through the big
2495 * loop, we'll likely end up getting a zero-length
2496 * write and bailing out of it.
2497 */
2498 if (copied < bytes)
2499 break;
2500 }
2501 cur_len = save_len - cur_len;
2502 *len = cur_len;
da82f7e7 2503
66386c08
PS
2504 /*
2505 * If we have no data to send, then that probably means that
2506 * the copy above failed altogether. That's most likely because
2507 * the address in the iovec was bogus. Return -EFAULT and let
2508 * the caller free anything we allocated and bail out.
2509 */
2510 if (!cur_len)
2511 return -EFAULT;
da82f7e7 2512
66386c08
PS
2513 /*
2514 * i + 1 now represents the number of pages we actually used in
2515 * the copy phase above.
2516 */
2517 *num_pages = i + 1;
2518 return 0;
da82f7e7
JL
2519}
2520
43de94ea
PS
/*
 * Split an uncached write of @len bytes at @offset into credit-limited
 * cifs_writedata requests, copy user data into them and submit each via
 * async_writev.  Successfully submitted requests are appended to
 * @wdata_list for the caller to wait on; -EAGAIN from a submit causes the
 * affected range to be rebuilt and retried.  Returns the first fatal error
 * or 0.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		/* reserve server credits before building each chunk */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		/* reopen an invalidated handle before submitting */
		if (!wdata->cfile->invalidHandle ||
		    !cifs_reopen_file(wdata->cfile, false))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/* rewind iterator to this chunk and retry */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2616
e9d1593d 2617ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
43de94ea 2618{
e9d1593d 2619 struct file *file = iocb->ki_filp;
43de94ea
PS
2620 ssize_t total_written = 0;
2621 struct cifsFileInfo *open_file;
2622 struct cifs_tcon *tcon;
2623 struct cifs_sb_info *cifs_sb;
2624 struct cifs_writedata *wdata, *tmp;
2625 struct list_head wdata_list;
fc56b983 2626 struct iov_iter saved_from = *from;
43de94ea
PS
2627 int rc;
2628
e9d1593d
AV
2629 /*
2630 * BB - optimize the way when signing is disabled. We can drop this
2631 * extra memory-to-memory copying and use iovec buffers for constructing
2632 * write request.
2633 */
2634
3309dd04
AV
2635 rc = generic_write_checks(iocb, from);
2636 if (rc <= 0)
43de94ea
PS
2637 return rc;
2638
43de94ea 2639 INIT_LIST_HEAD(&wdata_list);
7119e220 2640 cifs_sb = CIFS_FILE_SB(file);
43de94ea
PS
2641 open_file = file->private_data;
2642 tcon = tlink_tcon(open_file->tlink);
2643
2644 if (!tcon->ses->server->ops->async_writev)
2645 return -ENOSYS;
2646
3309dd04
AV
2647 rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
2648 open_file, cifs_sb, &wdata_list);
43de94ea 2649
da82f7e7
JL
2650 /*
2651 * If at least one write was successfully sent, then discard any rc
2652 * value from the later writes. If the other write succeeds, then
2653 * we'll end up returning whatever was written. If it fails, then
2654 * we'll get a new rc value from that.
2655 */
2656 if (!list_empty(&wdata_list))
2657 rc = 0;
2658
2659 /*
2660 * Wait for and collect replies for any successful sends in order of
2661 * increasing offset. Once an error is hit or we get a fatal signal
2662 * while waiting, then return without waiting for any more replies.
2663 */
2664restart_loop:
2665 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2666 if (!rc) {
2667 /* FIXME: freezable too? */
2668 rc = wait_for_completion_killable(&wdata->done);
2669 if (rc)
2670 rc = -EINTR;
2671 else if (wdata->result)
2672 rc = wdata->result;
2673 else
2674 total_written += wdata->bytes;
2675
2676 /* resend call if it's a retryable error */
2677 if (rc == -EAGAIN) {
6ec0b01b 2678 struct list_head tmp_list;
fc56b983 2679 struct iov_iter tmp_from = saved_from;
6ec0b01b
PS
2680
2681 INIT_LIST_HEAD(&tmp_list);
2682 list_del_init(&wdata->list);
2683
6ec0b01b 2684 iov_iter_advance(&tmp_from,
e9d1593d 2685 wdata->offset - iocb->ki_pos);
6ec0b01b
PS
2686
2687 rc = cifs_write_from_iter(wdata->offset,
2688 wdata->bytes, &tmp_from,
2689 open_file, cifs_sb, &tmp_list);
2690
2691 list_splice(&tmp_list, &wdata_list);
2692
2693 kref_put(&wdata->refcount,
2694 cifs_uncached_writedata_release);
da82f7e7
JL
2695 goto restart_loop;
2696 }
2697 }
2698 list_del_init(&wdata->list);
4a5c80d7 2699 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
72432ffc
PS
2700 }
2701
e9d1593d
AV
2702 if (unlikely(!total_written))
2703 return rc;
72432ffc 2704
e9d1593d
AV
2705 iocb->ki_pos += total_written;
2706 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
da82f7e7 2707 cifs_stats_bytes_written(tcon, total_written);
e9d1593d 2708 return total_written;
72432ffc
PS
2709}
2710
579f9053 2711static ssize_t
3dae8750 2712cifs_writev(struct kiocb *iocb, struct iov_iter *from)
72432ffc 2713{
579f9053
PS
2714 struct file *file = iocb->ki_filp;
2715 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2716 struct inode *inode = file->f_mapping->host;
2717 struct cifsInodeInfo *cinode = CIFS_I(inode);
2718 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
5f380c7f 2719 ssize_t rc;
72432ffc 2720
579f9053
PS
2721 /*
2722 * We need to hold the sem to be sure nobody modifies lock list
2723 * with a brlock that prevents writing.
2724 */
2725 down_read(&cinode->lock_sem);
5955102c 2726 inode_lock(inode);
5f380c7f 2727
3309dd04
AV
2728 rc = generic_write_checks(iocb, from);
2729 if (rc <= 0)
5f380c7f
AV
2730 goto out;
2731
5f380c7f 2732 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
579f9053 2733 server->vals->exclusive_lock_type, NULL,
5f380c7f 2734 CIFS_WRITE_OP))
3dae8750 2735 rc = __generic_file_write_iter(iocb, from);
5f380c7f
AV
2736 else
2737 rc = -EACCES;
2738out:
5955102c 2739 inode_unlock(inode);
19dfc1f5 2740
e2592217
CH
2741 if (rc > 0)
2742 rc = generic_write_sync(iocb, rc);
579f9053 2743 up_read(&cinode->lock_sem);
579f9053
PS
2744 return rc;
2745}
2746
/*
 * Strict-cache write entry point: choose the write strategy based on the
 * oplock state.  With a write oplock we can use the page cache (generic
 * write for POSIX-capable unix mounts, cifs_writev otherwise); without one
 * the data goes straight to the server via cifs_user_writev, after which
 * any level2 (read) oplock is dropped and the mapping zapped to avoid
 * serving stale cached data.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* blocks while an oplock break is being handled */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2794
0471ca3f 2795static struct cifs_readdata *
f4e49cd2 2796cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
0471ca3f
JL
2797{
2798 struct cifs_readdata *rdata;
f4e49cd2 2799
c5fab6f4
JL
2800 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2801 GFP_KERNEL);
0471ca3f 2802 if (rdata != NULL) {
6993f74a 2803 kref_init(&rdata->refcount);
1c892549
JL
2804 INIT_LIST_HEAD(&rdata->list);
2805 init_completion(&rdata->done);
0471ca3f 2806 INIT_WORK(&rdata->work, complete);
0471ca3f 2807 }
f4e49cd2 2808
0471ca3f
JL
2809 return rdata;
2810}
2811
6993f74a
JL
2812void
2813cifs_readdata_release(struct kref *refcount)
0471ca3f 2814{
6993f74a
JL
2815 struct cifs_readdata *rdata = container_of(refcount,
2816 struct cifs_readdata, refcount);
2817
2818 if (rdata->cfile)
2819 cifsFileInfo_put(rdata->cfile);
2820
0471ca3f
JL
2821 kfree(rdata);
2822}
2823
1c892549 2824static int
c5fab6f4 2825cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
1c892549
JL
2826{
2827 int rc = 0;
c5fab6f4 2828 struct page *page;
1c892549
JL
2829 unsigned int i;
2830
c5fab6f4 2831 for (i = 0; i < nr_pages; i++) {
1c892549
JL
2832 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2833 if (!page) {
2834 rc = -ENOMEM;
2835 break;
2836 }
c5fab6f4 2837 rdata->pages[i] = page;
1c892549
JL
2838 }
2839
2840 if (rc) {
c5fab6f4
JL
2841 for (i = 0; i < nr_pages; i++) {
2842 put_page(rdata->pages[i]);
2843 rdata->pages[i] = NULL;
1c892549
JL
2844 }
2845 }
2846 return rc;
2847}
2848
2849static void
2850cifs_uncached_readdata_release(struct kref *refcount)
2851{
1c892549
JL
2852 struct cifs_readdata *rdata = container_of(refcount,
2853 struct cifs_readdata, refcount);
c5fab6f4 2854 unsigned int i;
1c892549 2855
c5fab6f4
JL
2856 for (i = 0; i < rdata->nr_pages; i++) {
2857 put_page(rdata->pages[i]);
2858 rdata->pages[i] = NULL;
1c892549
JL
2859 }
2860 cifs_readdata_release(refcount);
2861}
2862
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 when all of rdata->got_bytes was copied, -EFAULT otherwise.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	/* total bytes the server actually returned for this readdata */
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		/* at most one page's worth per iteration */
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		/*
		 * A short copy while the iter still has room means the copy
		 * faulted; give up rather than spin.
		 */
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	/* anything left uncopied indicates a fault in the destination */
	return remaining ? -EFAULT : 0;
}
2888
2889static void
2890cifs_uncached_readv_complete(struct work_struct *work)
2891{
2892 struct cifs_readdata *rdata = container_of(work,
2893 struct cifs_readdata, work);
1c892549
JL
2894
2895 complete(&rdata->done);
2896 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2897}
2898
2899static int
8321fec4
JL
2900cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2901 struct cifs_readdata *rdata, unsigned int len)
1c892549 2902{
b3160aeb 2903 int result = 0;
c5fab6f4
JL
2904 unsigned int i;
2905 unsigned int nr_pages = rdata->nr_pages;
1c892549 2906
b3160aeb 2907 rdata->got_bytes = 0;
8321fec4 2908 rdata->tailsz = PAGE_SIZE;
c5fab6f4
JL
2909 for (i = 0; i < nr_pages; i++) {
2910 struct page *page = rdata->pages[i];
71335664 2911 size_t n;
c5fab6f4 2912
71335664 2913 if (len <= 0) {
1c892549 2914 /* no need to hold page hostage */
c5fab6f4
JL
2915 rdata->pages[i] = NULL;
2916 rdata->nr_pages--;
1c892549 2917 put_page(page);
8321fec4 2918 continue;
1c892549 2919 }
71335664
AV
2920 n = len;
2921 if (len >= PAGE_SIZE) {
2922 /* enough data to fill the page */
2923 n = PAGE_SIZE;
2924 len -= n;
2925 } else {
2926 zero_user(page, len, PAGE_SIZE - len);
2927 rdata->tailsz = len;
2928 len = 0;
2929 }
2930 result = cifs_read_page_from_socket(server, page, n);
8321fec4
JL
2931 if (result < 0)
2932 break;
2933
b3160aeb 2934 rdata->got_bytes += result;
1c892549
JL
2935 }
2936
b3160aeb
PS
2937 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
2938 rdata->got_bytes : result;
1c892549
JL
2939}
2940
/*
 * Split an uncached read of @len bytes at @offset into rsize-bounded async
 * read requests and queue each issued request's readdata on @rdata_list.
 * Each request consumes server credits obtained via wait_mtu_credits; on
 * failure the credits are returned and waiters are woken.
 *
 * Returns 0 if all requests were issued; otherwise the last error. -EAGAIN
 * from issuing retries the chunk rather than aborting.
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	/* forward the opener's pid when the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/* blocks until enough credits exist for an rsize-sized read */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		/* reopen a stale handle before issuing, if needed */
		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			/* give the unused credits back and drop our ref */
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3010
/*
 * Uncached read entry point (->read_iter for mounts that bypass the page
 * cache). Fans the request out into async reads, waits for each completion
 * in offset order, copies results into @to, and resends chunks that failed
 * with -EAGAIN (reconnect). Advances iocb->ki_pos by the bytes copied.
 *
 * Returns bytes read, 0, or a negative error if nothing was read.
 */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				/* reissue only the remainder of this chunk */
				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				/* restart: the list was modified under us */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes <
			    rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	/* bytes consumed from the iter == bytes delivered to the caller */
	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3114
/*
 * Strict-cache read entry point: use the page cache only when a read oplock
 * (cache-read) is held; otherwise fall back to an uncached read. When
 * mandatory byte-range locks may apply, take lock_sem and refuse cached
 * reads that conflict with a brlock.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* POSIX-capable unix extensions: brlocks are advisory, read cached */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
1da177e4 3154
/*
 * Synchronous read of @read_size bytes at *@offset into @read_data, issued
 * in rsize-bounded chunks via the server's sync_read op. *@offset is
 * advanced by the bytes read. Retries transparently on -EAGAIN, reopening
 * an invalidated handle first.
 *
 * Returns bytes read (possibly short), or a negative error if nothing was
 * read at all.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	/* forward the opener's pid when the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* error or EOF: return progress if any, else the rc */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3245
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	/* return with the page locked; the caller unlocks when done */
	lock_page(page);
	return VM_FAULT_LOCKED;
}
3258
/* vm operations for cifs mmap'ed files; faults go through the generic
 * filemap paths, writes are gated by cifs_page_mkwrite above */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3264
7a6a19b1
PS
3265int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3266{
3267 int rc, xid;
496ad9aa 3268 struct inode *inode = file_inode(file);
7a6a19b1 3269
6d5786a3 3270 xid = get_xid();
7a6a19b1 3271
18cceb6a 3272 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
4f73c7d3 3273 rc = cifs_zap_mapping(inode);
6feb9891
PS
3274 if (rc)
3275 return rc;
3276 }
7a6a19b1
PS
3277
3278 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3279 if (rc == 0)
3280 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3281 free_xid(xid);
7a6a19b1
PS
3282 return rc;
3283}
3284
1da177e4
LT
3285int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3286{
1da177e4
LT
3287 int rc, xid;
3288
6d5786a3 3289 xid = get_xid();
abab095d 3290 rc = cifs_revalidate_file(file);
1da177e4 3291 if (rc) {
f96637be
JP
3292 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3293 rc);
6d5786a3 3294 free_xid(xid);
1da177e4
LT
3295 return rc;
3296 }
3297 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3298 if (rc == 0)
3299 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3300 free_xid(xid);
1da177e4
LT
3301 return rc;
3302}
3303
/*
 * Work-queue completion for a readpages async read: for each page, mark it
 * up to date if data arrived for it (full success, or -EAGAIN with partial
 * data covering it), unlock it, push it to fscache when valid, and drop the
 * reference taken for the read. Finally drop the readdata reference.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		/* page holds valid data if the read succeeded, or if a
		 * retryable failure still delivered bytes covering it */
		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		/* consume up to one page's worth of the received count */
		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

		put_page(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3336
/*
 * Receive up to @len bytes from the server socket into the readpages pages.
 * Pages past the received data are either zero-filled and marked up to date
 * (when beyond the server's known EOF, to stop the VFS re-requesting them)
 * or released. Updates rdata->got_bytes and rdata->tailsz.
 *
 * Returns bytes received, or a negative error; partial progress is reported
 * unless the connection was aborted.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;

		if (len >= PAGE_SIZE) {
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3403
/*
 * Peel a run of contiguous pages off the tail of @page_list, insert them
 * into the page cache locked, and collect them on @tmplist for one read
 * request. Stops at an index discontinuity, at @rsize bytes, or when a page
 * cannot be added to the cache. Outputs the request's starting @offset,
 * byte count @bytes, and page count @nr_pages.
 *
 * Returns 0 on success; an error only if the very first page could not be
 * added to the page cache.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	/* page_list is in declining index order; start from its tail */
	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
3462
/*
 * ->readpages: satisfy as much as possible from fscache, then batch the
 * remaining pages into rsize-sized async read requests. Credits are taken
 * per request and returned on any failure path; on error the batched pages
 * are unlocked and released so the VFS can fall back to ->readpage.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		/* transfer page ownership from tmplist into the readdata */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; completion holds its own until it runs */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
3591
/*
 * cifs_readpage_worker must be called with the page pinned
 *
 * Fill one page: try fscache first; otherwise do a synchronous cifs_read,
 * zero the tail past the bytes read, mark the page up to date and push it
 * to fscache. The page is unlocked before returning (the fscache hit path
 * returns without touching the lock here). Returns 0 or a negative error.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_time(file_inode(file));

	/* zero whatever the read did not fill */
	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
3637
3638static int cifs_readpage(struct file *file, struct page *page)
3639{
09cbfeaf 3640 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
1da177e4 3641 int rc = -EACCES;
6d5786a3 3642 unsigned int xid;
1da177e4 3643
6d5786a3 3644 xid = get_xid();
1da177e4
LT
3645
3646 if (file->private_data == NULL) {
0f3bc09e 3647 rc = -EBADF;
6d5786a3 3648 free_xid(xid);
0f3bc09e 3649 return rc;
1da177e4
LT
3650 }
3651
f96637be 3652 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
b6b38f70 3653 page, (int)offset, (int)offset);
1da177e4
LT
3654
3655 rc = cifs_readpage_worker(file, page, &offset);
3656
6d5786a3 3657 free_xid(xid);
1da177e4
LT
3658 return rc;
3659}
3660
a403a0a3
SF
3661static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3662{
3663 struct cifsFileInfo *open_file;
3afca265
SF
3664 struct cifs_tcon *tcon =
3665 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
a403a0a3 3666
3afca265 3667 spin_lock(&tcon->open_file_lock);
a403a0a3 3668 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 3669 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3afca265 3670 spin_unlock(&tcon->open_file_lock);
a403a0a3
SF
3671 return 1;
3672 }
3673 }
3afca265 3674 spin_unlock(&tcon->open_file_lock);
a403a0a3
SF
3675 return 0;
3676}
3677
1da177e4
LT
3678/* We do not want to update the file size from server for inodes
3679 open for write - to avoid races with writepage extending
3680 the file - in the future we could consider allowing
fb8c4b14 3681 refreshing the inode only on increases in the file size
1da177e4
LT
3682 but this is tricky to do without racing with writebehind
3683 page caching in the current Linux kernel design */
4b18f2a9 3684bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 3685{
a403a0a3 3686 if (!cifsInode)
4b18f2a9 3687 return true;
50c2f753 3688
a403a0a3
SF
3689 if (is_inode_writable(cifsInode)) {
3690 /* This inode is open for write at least once */
c32a0b68
SF
3691 struct cifs_sb_info *cifs_sb;
3692
c32a0b68 3693 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 3694 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3695 /* since no page cache to corrupt on directio
c32a0b68 3696 we can change size safely */
4b18f2a9 3697 return true;
c32a0b68
SF
3698 }
3699
fb8c4b14 3700 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 3701 return true;
7ba52631 3702
4b18f2a9 3703 return false;
23e7dd7d 3704 } else
4b18f2a9 3705 return true;
1da177e4
LT
3706}
3707
/*
 * ->write_begin: pin the target page and decide whether it must be read
 * from the server before the write. Full-page writes, already-uptodate
 * pages, and (with a read oplock) writes beyond/covering EOF skip the read;
 * otherwise the page is read once via cifs_readpage_worker and retried.
 * On return *pagep holds the locked page (even on the -ENOMEM path it is
 * set, to NULL).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		/* only attempt the read once, then fall through */
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3784
85f2d6b4
SJ
3785static int cifs_release_page(struct page *page, gfp_t gfp)
3786{
3787 if (PagePrivate(page))
3788 return 0;
3789
3790 return cifs_fscache_release_page(page, gfp);
3791}
3792
d47992f8
LC
3793static void cifs_invalidate_page(struct page *page, unsigned int offset,
3794 unsigned int length)
85f2d6b4
SJ
3795{
3796 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3797
09cbfeaf 3798 if (offset == 0 && length == PAGE_SIZE)
85f2d6b4
SJ
3799 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3800}
3801
9ad1506b
PS
3802static int cifs_launder_page(struct page *page)
3803{
3804 int rc = 0;
3805 loff_t range_start = page_offset(page);
09cbfeaf 3806 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
9ad1506b
PS
3807 struct writeback_control wbc = {
3808 .sync_mode = WB_SYNC_ALL,
3809 .nr_to_write = 0,
3810 .range_start = range_start,
3811 .range_end = range_end,
3812 };
3813
f96637be 3814 cifs_dbg(FYI, "Launder page: %p\n", page);
9ad1506b
PS
3815
3816 if (clear_page_dirty_for_io(page))
3817 rc = cifs_writepage_locked(page, &wbc);
3818
3819 cifs_fscache_invalidate_page(page, page->mapping->host);
3820 return rc;
3821}
3822
/*
 * Work item run when the server breaks our oplock/lease on a file.
 *
 * The steps below are order-sensitive: wait out in-flight writers,
 * downgrade the cached oplock state, propagate the break to any local
 * lease holders, flush (and possibly zap) cached data, re-push byte-range
 * locks, and finally acknowledge the break to the server.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Let writers that are already in flight finish before downgrading. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	/* Dialect-specific downgrade; flag says whether level II remains. */
	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * With mandatory byte-range locks held, a read-only (level II)
	 * oplock cannot be kept safely, so drop caching entirely.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
					cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* Forward the break to local lease holders on this inode. */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/* Read caching lost: wait for I/O, then discard pages. */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* Re-send byte-range locks that the server may have dropped. */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	/* Clear the pending-break state and wake any waiters. */
	cifs_done_oplock_break(cinode);
}
3877
dca69288
SF
/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct read
 * and write requests before they reach the page cache, so this method
 * should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}
3896
3897
/*
 * Address space operations used when the server supports buffers large
 * enough for full-page reads (see cifs_addr_ops_smallbuf below for the
 * variant without ->readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
273d81d6
DK
3911
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations, which is
 * what this small-buffer variant does (no ->readpages entry).
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};