]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - fs/cifs/file.c
exfat: fix referencing wrong parent directory information after renaming
[mirror_ubuntu-jammy-kernel.git] / fs / cifs / file.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4
LT
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4 9 *
1da177e4
LT
10 */
11#include <linux/fs.h>
37c0eb46 12#include <linux/backing-dev.h>
1da177e4
LT
13#include <linux/stat.h>
14#include <linux/fcntl.h>
15#include <linux/pagemap.h>
16#include <linux/pagevec.h>
37c0eb46 17#include <linux/writeback.h>
6f88cc2e 18#include <linux/task_io_accounting_ops.h>
23e7dd7d 19#include <linux/delay.h>
3bc303c2 20#include <linux/mount.h>
5a0e3ad6 21#include <linux/slab.h>
690c5e31 22#include <linux/swap.h>
f86196ea 23#include <linux/mm.h>
1da177e4
LT
24#include <asm/div64.h>
25#include "cifsfs.h"
26#include "cifspdu.h"
27#include "cifsglob.h"
28#include "cifsproto.h"
29#include "cifs_unicode.h"
30#include "cifs_debug.h"
31#include "cifs_fs_sb.h"
9451a9a5 32#include "fscache.h"
bd3dcc6a 33#include "smbdirect.h"
8401e936 34#include "fs_context.h"
087f757b 35#include "cifs_ioctl.h"
07b92d0d 36
1da177e4
LT
37static inline int cifs_convert_flags(unsigned int flags)
38{
39 if ((flags & O_ACCMODE) == O_RDONLY)
40 return GENERIC_READ;
41 else if ((flags & O_ACCMODE) == O_WRONLY)
42 return GENERIC_WRITE;
43 else if ((flags & O_ACCMODE) == O_RDWR) {
44 /* GENERIC_ALL is too much permission to request
45 can cause unnecessary access denied on create */
46 /* return GENERIC_ALL; */
47 return (GENERIC_READ | GENERIC_WRITE);
48 }
49
e10f7b55
JL
50 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
51 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
52 FILE_READ_DATA);
7fc8f4e9 53}
e10f7b55 54
608712fe 55static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 56{
608712fe 57 u32 posix_flags = 0;
e10f7b55 58
7fc8f4e9 59 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 60 posix_flags = SMB_O_RDONLY;
7fc8f4e9 61 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
62 posix_flags = SMB_O_WRONLY;
63 else if ((flags & O_ACCMODE) == O_RDWR)
64 posix_flags = SMB_O_RDWR;
65
07b92d0d 66 if (flags & O_CREAT) {
608712fe 67 posix_flags |= SMB_O_CREAT;
07b92d0d
SF
68 if (flags & O_EXCL)
69 posix_flags |= SMB_O_EXCL;
70 } else if (flags & O_EXCL)
f96637be
JP
71 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
72 current->comm, current->tgid);
07b92d0d 73
608712fe
JL
74 if (flags & O_TRUNC)
75 posix_flags |= SMB_O_TRUNC;
76 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 77 if (flags & O_DSYNC)
608712fe 78 posix_flags |= SMB_O_SYNC;
7fc8f4e9 79 if (flags & O_DIRECTORY)
608712fe 80 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 81 if (flags & O_NOFOLLOW)
608712fe 82 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 83 if (flags & O_DIRECT)
608712fe 84 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
85
86 return posix_flags;
1da177e4
LT
87}
88
89static inline int cifs_get_disposition(unsigned int flags)
90{
91 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
92 return FILE_CREATE;
93 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
94 return FILE_OVERWRITE_IF;
95 else if ((flags & O_CREAT) == O_CREAT)
96 return FILE_OPEN_IF;
55aa2e09
SF
97 else if ((flags & O_TRUNC) == O_TRUNC)
98 return FILE_OVERWRITE;
1da177e4
LT
99 else
100 return FILE_OPEN;
101}
102
/*
 * Open a file via the legacy CIFS POSIX extensions (CIFSPOSIXCreate).
 *
 * @full_path:	path relative to the share root
 * @pinode:	in/out; if *pinode is NULL a new inode is created from the
 *		returned attributes, otherwise the existing inode is refreshed.
 *		May be NULL when the caller does not need inode info.
 * @sb:		superblock of the mount
 * @mode:	create mode (masked by current umask below)
 * @f_flags:	POSIX open flags, converted to SMB_O_* for the wire
 * @poplock:	out; oplock granted by the server
 * @pnetfid:	out; network file id of the opened handle
 * @xid:	transaction id for debugging/tracing
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no file attributes */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* best-effort revalidate; rc comes from the attr update below */
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
164
/*
 * Open a file the "NT way" via the server's ->open op (used when the
 * POSIX-extensions open is unavailable or failed).  On success also
 * refreshes the inode from the returned metadata; on inode-refresh
 * failure the just-opened handle is closed again.
 *
 * Returns 0 on success or a negative errno (-EOPENSTALE if the file
 * changed under us, mapped from -ESTALE).
 */
static int
cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buf receives FILE_ALL_INFO from the server open response */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		/* inode refresh failed: do not leak the server handle */
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}
254
63b7d3a4
PS
255static bool
256cifs_has_mand_locks(struct cifsInodeInfo *cinode)
257{
258 struct cifs_fid_locks *cur;
259 bool has_locks = false;
260
261 down_read(&cinode->lock_sem);
262 list_for_each_entry(cur, &cinode->llist, llist) {
263 if (!list_empty(&cur->locks)) {
264 has_locks = true;
265 break;
266 }
267 }
268 up_read(&cinode->lock_sem);
269 return has_locks;
270}
271
d46b0da7
DW
/*
 * Acquire @sem for writing by polling with trylock, sleeping 10ms
 * between attempts instead of blocking in down_write().
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	for (;;) {
		if (down_write_trylock(sem))
			return;
		msleep(10);
	}
}
278
32546a95
RS
279static void cifsFileInfo_put_work(struct work_struct *work);
280
/*
 * Allocate and initialize a cifsFileInfo for a freshly opened handle,
 * link it onto the tcon and inode open-file lists, and resolve the
 * final oplock level (consuming the pending open recorded earlier).
 *
 * Returns the new cifsFileInfo (also stored in file->private_data),
 * or NULL on allocation failure.  Lock ordering here is
 * tcon->open_file_lock before cinode->open_file_lock.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-handle byte-range lock list */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;	/* initial reference, dropped by cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* pin the superblock while this handle exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have updated the oplock while the open raced */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	/* set_fid may set fid->purge_cache, checked after the locks drop */
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
362
764a1b1a
JL
/*
 * Take a reference on an open file handle.  The count is protected by
 * file_info_lock; the matching drop is cifsFileInfo_put().
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
371
32546a95
RS
/*
 * Final teardown of a cifsFileInfo once its refcount has hit zero and
 * the handle has been closed on the server: release the fscache cookie,
 * discard any leftover byte-range lock records, then drop the tlink,
 * dentry and superblock references taken at open time.
 */
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	cifs_fscache_release_inode_cookie(inode);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
400
/* Workqueue shim: run the final put off the caller's context. */
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}
408
b98749ca
AA
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	/* wait for the oplock handler and offload the final free */
	_cifsFileInfo_put(cifs_file, true, true);
}
420
421/**
422 * _cifsFileInfo_put - release a reference of file priv data
423 *
424 * This may involve closing the filehandle @cifs_file out on the
32546a95
RS
425 * server. Must be called without holding tcon->open_file_lock,
426 * cinode->open_file_lock and cifs_file->file_info_lock.
b98749ca
AA
427 *
428 * If @wait_for_oplock_handler is true and we are releasing the last
429 * reference, wait for any running oplock break handler of the file
607dfc79
SF
430 * and cancel any pending one.
431 *
432 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
433 * @wait_oplock_handler: must be false if called from oplock_break_handler
434 * @offload: not offloaded on close and oplock breaks
b98749ca
AA
435 *
436 */
32546a95
RS
437void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
438 bool wait_oplock_handler, bool offload)
b33879aa 439{
2b0143b5 440 struct inode *inode = d_inode(cifs_file->dentry);
96daf2b0 441 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
233839b1 442 struct TCP_Server_Info *server = tcon->ses->server;
e66673e3 443 struct cifsInodeInfo *cifsi = CIFS_I(inode);
24261fc2
MG
444 struct super_block *sb = inode->i_sb;
445 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
233839b1
PS
446 struct cifs_fid fid;
447 struct cifs_pending_open open;
ca7df8e0 448 bool oplock_break_cancelled;
cdff08e7 449
3afca265 450 spin_lock(&tcon->open_file_lock);
1a67c415 451 spin_lock(&cifsi->open_file_lock);
3afca265 452 spin_lock(&cifs_file->file_info_lock);
5f6dbc9e 453 if (--cifs_file->count > 0) {
3afca265 454 spin_unlock(&cifs_file->file_info_lock);
1a67c415 455 spin_unlock(&cifsi->open_file_lock);
3afca265 456 spin_unlock(&tcon->open_file_lock);
cdff08e7
SF
457 return;
458 }
3afca265 459 spin_unlock(&cifs_file->file_info_lock);
cdff08e7 460
233839b1
PS
461 if (server->ops->get_lease_key)
462 server->ops->get_lease_key(inode, &fid);
463
464 /* store open in pending opens to make sure we don't miss lease break */
465 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
466
cdff08e7
SF
467 /* remove it from the lists */
468 list_del(&cifs_file->flist);
469 list_del(&cifs_file->tlist);
fae8044c 470 atomic_dec(&tcon->num_local_opens);
cdff08e7
SF
471
472 if (list_empty(&cifsi->openFileList)) {
f96637be 473 cifs_dbg(FYI, "closing last open instance for inode %p\n",
2b0143b5 474 d_inode(cifs_file->dentry));
25364138
PS
475 /*
476 * In strict cache mode we need invalidate mapping on the last
477 * close because it may cause a error when we open this file
478 * again and get at least level II oplock.
479 */
4f8ba8a0 480 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
aff8d5ca 481 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
c6723628 482 cifs_set_oplock_level(cifsi, 0);
cdff08e7 483 }
3afca265 484
1a67c415 485 spin_unlock(&cifsi->open_file_lock);
3afca265 486 spin_unlock(&tcon->open_file_lock);
cdff08e7 487
b98749ca
AA
488 oplock_break_cancelled = wait_oplock_handler ?
489 cancel_work_sync(&cifs_file->oplock_break) : false;
ad635942 490
cdff08e7 491 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
0ff78a22 492 struct TCP_Server_Info *server = tcon->ses->server;
6d5786a3 493 unsigned int xid;
0ff78a22 494
6d5786a3 495 xid = get_xid();
43f8a6a7
SF
496 if (server->ops->close_getattr)
497 server->ops->close_getattr(xid, tcon, cifs_file);
498 else if (server->ops->close)
760ad0ca
PS
499 server->ops->close(xid, tcon, &cifs_file->fid);
500 _free_xid(xid);
cdff08e7
SF
501 }
502
ca7df8e0
SP
503 if (oplock_break_cancelled)
504 cifs_done_oplock_break(cifsi);
505
233839b1
PS
506 cifs_del_pending_open(&open);
507
32546a95
RS
508 if (offload)
509 queue_work(fileinfo_put_wq, &cifs_file->put);
510 else
511 cifsFileInfo_put_final(cifs_file);
b33879aa
JL
512}
513
/*
 * VFS ->open for regular files: try a cached deferred-close handle
 * first, then a POSIX-extensions open where supported, falling back to
 * the NT-style open.  Registers a pending open so a lease break racing
 * with the open is not lost.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strict O_DIRECT mounts switch to the uncached file_operations */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			/* reuse the deferred-close handle: no server round trip */
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto out;
		} else {
			/* flags differ; drop the cached handle and open anew */
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	/* cifs_new_fileinfo consumes the pending open on success */
	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
661
f152fd5f
PS
662static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
663
2ae78ba8
PS
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Called after a handle has been reopened on reconnect; pushes either
 * POSIX or mandatory locks back to the server unless locks are still
 * safely cached locally.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* nested annotation: lock_sem may already be held one level up */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
693
2ae78ba8
PS
/*
 * Reopen an invalidated handle (after reconnect or durable-handle
 * timeout).  @can_flush tells us whether it is safe to flush dirty
 * pages and re-fetch inode info; writers already in progress must pass
 * false to avoid deadlock (see comment below reopen_success).
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened it under fh_mutex */
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions)
			rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
		else if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
859
c3f207ab
RS
/*
 * Delayed-work handler that performs the deferred close of an SMB2
 * handle: remove the deferred-close record and drop the reference that
 * was kept alive for the deferral.
 */
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}
871
1da177e4
LT
/*
 * VFS ->release: if we hold a RHW lease, defer the server close so a
 * quick re-open can reuse the handle; otherwise drop the reference,
 * which closes the handle on the server.
 */
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		/* defer only with a full RHW lease; if kmalloc failed, close now */
		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
		    cinode->lease_granted &&
		    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
		    dclose) {
			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
				inode->i_ctime = inode->i_mtime = current_time(inode);
				cifs_fscache_update_inode_cookie(inode);
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->acregmax))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->acregmax);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				/* reference is dropped later by the deferred work */
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
921
52ace1ef
SF
922void
923cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
924{
f2cca6a7 925 struct cifsFileInfo *open_file;
52ace1ef
SF
926 struct list_head *tmp;
927 struct list_head *tmp1;
f2cca6a7
PS
928 struct list_head tmp_list;
929
96a988ff
PS
930 if (!tcon->use_persistent || !tcon->need_reopen_files)
931 return;
932
933 tcon->need_reopen_files = false;
934
a0a3036b 935 cifs_dbg(FYI, "Reopen persistent handles\n");
f2cca6a7 936 INIT_LIST_HEAD(&tmp_list);
52ace1ef
SF
937
938 /* list all files open on tree connection, reopen resilient handles */
939 spin_lock(&tcon->open_file_lock);
f2cca6a7 940 list_for_each(tmp, &tcon->openFileList) {
52ace1ef 941 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
f2cca6a7
PS
942 if (!open_file->invalidHandle)
943 continue;
944 cifsFileInfo_get(open_file);
945 list_add_tail(&open_file->rlist, &tmp_list);
52ace1ef
SF
946 }
947 spin_unlock(&tcon->open_file_lock);
f2cca6a7
PS
948
949 list_for_each_safe(tmp, tmp1, &tmp_list) {
950 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
96a988ff
PS
951 if (cifs_reopen_file(open_file, false /* do not flush */))
952 tcon->need_reopen_files = true;
f2cca6a7
PS
953 list_del_init(&open_file->rlist);
954 cifsFileInfo_put(open_file);
955 }
52ace1ef
SF
956}
957
1da177e4
LT
/*
 * ->release() for directories opened for readdir. Closes the server-side
 * search handle if it is still live, frees any buffered FIND response data,
 * and releases the private cifsFileInfo. Failures from the server close are
 * deliberately ignored (nothing useful can be done at this point).
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* mark invalid before dropping the lock and issuing the close */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* free any leftover network buffer from an unfinished search */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
1008
85160e03 1009static struct cifsLockInfo *
9645759c 1010cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
7ee1af76 1011{
a88b4707 1012 struct cifsLockInfo *lock =
fb8c4b14 1013 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
1014 if (!lock)
1015 return lock;
1016 lock->offset = offset;
1017 lock->length = length;
1018 lock->type = type;
a88b4707 1019 lock->pid = current->tgid;
9645759c 1020 lock->flags = flags;
a88b4707
PS
1021 INIT_LIST_HEAD(&lock->blist);
1022 init_waitqueue_head(&lock->block_q);
1023 return lock;
85160e03
PS
1024}
1025
f7ba7fe6 1026void
85160e03
PS
1027cifs_del_lock_waiters(struct cifsLockInfo *lock)
1028{
1029 struct cifsLockInfo *li, *tmp;
1030 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1031 list_del_init(&li->blist);
1032 wake_up(&li->block_q);
1033 }
1034}
1035
081c0414
PS
1036#define CIFS_LOCK_OP 0
1037#define CIFS_READ_OP 1
1038#define CIFS_WRITE_OP 2
1039
1040/* @rw_check : 0 - no op, 1 - read, 2 - write */
/* @rw_check : 0 - no op, 1 - read, 2 - write */
/*
 * Scan one fid's lock list for a lock that conflicts with the requested
 * range [offset, offset+length). Overlapping locks are tolerated when:
 *  - a read/write check hits the caller's own fid (except a shared lock
 *    blocking a write through the same fid),
 *  - both locks are shared and owned by the same fid/tgid or same type,
 *  - a lock op involves two OFD locks on the same fid.
 * On conflict, stores the blocking lock in *conf_lock (if non-NULL) and
 * returns true. Caller must hold the inode's lock_sem.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* no overlap with this lock - keep scanning */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks held by the same owner or of equal type coexist */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* two OFD locks through the same fid never conflict */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
1076
579f9053 1077bool
55157dfb 1078cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
9645759c
RS
1079 __u8 type, __u16 flags,
1080 struct cifsLockInfo **conf_lock, int rw_check)
161ebf9f 1081{
fbd35aca 1082 bool rc = false;
f45d3416 1083 struct cifs_fid_locks *cur;
2b0143b5 1084 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
fbd35aca 1085
f45d3416
PS
1086 list_for_each_entry(cur, &cinode->llist, llist) {
1087 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
9645759c
RS
1088 flags, cfile, conf_lock,
1089 rw_check);
fbd35aca
PS
1090 if (rc)
1091 break;
1092 }
fbd35aca
PS
1093
1094 return rc;
161ebf9f
PS
1095}
1096
9a5101c8
PS
1097/*
1098 * Check if there is another lock that prevents us to set the lock (mandatory
1099 * style). If such a lock exists, update the flock structure with its
1100 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1101 * or leave it the same if we can't. Returns 0 if we don't need to request to
1102 * the server or 1 otherwise.
1103 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	/*
	 * Mandatory-style F_GETLK: if a cached lock conflicts, report it in
	 * *flock and return 0; if nothing conflicts locally but locks are not
	 * cacheable, return 1 so the caller asks the server; otherwise report
	 * F_UNLCK and return 0.
	 */
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read-side: we only inspect the lock lists here */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* describe the blocking lock back to the caller */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
1135
/*
 * Append @lock to this fid's lock list under the inode's lock_sem write
 * lock. Ownership of @lock passes to the list.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
1144
9a5101c8
PS
1145/*
1146 * Set the byte-range lock (mandatory style). Returns:
1147 * 1) 0, if we set the lock and don't need to request to the server;
1148 * 2) 1, if no locks prevent us but we need to request to the server;
413d6100 1149 * 3) -EACCES, if there is a lock that prevents us and wait is false.
9a5101c8 1150 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	/*
	 * Mandatory-style lock set (see the function comment above): cache
	 * the lock locally when possible; otherwise report a conflict, fail
	 * with -EACCES, or block on the conflicting lock's wait queue and
	 * retry once it is released.
	 */
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed: take the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* queue ourselves on the conflicting lock and sleep */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		/* woken by cifs_del_lock_waiters() emptying our blist entry */
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: unhook ourselves from the waiter list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1192
9a5101c8
PS
1193/*
1194 * Check if there is another lock that prevents us to set the lock (posix
1195 * style). If such a lock exists, update the flock structure with its
1196 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1197 * or leave it the same if we can't. Returns 0 if we don't need to request to
1198 * the server or 1 otherwise.
1199 */
85160e03 1200static int
4f6bcec9
PS
1201cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1202{
1203 int rc = 0;
496ad9aa 1204 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
4f6bcec9
PS
1205 unsigned char saved_type = flock->fl_type;
1206
50792760
PS
1207 if ((flock->fl_flags & FL_POSIX) == 0)
1208 return 1;
1209
1b4b55a1 1210 down_read(&cinode->lock_sem);
4f6bcec9
PS
1211 posix_test_lock(file, flock);
1212
1213 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1214 flock->fl_type = saved_type;
1215 rc = 1;
1216 }
1217
1b4b55a1 1218 up_read(&cinode->lock_sem);
4f6bcec9
PS
1219 return rc;
1220}
1221
9a5101c8
PS
1222/*
1223 * Set the byte-range lock (posix style). Returns:
2e98c018 1224 * 1) <0, if the error occurs while setting the lock;
1225 * 2) 0, if we set the lock and don't need to request to the server;
1226 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1227 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
9a5101c8 1228 */
4f6bcec9
PS
1229static int
1230cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1231{
496ad9aa 1232 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
2e98c018 1233 int rc = FILE_LOCK_DEFERRED + 1;
50792760
PS
1234
1235 if ((flock->fl_flags & FL_POSIX) == 0)
1236 return rc;
4f6bcec9 1237
d46b0da7 1238 cifs_down_write(&cinode->lock_sem);
4f6bcec9 1239 if (!cinode->can_cache_brlcks) {
1b4b55a1 1240 up_write(&cinode->lock_sem);
50792760 1241 return rc;
4f6bcec9 1242 }
66189be7
PS
1243
1244 rc = posix_lock_file(file, flock, NULL);
1b4b55a1 1245 up_write(&cinode->lock_sem);
9ebb389d 1246 return rc;
4f6bcec9
PS
1247}
1248
/*
 * Re-send all cached mandatory byte-range locks for @cfile to the server
 * (used after an oplock/lease break ends local caching). Locks are batched
 * into LOCKING_ANDX_RANGE arrays, one pass per lock type (exclusive, then
 * shared). Returns 0 or the last non-zero status from cifs_lockv().
 * Caller is expected to hold the inode's lock_sem (list is walked unlocked
 * here).
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* number of ranges that fit in one request buffer */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	/* one pass per lock type; ranges of other types are skipped */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* flush the final partial batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1325
3d22462a
JL
1326static __u32
1327hash_lockowner(fl_owner_t owner)
1328{
1329 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1330}
1331
d5751469
PS
1332struct lock_to_push {
1333 struct list_head llist;
1334 __u64 offset;
1335 __u64 length;
1336 __u32 pid;
1337 __u16 netfid;
1338 __u8 type;
1339};
1340
4f6bcec9 1341static int
b8db928b 1342cifs_push_posix_locks(struct cifsFileInfo *cfile)
4f6bcec9 1343{
2b0143b5 1344 struct inode *inode = d_inode(cfile->dentry);
4f6bcec9 1345 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
bd61e0a9
JL
1346 struct file_lock *flock;
1347 struct file_lock_context *flctx = inode->i_flctx;
e084c1bd 1348 unsigned int count = 0, i;
4f6bcec9 1349 int rc = 0, xid, type;
d5751469
PS
1350 struct list_head locks_to_send, *el;
1351 struct lock_to_push *lck, *tmp;
4f6bcec9 1352 __u64 length;
4f6bcec9 1353
6d5786a3 1354 xid = get_xid();
4f6bcec9 1355
bd61e0a9
JL
1356 if (!flctx)
1357 goto out;
d5751469 1358
e084c1bd
JL
1359 spin_lock(&flctx->flc_lock);
1360 list_for_each(el, &flctx->flc_posix) {
1361 count++;
1362 }
1363 spin_unlock(&flctx->flc_lock);
1364
4f6bcec9
PS
1365 INIT_LIST_HEAD(&locks_to_send);
1366
d5751469 1367 /*
e084c1bd
JL
1368 * Allocating count locks is enough because no FL_POSIX locks can be
1369 * added to the list while we are holding cinode->lock_sem that
ce85852b 1370 * protects locking operations of this inode.
d5751469 1371 */
e084c1bd 1372 for (i = 0; i < count; i++) {
d5751469
PS
1373 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1374 if (!lck) {
1375 rc = -ENOMEM;
1376 goto err_out;
1377 }
1378 list_add_tail(&lck->llist, &locks_to_send);
1379 }
1380
d5751469 1381 el = locks_to_send.next;
6109c850 1382 spin_lock(&flctx->flc_lock);
bd61e0a9 1383 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
d5751469 1384 if (el == &locks_to_send) {
ce85852b
PS
1385 /*
1386 * The list ended. We don't have enough allocated
1387 * structures - something is really wrong.
1388 */
f96637be 1389 cifs_dbg(VFS, "Can't push all brlocks!\n");
d5751469
PS
1390 break;
1391 }
4f6bcec9
PS
1392 length = 1 + flock->fl_end - flock->fl_start;
1393 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1394 type = CIFS_RDLCK;
1395 else
1396 type = CIFS_WRLCK;
d5751469 1397 lck = list_entry(el, struct lock_to_push, llist);
3d22462a 1398 lck->pid = hash_lockowner(flock->fl_owner);
4b4de76e 1399 lck->netfid = cfile->fid.netfid;
d5751469
PS
1400 lck->length = length;
1401 lck->type = type;
1402 lck->offset = flock->fl_start;
4f6bcec9 1403 }
6109c850 1404 spin_unlock(&flctx->flc_lock);
4f6bcec9
PS
1405
1406 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
4f6bcec9
PS
1407 int stored_rc;
1408
4f6bcec9 1409 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
c5fd363d 1410 lck->offset, lck->length, NULL,
4f6bcec9
PS
1411 lck->type, 0);
1412 if (stored_rc)
1413 rc = stored_rc;
1414 list_del(&lck->llist);
1415 kfree(lck);
1416 }
1417
d5751469 1418out:
6d5786a3 1419 free_xid(xid);
4f6bcec9 1420 return rc;
d5751469
PS
1421err_out:
1422 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1423 list_del(&lck->llist);
1424 kfree(lck);
1425 }
1426 goto out;
4f6bcec9
PS
1427}
1428
/*
 * Push all cached byte-range locks for @cfile to the server, choosing the
 * POSIX path when the share supports Unix extensions (and posix brlocks are
 * not disabled by mount flag), otherwise the mandatory path. Clears
 * can_cache_brlcks so later lock requests go straight to the server.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* someone already pushed; nothing to do */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
1455
03776f45 1456static void
04a6aa8a 1457cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
106dc538 1458 bool *wait_flag, struct TCP_Server_Info *server)
1da177e4 1459{
03776f45 1460 if (flock->fl_flags & FL_POSIX)
f96637be 1461 cifs_dbg(FYI, "Posix\n");
03776f45 1462 if (flock->fl_flags & FL_FLOCK)
f96637be 1463 cifs_dbg(FYI, "Flock\n");
03776f45 1464 if (flock->fl_flags & FL_SLEEP) {
f96637be 1465 cifs_dbg(FYI, "Blocking lock\n");
03776f45 1466 *wait_flag = true;
1da177e4 1467 }
03776f45 1468 if (flock->fl_flags & FL_ACCESS)
f96637be 1469 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
03776f45 1470 if (flock->fl_flags & FL_LEASE)
f96637be 1471 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
03776f45 1472 if (flock->fl_flags &
3d6d854a 1473 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
9645759c 1474 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
f96637be 1475 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1da177e4 1476
106dc538 1477 *type = server->vals->large_lock_type;
03776f45 1478 if (flock->fl_type == F_WRLCK) {
f96637be 1479 cifs_dbg(FYI, "F_WRLCK\n");
106dc538 1480 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1481 *lock = 1;
1482 } else if (flock->fl_type == F_UNLCK) {
f96637be 1483 cifs_dbg(FYI, "F_UNLCK\n");
106dc538 1484 *type |= server->vals->unlock_lock_type;
03776f45
PS
1485 *unlock = 1;
1486 /* Check if unlock includes more than one lock range */
1487 } else if (flock->fl_type == F_RDLCK) {
f96637be 1488 cifs_dbg(FYI, "F_RDLCK\n");
106dc538 1489 *type |= server->vals->shared_lock_type;
03776f45
PS
1490 *lock = 1;
1491 } else if (flock->fl_type == F_EXLCK) {
f96637be 1492 cifs_dbg(FYI, "F_EXLCK\n");
106dc538 1493 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1494 *lock = 1;
1495 } else if (flock->fl_type == F_SHLCK) {
f96637be 1496 cifs_dbg(FYI, "F_SHLCK\n");
106dc538 1497 *type |= server->vals->shared_lock_type;
03776f45 1498 *lock = 1;
1da177e4 1499 } else
f96637be 1500 cifs_dbg(FYI, "Unknown type of lock\n");
03776f45 1501}
1da177e4 1502
/*
 * Handle F_GETLK. For POSIX-capable shares, try the local table then ask
 * the server with CIFSSMBPosixLock. For mandatory locks, probe by actually
 * taking and immediately releasing a lock of the requested type on the
 * server: success means the range is free (report F_UNLCK); failure of an
 * exclusive probe is retried as shared to distinguish F_RDLCK from F_WRLCK
 * conflicts. Always returns 0 for the mandatory path once probing is done.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0: answered from the local posix lock table */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* rc == 0: answered from the cached mandatory lock lists */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* probe lock succeeded: undo it and report the range free */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* a shared probe failed: an exclusive lock must be present */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed: retry shared to classify the conflict */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1571
f7ba7fe6 1572void
9ee305b7
PS
1573cifs_move_llist(struct list_head *source, struct list_head *dest)
1574{
1575 struct list_head *li, *tmp;
1576 list_for_each_safe(li, tmp, source)
1577 list_move(li, dest);
1578}
1579
f7ba7fe6 1580void
9ee305b7
PS
1581cifs_free_llist(struct list_head *llist)
1582{
1583 struct cifsLockInfo *li, *tmp;
1584 list_for_each_entry_safe(li, tmp, llist, llist) {
1585 cifs_del_lock_waiters(li);
1586 list_del(&li->llist);
1587 kfree(li);
1588 }
1589}
1590
/*
 * Remove all cached mandatory locks of the calling tgid that fall entirely
 * inside the range being unlocked. While locks are cacheable they are simply
 * dropped; otherwise they are batched into LOCKING_ANDX_RANGE unlock
 * requests. Locks are parked on tmp_llist during a batch so they can be
 * restored to the file's list if the server rejects the unlock.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* number of ranges that fit in one request buffer */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* only locks fully contained in the unlock range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* buffer full - send this unlock batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* flush the final partial batch for this type */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1703
/*
 * Handle F_SETLK/F_SETLKW (and flock) once cifs_read_flock() has decoded
 * the request. POSIX-capable shares go through the posix lock path; other
 * shares use mandatory locks: the lock is validated/cached locally via
 * cifs_lock_add_if(), sent to the server if needed, and recorded with
 * cifs_lock_add(). For FL_POSIX/FL_FLOCK requests the VFS lock tables are
 * updated at the end via locks_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/* <= FILE_LOCK_DEFERRED: handled (or failed) locally */
		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->fl_flags);
		if (!lock)
			return -ENOMEM;

		/* 0: cached locally; 1: must ask the server; <0: conflict */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted the lock - remember it on the fid */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
1797
d0677992
SF
1798int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
1799{
1800 int rc, xid;
1801 int lock = 0, unlock = 0;
1802 bool wait_flag = false;
1803 bool posix_lck = false;
1804 struct cifs_sb_info *cifs_sb;
1805 struct cifs_tcon *tcon;
d0677992 1806 struct cifsFileInfo *cfile;
d0677992
SF
1807 __u32 type;
1808
1809 rc = -EACCES;
1810 xid = get_xid();
1811
1812 if (!(fl->fl_flags & FL_FLOCK))
1813 return -ENOLCK;
1814
1815 cfile = (struct cifsFileInfo *)file->private_data;
1816 tcon = tlink_tcon(cfile->tlink);
1817
1818 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
1819 tcon->ses->server);
1820 cifs_sb = CIFS_FILE_SB(file);
d0677992
SF
1821
1822 if (cap_unix(tcon->ses) &&
1823 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1824 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1825 posix_lck = true;
1826
1827 if (!lock && !unlock) {
1828 /*
1829 * if no lock or unlock then nothing to do since we do not
1830 * know what it is
1831 */
1832 free_xid(xid);
1833 return -EOPNOTSUPP;
1834 }
1835
1836 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
1837 xid);
1838 free_xid(xid);
1839 return rc;
1840
1841
1842}
1843
/*
 * ->lock handler: POSIX byte-range locks (fcntl F_GETLK/F_SETLK/F_SETLKW).
 *
 * Parses the request, determines whether the server supports POSIX
 * byte-range locks (unix extensions + not disabled by mount option),
 * then dispatches to cifs_getlk() for queries or cifs_setlk() for
 * set/unset.  Returns 0 or a negative errno.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	/* decode lock type / set vs. unset / blocking-ness from flock */
	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);
	/*
	 * Mark the inode so close is not deferred once a lock was taken —
	 * NOTE(review): presumably so another client can acquire the lock
	 * promptly after we close; confirm against the deferred-close logic.
	 */
	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1898
597b027f
JL
1899/*
1900 * update the file size (if needed) after a write. Should be called with
1901 * the inode->i_lock held
1902 */
72432ffc 1903void
fbec9ab9
JL
1904cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1905 unsigned int bytes_written)
1906{
1907 loff_t end_of_write = offset + bytes_written;
1908
1909 if (end_of_write > cifsi->server_eof)
1910 cifsi->server_eof = end_of_write;
1911}
1912
/*
 * Synchronously write @write_size bytes from @write_data to the file at
 * *@offset using the given open handle and pid.  Loops until all bytes
 * are written, retrying each chunk on -EAGAIN (reopening an invalidated
 * handle first).  On success advances *@offset, updates the cached
 * server EOF and the in-core i_size, and returns the number of bytes
 * written; on total failure returns a negative errno.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms = {0};

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each chunk at the server's retry-safe size */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* give up; report error only if nothing was written */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* track the server EOF under i_lock, then advance */
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend the in-core size if the write grew the file */
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size) {
			i_size_write(d_inode(dentry), *offset);
			d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
		}
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1996
/*
 * Find an open handle on @cifs_inode that is usable for reading.
 * If @fsuid_only is set (only honored on multiuser mounts), skip handles
 * opened by other fsuids.  Returns a referenced cifsFileInfo (caller
 * must cifsFileInfo_put()) or NULL if none is found.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if ((!open_file->invalidHandle)) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}
630f3f0c 2030
/*
 * Find an open handle on @cifs_inode usable for writing, preferring one
 * opened by the current task (same tgid) and falling back to any pid.
 * @flags: FIND_WR_FSUID_ONLY restricts to the current fsuid (multiuser
 * mounts only); FIND_WR_WITH_DELETE additionally requires DELETE access.
 * If only an invalidated handle exists, try to reopen it (up to
 * MAX_REOPEN_ATT attempts).  On success stores a referenced handle in
 * *@ret_file (caller must cifsFileInfo_put()) and returns 0.
 *
 * Return -EBADF if no handle is found and general rc otherwise.
 */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;
	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of on oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	/* give up after too many reopen attempts of invalidated handles */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass: only consider handles opened by this task */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				/* remember first invalidated candidate */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		/* pin the candidate before dropping the lock for reopen */
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		/* reopen failed: demote this handle and retry the scan */
		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
2121
2122struct cifsFileInfo *
86f740f2 2123find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
fe768d51
PS
2124{
2125 struct cifsFileInfo *cfile;
2126 int rc;
2127
86f740f2 2128 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
fe768d51 2129 if (rc)
a0a3036b 2130 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
fe768d51
PS
2131
2132 return cfile;
6148a742
SF
2133}
2134
/*
 * Look up an open file on @tcon whose full path equals @name and fetch a
 * writable handle for its inode via cifs_get_writable_file() (honoring
 * @flags).  Returns 0 with a referenced handle in *@ret_file, -ENOENT if
 * no open file matches, or a negative errno from path building.
 */
int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
		       int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		/* rebuild this handle's path into the scratch page */
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		/* drop the list lock before the (possibly blocking) lookup */
		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		return cifs_get_writable_file(cinode, flags, ret_file);
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
2167
/*
 * Look up an open file on @tcon whose full path equals @name and fetch a
 * readable handle for its inode via find_readable_file().  Returns 0 with
 * a referenced handle in *@ret_file, -ENOENT if no open file matches or
 * no readable handle exists, or a negative errno from path building.
 */
int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		/* rebuild this handle's path into the scratch page */
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		/* drop the list lock before the handle lookup */
		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		*ret_file = find_readable_file(cinode, 0);
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
2200
/*
 * Write the byte range [from, to) of @page back to the server using any
 * writable handle on the inode.  Used by the writepage path.  The range
 * is clamped so the write never extends the file.  Returns 0 on success,
 * 0 when racing with truncate (nothing to do), or a negative errno.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity check the requested sub-page range */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
				    &open_file);
	if (!rc) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
		else
			rc = -EFAULT;
	} else {
		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
		/* normalize non-retryable failures to -EIO for the caller */
		if (!is_retryable_error(rc))
			rc = -EIO;
	}

	kunmap(page);
	return rc;
}
2258
90ac1387
PS
2259static struct cifs_writedata *
2260wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2261 pgoff_t end, pgoff_t *index,
2262 unsigned int *found_pages)
2263{
90ac1387
PS
2264 struct cifs_writedata *wdata;
2265
2266 wdata = cifs_writedata_alloc((unsigned int)tofind,
2267 cifs_writev_complete);
2268 if (!wdata)
2269 return NULL;
2270
9c19a9cb
JK
2271 *found_pages = find_get_pages_range_tag(mapping, index, end,
2272 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
90ac1387
PS
2273 return wdata;
2274}
2275
/*
 * From the @found_pages candidates in @wdata, lock and mark for writeback
 * a maximal run of consecutive pages that are still dirty, still belong
 * to @mapping, and are within the writeback range.  Stops at the first
 * page that fails any check.  Pages beyond the accepted run are released.
 * Updates *@next (expected next index), *@index (rescan position when
 * nothing was accepted) and *@done.  Returns the number of pages kept,
 * each left locked and in writeback state.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		/* block for the first page, never block for later ones */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2354
/*
 * Fill in the remaining cifs_writedata fields (byte count, tail size,
 * pid, sync mode), trim the reserved credits to the actual transfer
 * size, and hand the request to the server's async writev op.  Returns
 * 0 on submission, -EAGAIN if the handle was invalidated, or a negative
 * errno from the credit adjustment / send.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* last page may be partial: clamp to what lies below i_size */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
	wdata->pid = wdata->cfile->pid;

	rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
	if (rc)
		return rc;

	if (wdata->cfile->invalidHandle)
		rc = -EAGAIN;
	else
		rc = wdata->server->ops->async_writev(wdata,
						      cifs_writedata_release);

	return rc;
}
2383
/*
 * ->writepages: write back dirty pages of @mapping in wsize-sized batches
 * of consecutive pages, each sent as one async write.  Falls back to
 * generic_writepages() when wsize is smaller than a page.  For each batch:
 * acquire a writable handle and send credits, gather and lock dirty
 * pages, submit, and on failure redirty (retryable errors) or flag the
 * pages with an error.  WB_SYNC_ALL retries -EAGAIN batches in place.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct cifsFileInfo *cfile = NULL;
	int rc = 0;
	int saved_rc = 0;
	unsigned int xid;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->ctx->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	xid = get_xid();
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);

retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize;
		pgoff_t next = 0, tofind, saved_index = index;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		int get_file_rc = 0;

		/* drop the handle from the previous iteration, if any */
		if (cfile)
			cifsFileInfo_put(cfile);

		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);

		/* in case of an error store it to return later */
		if (rc)
			get_file_rc = rc;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
						   &wsize, credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		/* ownership of credits and the file handle moves to wdata */
		wdata->credits = credits_on_stack;
		wdata->cfile = cfile;
		wdata->server = server;
		cfile = NULL;

		if (!wdata->cfile) {
			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
				 get_file_rc);
			if (is_retryable_error(get_file_rc))
				rc = get_file_rc;
			else
				rc = -EBADF;
		} else
			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* under WB_SYNC_ALL, redo the same batch after -EAGAIN */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		/* remember the first failure but keep writing other batches */
		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	if (cfile)
		cifsFileInfo_put(cfile);
	free_xid(xid);
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
	return rc;
}
1da177e4 2548
/*
 * Write back one locked page synchronously via cifs_partialpagewrite().
 * Retries forever on retryable errors under WB_SYNC_ALL; otherwise
 * redirties the page (retryable) or marks it with an error.  The page
 * stays locked on return; callers unlock it (see cifs_writepage()).
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2589
/*
 * ->writepage: write one page and release the page lock the VFS gave us.
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return ret;
}
2596
/*
 * ->write_end: finish a buffered write begun by ->write_begin.  If the
 * page is fully up to date the copied bytes are simply left dirty in the
 * page cache; a partially filled, non-uptodate page is written straight
 * through to the server with cifs_write().  Extends i_size if the write
 * grew the file.  Returns the number of bytes accepted or a negative
 * errno from the write-through path.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* use the opener's pid for the wire op when pid forwarding is on */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	/* PageChecked set by write_begin means: whole page was reserved */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* extend the cached file size if we wrote past EOF */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size) {
			i_size_write(inode, pos);
			inode->i_blocks = (512 - 1 + pos) >> 9;
		}
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);

	return rc;
}
2661
/*
 * ->fsync for "strict" cache mode: flush dirty pages in the given range,
 * invalidate the page cache when we no longer hold a read lease/oplock
 * (so later reads refetch from the server), then ask the server to flush
 * via the flush op unless the nostrictsync mount option is set.  If the
 * handle isn't open for write, a writable handle is borrowed for the
 * server-side flush.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(inode->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/* without a read cache grant, drop cached pages so reads revalidate */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto strict_fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			/* borrow a writable handle for the server flush */
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

strict_fsync_exit:
	free_xid(xid);
	return rc;
}
2715
/*
 * ->fsync for non-strict cache modes: flush dirty pages in the given
 * range and, unless the nostrictsync mount option is set, ask the server
 * to flush via the flush op.  Unlike cifs_strict_fsync() this never
 * invalidates the page cache.  If the handle isn't open for write, a
 * writable handle is borrowed for the server-side flush.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			/* borrow a writable handle for the server flush */
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

fsync_exit:
	free_xid(xid);
	return rc;
}
2760
1da177e4
LT
2761/*
2762 * As file closes, flush all cached write data for this inode checking
2763 * for write behind errors.
2764 */
75e1fcc0 2765int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2766{
496ad9aa 2767 struct inode *inode = file_inode(file);
1da177e4
LT
2768 int rc = 0;
2769
eb4b756b 2770 if (file->f_mode & FMODE_WRITE)
d3f1322a 2771 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2772
f96637be 2773 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
f2bf09e9
SF
2774 if (rc)
2775 trace_cifs_flush_err(inode->i_ino, rc);
1da177e4
LT
2776 return rc;
2777}
2778
72432ffc
PS
/*
 * Allocate @num_pages pages into the caller-supplied @pages array.
 * All-or-nothing: on allocation failure every page obtained so far is
 * released and -ENOMEM is returned, so the caller never has to clean up
 * a partially filled array.
 */
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		/* undo the partial allocation above */
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}
2804
2805static inline
2806size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2807{
2808 size_t num_pages;
2809 size_t clen;
2810
2811 clen = min_t(const size_t, len, wsize);
a7103b99 2812 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2813
2814 if (cur_len)
2815 *cur_len = clen;
2816
2817 return num_pages;
2818}
2819
/*
 * Final kref release for an uncached wdata: drop the aio-context
 * reference, release every attached page, then free the wdata itself
 * via the generic release helper.
 */
static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}
2832
c610c4b6
PS
2833static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2834
4a5c80d7
SF
/*
 * Work-queue completion for an uncached async write: advance the cached
 * server EOF (and the VFS inode size, if the write grew the file), wake
 * any waiter, then let the aio context collect the result.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	/* i_lock protects both server_eof and i_size updates */
	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2854
/*
 * Copy up to *len bytes from @from into the pages of @wdata.
 * On return *len holds the number of bytes actually copied and
 * *num_pages the number of pages used. Returns -EFAULT if nothing
 * could be copied at all (e.g. a bogus user address).
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2896
8c5f9c1a
LL
/*
 * Resend a previously failed uncached write (direct I/O path).
 * Re-opens an invalidated handle, waits for enough credits to cover the
 * whole wdata in one request, then reissues the async write. On success
 * the wdata is queued on @wdata_list; on failure its reference is dropped.
 */
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
	struct cifs_aio_ctx *ctx)
{
	unsigned int wsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server = wdata->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}


		/*
		 * Wait for credits to resend this wdata.
		 * Note: we are attempting to resend the whole wdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
						&wsize, &credits);
			if (rc)
				goto fail;

			/* not enough credits yet; return them and back off */
			if (wsize < wdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (wsize < wdata->bytes);
		wdata->credits = credits;

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else {
#ifdef CONFIG_CIFS_SMB_DIRECT
				/* re-register the RDMA MR on resend */
				if (wdata->mr) {
					wdata->mr->need_invalidate = true;
					smbd_deregister_mr(wdata->mr);
					wdata->mr = NULL;
				}
#endif
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
			}
		}

		/* If the write was successfully sent, we are done */
		if (!rc) {
			list_add_tail(&wdata->list, wdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &wdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	return rc;
}
2966
43de94ea
PS
/*
 * Split an uncached write into wsize-bounded chunks and issue each as an
 * async write request, queuing the in-flight wdatas on @wdata_list.
 * For direct I/O the user pages are pinned in place; otherwise data is
 * copied into freshly allocated pages. Each wdata takes a reference on
 * @ctx so the aio context outlives all in-flight requests.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	unsigned int xid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	xid = get_xid();

	do {
		unsigned int wsize;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
						   &wsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			/* pin the user pages directly; no copy */
			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
					 result, iov_iter_type(from),
					 from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}


			wdata->page_offset = start;
			/* bytes occupied in the final (partial) page */
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->server = server;
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits_on_stack;
		wdata->ctx = ctx;
		/* wdata holds a reference on the aio context */
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		if (rc) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/* rewind the iterator and retry this chunk */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	free_xid(xid);
	return rc;
}
3132
/*
 * Reap completed uncached writes for @ctx. Called from each wdata's
 * completion work as well as from the submitter; whoever finds all
 * wdatas complete finalizes ctx->rc and signals the iocb/waiter.
 * Returns early (without blocking) if any wdata is still in flight.
 */
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	ssize_t rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* still in flight: bail out, completion will recall us */
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

					kref_put(&wdata->refcount,
						cifs_uncached_writedata_release);
				}

				list_splice(&tmp_list, &ctx->list);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	cifs_stats_bytes_written(tcon, ctx->total_len);
	/* pagecache is now stale relative to the server copy */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
3214
8c5f9c1a
LL
/*
 * Common implementation for uncached (O_DIRECT and user) writes.
 * Sets up a cifs_aio_ctx, fans the write out via cifs_write_from_iter(),
 * then either returns -EIOCBQUEUED (async iocb) or waits for all chunks
 * to complete. @direct selects pinning user pages over copying them.
 */
static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to non-direct write function.
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && iov_iter_is_kvec(from)) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		/* copies the iovec into ctx so it survives async completion */
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		/* completion path will finish the iocb */
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: report what completed so far */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}
3318
8c5f9c1a
LL
/* O_DIRECT write entry point: pin user pages, no pagecache copy */
ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}
3323
/* Uncached (but non-direct) write entry point: data is copied */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}
3328
/*
 * Cached write path used when we hold a write oplock/lease: write through
 * the pagecache, but first verify no mandatory brlock conflicts with the
 * target range. lock_sem is held across the write so the lock list
 * cannot change underneath us.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
3364
/*
 * Strict-cache write entry point. Dispatches on the caching state:
 * with a write oplock/lease we can use the pagecache (generic path or
 * cifs_writev with brlock checking); otherwise the data goes straight
 * to the server and any read-cached pages are zapped afterwards.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* blocks if an oplock break is being handled; nonzero means error */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
3414
/*
 * Allocate a readdata that adopts the caller-supplied @pages array
 * (used by the direct-I/O path where the pages are already pinned).
 * @complete is the work function run when the read finishes.
 */
static struct cifs_readdata *
cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (rdata != NULL) {
		rdata->pages = pages;
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}
3431
f9f5aca1
LL
3432static struct cifs_readdata *
3433cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3434{
3435 struct page **pages =
6396bb22 3436 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
f9f5aca1
LL
3437 struct cifs_readdata *ret = NULL;
3438
3439 if (pages) {
3440 ret = cifs_readdata_direct_alloc(pages, complete);
3441 if (!ret)
3442 kfree(pages);
3443 }
3444
3445 return ret;
3446}
3447
6993f74a
JL
/*
 * Final kref release for a readdata: tear down the SMB-direct memory
 * registration (if any), drop the file reference, and free the page
 * array and the readdata itself.
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}
3465
/*
 * Populate rdata->pages with @nr_pages freshly allocated pages.
 * On failure every page allocated so far is released and its slot
 * cleared, so rdata is left in a consistent state.
 */
static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/* release the pages allocated before the failure */
		unsigned int nr_page_failed = i;

		for (i = 0; i < nr_page_failed; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}
3492
/*
 * Final kref release for an uncached readdata: drop the aio-context
 * reference and every attached page, then delegate to the generic
 * readdata release.
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
	}
	cifs_readdata_release(refcount);
}
3506
1c892549
JL
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 when all of rdata->got_bytes was delivered, -EFAULT if part
 * of the destination could not be written.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		/* pipe targets need a mapped-kernel-address copy */
		if (unlikely(iov_iter_is_pipe(iter))) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
3540
6685c5e2
PS
3541static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3542
1c892549
JL
/*
 * Work-queue completion for an uncached async read: wake any waiter,
 * then let the aio context reap this rdata's result.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
3554
/*
 * Fill rdata's pages with up to @len bytes of response data, either
 * copied from @iter (decrypted/offloaded path) or read straight off the
 * socket when @iter is NULL. Pages beyond the received length are
 * released and rdata->nr_pages trimmed; rdata->tailsz records how much
 * of the final page is valid. Returns bytes received, or a negative
 * error if nothing usable arrived.
 */
static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		/* only the first page starts at a nonzero offset */
		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;


		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			/* RDMA placed the data directly; just account for it */
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3613
d70b9104
PS
/* Socket variant of uncached_fill_pages (no pre-decoded iterator) */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	return uncached_fill_pages(server, rdata, NULL, len);
}
3620
/* Iterator variant of uncached_fill_pages (data already decoded) */
static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}
3628
6e6e2b86
LL
/*
 * Resend a previously failed uncached read (direct I/O path).
 * Re-opens an invalidated handle, waits until the credits cover the
 * whole rdata in one request, then reissues the async read. On success
 * the rdata is queued on @rdata_list; on failure its reference is dropped.
 */
static int cifs_resend_rdata(struct cifs_readdata *rdata,
			struct list_head *rdata_list,
			struct cifs_aio_ctx *ctx)
{
	unsigned int rsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server;

	/* XXX: should we pick a new channel here? */
	server = rdata->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this rdata.
		 * Note: we are attempting to resend the whole rdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, rdata->bytes,
						&rsize, &credits);

			if (rc)
				goto fail;

			/* not enough credits yet; return them and back off */
			if (rsize < rdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (rsize < rdata->bytes);
		rdata->credits = credits;

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else {
#ifdef CONFIG_CIFS_SMB_DIRECT
				/* re-register the RDMA MR on resend */
				if (rdata->mr) {
					rdata->mr->need_invalidate = true;
					smbd_deregister_mr(rdata->mr);
					rdata->mr = NULL;
				}
#endif
				rc = server->ops->async_readv(rdata);
			}
		}

		/* If the read was successfully sent, we are done */
		if (!rc) {
			/* Add to aio pending list */
			list_add_tail(&rdata->list, rdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &rdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	return rc;
}
3700
0ada36b2
PS
/*
 * Split an uncached read into rsize-bounded chunks and issue each as an
 * async read request, queuing the in-flight rdatas on @rdata_list.
 * For direct I/O the user pages are pinned; otherwise pages are
 * allocated and the data copied out later. Each rdata takes a
 * reference on @ctx so the aio context outlives in-flight requests.
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize;
	struct cifs_credits credits_on_stack;
	struct cifs_credits *credits = &credits_on_stack;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	struct iov_iter direct_iov = ctx->iter;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	/* skip the part of the iterator before this chunk's offset */
	if (ctx->direct_io)
		iov_iter_advance(&direct_iov, offset - ctx->pos);

	do {
		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/* negotiate rsize lazily on first use */
		if (cifs_sb->ctx->rsize == 0)
			cifs_sb->ctx->rsize =
				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
							     cifs_sb->ctx);

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
						   &rsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);

		if (ctx->direct_io) {
			ssize_t result;

			/* pin the user pages directly; no copy */
			result = iov_iter_get_pages_alloc(
					&direct_iov, &pagevec,
					cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
					 result, iov_iter_type(&direct_iov),
					 direct_iov.iov_offset,
					 direct_iov.count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(&direct_iov, cur_len);

			rdata = cifs_readdata_direct_alloc(
					pagevec, cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
			rdata->page_offset = start;
			/* bytes occupied in the final (partial) page */
			rdata->tailsz = npages > 1 ?
				cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
				cur_len;

		} else {

			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
			/* allocate a readdata struct */
			rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			rc = cifs_read_allocate_pages(rdata, npages);
			if (rc) {
				kvfree(rdata->pages);
				kfree(rdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rdata->tailsz = PAGE_SIZE;
		}

		rdata->server = server;
		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits_on_stack;
		rdata->ctx = ctx;
		/* rdata holds a reference on the aio context */
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			add_credits_and_wake_if(server, &rdata->credits, 0);
			kref_put(&rdata->refcount,
				cifs_uncached_readdata_release);
			if (rc == -EAGAIN) {
				/* rewind the iterator and retry this chunk */
				iov_iter_revert(&direct_iov, cur_len);
				continue;
			}
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3847
6685c5e2
PS
/*
 * Reap completed read requests for an uncached/direct read tracked by @ctx:
 * copy data into the destination iterator (non-direct case), resend any
 * request that failed with -EAGAIN, and finally publish the result and
 * complete the aio context (ki_complete for async callers, complete() for
 * synchronous waiters).
 */
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata, *tmp;
	struct iov_iter *to = &ctx->iter;
	struct cifs_sb_info *cifs_sb;
	int rc;

	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* nothing queued -- another collector already drained the list */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* request not done yet -- stop without blocking */
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = 0;
					if (!ctx->direct_io)
						rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
							cifs_uncached_readdata_release);
						continue;
					}
				}

				if (ctx->direct_io) {
					/*
					 * Re-use rdata as this is a
					 * direct I/O
					 */
					rc = cifs_resend_rdata(
						rdata,
						&tmp_list, ctx);
				} else {
					rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list, ctx);

					kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
				}

				/* queue the resent requests and rescan */
				list_splice(&tmp_list, &ctx->list);

				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else if (!ctx->direct_io)
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;

			ctx->total_len += rdata->got_bytes;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	/* non-direct path: derive the total from what the iterator consumed */
	if (!ctx->direct_io)
		ctx->total_len = ctx->len - iov_iter_count(to);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
3952
/*
 * Common implementation behind cifs_user_readv() and cifs_direct_readv():
 * splits the request into async reads (cifs_send_async_read) tracked by a
 * cifs_aio_ctx, then either waits for completion (synchronous kiocb) or
 * returns -EIOCBQUEUED and lets the completion path finish the iocb.
 *
 * Returns bytes read, 0 for an empty iterator, -EIOCBQUEUED for queued
 * async I/O, or a negative error.
 */
static ssize_t __cifs_readv(
	struct kiocb *iocb, struct iov_iter *to, bool direct)
{
	size_t len;
	struct file *file = iocb->ki_filp;
	struct cifs_sb_info *cifs_sb;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	ssize_t rc, total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_aio_ctx *ctx;

	/*
	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
	 * fall back to data copy read path
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && iov_iter_is_kvec(to)) {
		cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
		direct = false;
	}

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	/* ctx holds a reference on the open file for the I/O lifetime */
	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	if (iter_is_iovec(to))
		ctx->should_dirty = true;

	if (direct) {
		ctx->pos = offset;
		ctx->direct_io = true;
		ctx->iter = *to;
		ctx->len = len;
	} else {
		/* non-direct: snapshot the user buffer into ctx pages */
		rc = setup_aio_ctx_iter(ctx, to, READ);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
		len = ctx->len;
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* async caller: results are delivered via ki_complete later */
	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: record -EINTR, keep partial count */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
4055
6e6e2b86
LL
4056ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
4057{
4058 return __cifs_readv(iocb, to, true);
4059}
4060
4061ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
4062{
4063 return __cifs_readv(iocb, to, false);
4064}
4065
/*
 * Read entry point for "strict cache" mounts: use the page cache only when
 * we hold a read (level II) oplock and no conflicting brlock covers the
 * requested range; otherwise go to the server via the uncached path.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* POSIX-capable unix extensions: brlocks are advisory, cache is ok */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     0, NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
1da177e4 4105
f9c6e234
PS
/*
 * Synchronous read helper: read up to @read_size bytes at *@offset into
 * @read_data using the server's sync_read op, one rsize-bounded chunk at a
 * time, advancing *@offset as data arrives. Returns total bytes read, or a
 * negative error if nothing at all was read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms = {0};
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = cifs_pick_channel(tcon->ses);

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		/* inner loop retries the chunk while the server says -EAGAIN */
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if (!(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			io_parms.server = server;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* partial success wins over a late error */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
4197
ca83ce3d
JL
4198/*
4199 * If the page is mmap'ed into a process' page tables, then we need to make
4200 * sure that it doesn't change while being written back.
4201 */
a5240cbd 4202static vm_fault_t
11bac800 4203cifs_page_mkwrite(struct vm_fault *vmf)
ca83ce3d
JL
4204{
4205 struct page *page = vmf->page;
18d04062
SP
4206 struct file *file = vmf->vma->vm_file;
4207 struct inode *inode = file_inode(file);
4208
4209 cifs_fscache_wait_on_page_write(inode, page);
ca83ce3d
JL
4210
4211 lock_page(page);
4212 return VM_FAULT_LOCKED;
4213}
4214
/* VM operations for cifs mmap'ed files; writes go through cifs_page_mkwrite */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
4220
7a6a19b1
PS
4221int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4222{
f04a703c 4223 int xid, rc = 0;
496ad9aa 4224 struct inode *inode = file_inode(file);
7a6a19b1 4225
6d5786a3 4226 xid = get_xid();
7a6a19b1 4227
f04a703c 4228 if (!CIFS_CACHE_READ(CIFS_I(inode)))
4f73c7d3 4229 rc = cifs_zap_mapping(inode);
f04a703c
MW
4230 if (!rc)
4231 rc = generic_file_mmap(file, vma);
4232 if (!rc)
ca83ce3d 4233 vma->vm_ops = &cifs_file_vm_ops;
f04a703c 4234
6d5786a3 4235 free_xid(xid);
7a6a19b1
PS
4236 return rc;
4237}
4238
1da177e4
LT
4239int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4240{
1da177e4
LT
4241 int rc, xid;
4242
6d5786a3 4243 xid = get_xid();
f04a703c 4244
abab095d 4245 rc = cifs_revalidate_file(file);
f04a703c 4246 if (rc)
f96637be
JP
4247 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4248 rc);
f04a703c
MW
4249 if (!rc)
4250 rc = generic_file_mmap(file, vma);
4251 if (!rc)
ca83ce3d 4252 vma->vm_ops = &cifs_file_vm_ops;
f04a703c 4253
6d5786a3 4254 free_xid(xid);
1da177e4
LT
4255 return rc;
4256}
4257
0471ca3f
JL
4258static void
4259cifs_readv_complete(struct work_struct *work)
4260{
b770ddfa 4261 unsigned int i, got_bytes;
0471ca3f
JL
4262 struct cifs_readdata *rdata = container_of(work,
4263 struct cifs_readdata, work);
0471ca3f 4264
b770ddfa 4265 got_bytes = rdata->got_bytes;
c5fab6f4
JL
4266 for (i = 0; i < rdata->nr_pages; i++) {
4267 struct page *page = rdata->pages[i];
4268
6058eaec 4269 lru_cache_add(page);
0471ca3f 4270
b770ddfa
PS
4271 if (rdata->result == 0 ||
4272 (rdata->result == -EAGAIN && got_bytes)) {
0471ca3f
JL
4273 flush_dcache_page(page);
4274 SetPageUptodate(page);
18d04062
SP
4275 } else
4276 SetPageError(page);
0471ca3f
JL
4277
4278 unlock_page(page);
4279
b770ddfa
PS
4280 if (rdata->result == 0 ||
4281 (rdata->result == -EAGAIN && got_bytes))
0471ca3f 4282 cifs_readpage_to_fscache(rdata->mapping->host, page);
18d04062
SP
4283 else
4284 cifs_fscache_uncache_page(rdata->mapping->host, page);
0471ca3f 4285
09cbfeaf 4286 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
b770ddfa 4287
09cbfeaf 4288 put_page(page);
c5fab6f4 4289 rdata->pages[i] = NULL;
0471ca3f 4290 }
6993f74a 4291 kref_put(&rdata->refcount, cifs_readdata_release);
0471ca3f
JL
4292}
4293
/*
 * Fill the pages of @rdata with up to @len bytes of data, either copied
 * from @iter (when non-NULL) or read from the server socket. Pages beyond
 * the data are zeroed (past the server EOF) or released back unread.
 * Returns the number of bytes placed into pages, or a negative error if
 * nothing was transferred.
 */
static int
readpages_fill_pages(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		unsigned int to_read = rdata->pagesz;
		size_t n;

		/* only the first page can start at a non-zero offset */
		if (i == 0)
			to_read -= page_offset;
		else
			page_offset = 0;

		n = to_read;

		if (len >= to_read) {
			len -= to_read;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len + page_offset, to_read - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			/* SMB Direct: data landed via RDMA, nothing to pull */
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
		rdata->got_bytes : result;
}
4379
d70b9104
PS
4380static int
4381cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4382 struct cifs_readdata *rdata, unsigned int len)
4383{
4384 return readpages_fill_pages(server, rdata, NULL, len);
4385}
4386
4387static int
4388cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4389 struct cifs_readdata *rdata,
4390 struct iov_iter *iter)
4391{
4392 return readpages_fill_pages(server, rdata, iter, iter->count);
4393}
4394
387eb92a
PS
/*
 * Peel a run of index-contiguous pages off @page_list (bounded by @rsize
 * bytes), insert each into the page cache locked, and move them onto
 * @tmplist. On success *offset, *bytes and *nr_pages describe the read
 * request that should be issued for the batch.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = lru_to_page(page_list);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
		if (rc) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
4454
1da177e4
LT
/*
 * ->readpages() implementation: first try to satisfy pages from fscache,
 * then batch the remaining contiguous pages into rsize/credit-bounded
 * async read requests. Completion is handled by cifs_readv_complete().
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	int err = 0;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;
	unsigned int xid;

	xid = get_xid();
	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0) {
		free_xid(xid);
		return rc;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list) && !err) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/* negotiate rsize lazily if the mount didn't establish it */
		if (cifs_sb->ctx->rsize == 0)
			cifs_sb->ctx->rsize =
				server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
							     cifs_sb->ctx);

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
						   &rsize, credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			free_xid(xid);
			return 0;
		}

		nr_pages = 0;
		err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (!nr_pages) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->server = server;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
		rdata->credits = credits_on_stack;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			/* release the pages this request had claimed */
			add_credits_and_wake_if(server, &rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	free_xid(xid);
	return rc;
}
4614
a9e9b7bc
SP
4615/*
4616 * cifs_readpage_worker must be called with the page pinned
4617 */
1da177e4
LT
4618static int cifs_readpage_worker(struct file *file, struct page *page,
4619 loff_t *poffset)
4620{
4621 char *read_data;
4622 int rc;
4623
56698236 4624 /* Is the page cached? */
496ad9aa 4625 rc = cifs_readpage_from_fscache(file_inode(file), page);
56698236
SJ
4626 if (rc == 0)
4627 goto read_complete;
4628
1da177e4
LT
4629 read_data = kmap(page);
4630 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 4631
09cbfeaf 4632 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
fb8c4b14 4633
1da177e4
LT
4634 if (rc < 0)
4635 goto io_error;
4636 else
f96637be 4637 cifs_dbg(FYI, "Bytes read %d\n", rc);
fb8c4b14 4638
9b9c5bea
SF
4639 /* we do not want atime to be less than mtime, it broke some apps */
4640 file_inode(file)->i_atime = current_time(file_inode(file));
4641 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4642 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4643 else
4644 file_inode(file)->i_atime = current_time(file_inode(file));
fb8c4b14 4645
09cbfeaf
KS
4646 if (PAGE_SIZE > rc)
4647 memset(read_data + rc, 0, PAGE_SIZE - rc);
1da177e4
LT
4648
4649 flush_dcache_page(page);
4650 SetPageUptodate(page);
9dc06558
SJ
4651
4652 /* send this page to the cache */
496ad9aa 4653 cifs_readpage_to_fscache(file_inode(file), page);
9dc06558 4654
1da177e4 4655 rc = 0;
fb8c4b14 4656
1da177e4 4657io_error:
fb8c4b14 4658 kunmap(page);
466bd31b 4659 unlock_page(page);
56698236
SJ
4660
4661read_complete:
1da177e4
LT
4662 return rc;
4663}
4664
4665static int cifs_readpage(struct file *file, struct page *page)
4666{
f2a26a3c 4667 loff_t offset = page_file_offset(page);
1da177e4 4668 int rc = -EACCES;
6d5786a3 4669 unsigned int xid;
1da177e4 4670
6d5786a3 4671 xid = get_xid();
1da177e4
LT
4672
4673 if (file->private_data == NULL) {
0f3bc09e 4674 rc = -EBADF;
6d5786a3 4675 free_xid(xid);
0f3bc09e 4676 return rc;
1da177e4
LT
4677 }
4678
f96637be 4679 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
b6b38f70 4680 page, (int)offset, (int)offset);
1da177e4
LT
4681
4682 rc = cifs_readpage_worker(file, page, &offset);
4683
6d5786a3 4684 free_xid(xid);
1da177e4
LT
4685 return rc;
4686}
4687
a403a0a3
SF
4688static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4689{
4690 struct cifsFileInfo *open_file;
4691
cb248819 4692 spin_lock(&cifs_inode->open_file_lock);
a403a0a3 4693 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 4694 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
cb248819 4695 spin_unlock(&cifs_inode->open_file_lock);
a403a0a3
SF
4696 return 1;
4697 }
4698 }
cb248819 4699 spin_unlock(&cifs_inode->open_file_lock);
a403a0a3
SF
4700 return 0;
4701}
4702
1da177e4
LT
4703/* We do not want to update the file size from server for inodes
4704 open for write - to avoid races with writepage extending
4705 the file - in the future we could consider allowing
fb8c4b14 4706 refreshing the inode only on increases in the file size
1da177e4
LT
4707 but this is tricky to do without racing with writebehind
4708 page caching in the current Linux kernel design */
4b18f2a9 4709bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 4710{
a403a0a3 4711 if (!cifsInode)
4b18f2a9 4712 return true;
50c2f753 4713
a403a0a3
SF
4714 if (is_inode_writable(cifsInode)) {
4715 /* This inode is open for write at least once */
c32a0b68
SF
4716 struct cifs_sb_info *cifs_sb;
4717
c32a0b68 4718 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 4719 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 4720 /* since no page cache to corrupt on directio
c32a0b68 4721 we can change size safely */
4b18f2a9 4722 return true;
c32a0b68
SF
4723 }
4724
fb8c4b14 4725 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 4726 return true;
7ba52631 4727
4b18f2a9 4728 return false;
23e7dd7d 4729 } else
4b18f2a9 4730 return true;
1da177e4
LT
4731}
4732
d9414774
NP
4733static int cifs_write_begin(struct file *file, struct address_space *mapping,
4734 loff_t pos, unsigned len, unsigned flags,
4735 struct page **pagep, void **fsdata)
1da177e4 4736{
466bd31b 4737 int oncethru = 0;
09cbfeaf
KS
4738 pgoff_t index = pos >> PAGE_SHIFT;
4739 loff_t offset = pos & (PAGE_SIZE - 1);
a98ee8c1
JL
4740 loff_t page_start = pos & PAGE_MASK;
4741 loff_t i_size;
4742 struct page *page;
4743 int rc = 0;
d9414774 4744
f96637be 4745 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
d9414774 4746
466bd31b 4747start:
54566b2c 4748 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
4749 if (!page) {
4750 rc = -ENOMEM;
4751 goto out;
4752 }
8a236264 4753
a98ee8c1
JL
4754 if (PageUptodate(page))
4755 goto out;
8a236264 4756
a98ee8c1
JL
4757 /*
4758 * If we write a full page it will be up to date, no need to read from
4759 * the server. If the write is short, we'll end up doing a sync write
4760 * instead.
4761 */
09cbfeaf 4762 if (len == PAGE_SIZE)
a98ee8c1 4763 goto out;
8a236264 4764
a98ee8c1
JL
4765 /*
4766 * optimize away the read when we have an oplock, and we're not
4767 * expecting to use any of the data we'd be reading in. That
4768 * is, when the page lies beyond the EOF, or straddles the EOF
4769 * and the write will cover all of the existing data.
4770 */
18cceb6a 4771 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
a98ee8c1
JL
4772 i_size = i_size_read(mapping->host);
4773 if (page_start >= i_size ||
4774 (offset == 0 && (pos + len) >= i_size)) {
4775 zero_user_segments(page, 0, offset,
4776 offset + len,
09cbfeaf 4777 PAGE_SIZE);
a98ee8c1
JL
4778 /*
4779 * PageChecked means that the parts of the page
4780 * to which we're not writing are considered up
4781 * to date. Once the data is copied to the
4782 * page, it can be set uptodate.
4783 */
4784 SetPageChecked(page);
4785 goto out;
4786 }
4787 }
d9414774 4788
466bd31b 4789 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
a98ee8c1
JL
4790 /*
4791 * might as well read a page, it is fast enough. If we get
4792 * an error, we don't need to return it. cifs_write_end will
4793 * do a sync write instead since PG_uptodate isn't set.
4794 */
4795 cifs_readpage_worker(file, page, &page_start);
09cbfeaf 4796 put_page(page);
466bd31b
SP
4797 oncethru = 1;
4798 goto start;
8a236264
SF
4799 } else {
4800 /* we could try using another file handle if there is one -
4801 but how would we lock it to prevent close of that handle
4802 racing with this read? In any case
d9414774 4803 this will be written out by write_end so is fine */
1da177e4 4804 }
a98ee8c1
JL
4805out:
4806 *pagep = page;
4807 return rc;
1da177e4
LT
4808}
4809
85f2d6b4
SJ
4810static int cifs_release_page(struct page *page, gfp_t gfp)
4811{
4812 if (PagePrivate(page))
4813 return 0;
4814
4815 return cifs_fscache_release_page(page, gfp);
4816}
4817
d47992f8
LC
4818static void cifs_invalidate_page(struct page *page, unsigned int offset,
4819 unsigned int length)
85f2d6b4
SJ
4820{
4821 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4822
09cbfeaf 4823 if (offset == 0 && length == PAGE_SIZE)
85f2d6b4
SJ
4824 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4825}
4826
9ad1506b
PS
4827static int cifs_launder_page(struct page *page)
4828{
4829 int rc = 0;
4830 loff_t range_start = page_offset(page);
09cbfeaf 4831 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
9ad1506b
PS
4832 struct writeback_control wbc = {
4833 .sync_mode = WB_SYNC_ALL,
4834 .nr_to_write = 0,
4835 .range_start = range_start,
4836 .range_end = range_end,
4837 };
4838
f96637be 4839 cifs_dbg(FYI, "Launder page: %p\n", page);
9ad1506b
PS
4840
4841 if (clear_page_dirty_for_io(page))
4842 rc = cifs_writepage_locked(page, &wbc);
4843
4844 cifs_fscache_invalidate_page(page, page->mapping->host);
4845 return rc;
4846}
4847
/*
 * Work handler servicing an oplock (or lease) break from the server.
 *
 * Downgrades the cached oplock state, flushes and/or invalidates cached
 * data as the new level requires, pushes cached byte-range locks to the
 * server, cancels a pending deferred close for this handle if one is
 * queued, and finally sends the oplock break acknowledgement unless the
 * break was cancelled.  Drops the file reference taken when the break was
 * queued.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	bool purge_cache = false;
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	/* do not race with in-flight writers; they must settle first */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* propagate the break to any local leaseholders */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			/* read caching lost: wait out writeback and zap */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * When oplock break is received and there are no active
	 * file handles but cached, then schedule deferred close immediately.
	 * So, new open will not use cached handle.
	 */
	spin_lock(&CIFS_I(inode)->deferred_lock);
	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	spin_unlock(&CIFS_I(inode)->deferred_lock);
	if (is_deferred &&
	    cfile->deferred_close_scheduled &&
	    delayed_work_pending(&cfile->deferred)) {
		if (cancel_delayed_work(&cfile->deferred)) {
			/* drop the reference held by the cancelled work */
			_cifsFileInfo_put(cfile, false, false);
			goto oplock_break_done;
		}
	}
	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
oplock_break_done:
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	cifs_done_oplock_break(cinode);
}
4926
dca69288
SF
4927/*
4928 * The presence of cifs_direct_io() in the address space ops vector
 4928 * allows open() O_DIRECT flags which would have failed otherwise.
4930 *
4931 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4932 * so this method should never be called.
4933 *
4934 * Direct IO is not yet supported in the cached mode.
4935 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	/* never reached on cache=none mounts; see the comment above */
	return -EINVAL;
}
4945
4e8aea30
SF
4946static int cifs_swap_activate(struct swap_info_struct *sis,
4947 struct file *swap_file, sector_t *span)
4948{
4949 struct cifsFileInfo *cfile = swap_file->private_data;
4950 struct inode *inode = swap_file->f_mapping->host;
4951 unsigned long blocks;
4952 long long isize;
4953
4954 cifs_dbg(FYI, "swap activate\n");
4955
4956 spin_lock(&inode->i_lock);
4957 blocks = inode->i_blocks;
4958 isize = inode->i_size;
4959 spin_unlock(&inode->i_lock);
4960 if (blocks*512 < isize) {
4961 pr_warn("swap activate: swapfile has holes\n");
4962 return -EINVAL;
4963 }
4964 *span = sis->pages;
4965
a0a3036b 4966 pr_warn_once("Swap support over SMB3 is experimental\n");
4e8aea30
SF
4967
4968 /*
4969 * TODO: consider adding ACL (or documenting how) to prevent other
4970 * users (on this or other systems) from reading it
4971 */
4972
4973
4974 /* TODO: add sk_set_memalloc(inet) or similar */
4975
4976 if (cfile)
4977 cfile->swapfile = true;
4978 /*
4979 * TODO: Since file already open, we can't open with DENY_ALL here
4980 * but we could add call to grab a byte range lock to prevent others
4981 * from reading or writing the file
4982 */
4983
4984 return 0;
4985}
4986
4987static void cifs_swap_deactivate(struct file *file)
4988{
4989 struct cifsFileInfo *cfile = file->private_data;
4990
4991 cifs_dbg(FYI, "swap deactivate\n");
4992
4993 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
4994
4995 if (cfile)
4996 cfile->swapfile = false;
4997
4998 /* do we need to unpin (or unlock) the file */
4999}
dca69288 5000
/* Address-space operations used on mounts with the default (cached) mode. */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
	/*
	 * TODO: investigate and if useful we could add an cifs_migratePage
	 * helper (under an CONFIG_MIGRATION) in the future, and also
	 * investigate and add an is_dirty_writeback helper if needed
	 */
	.swap_activate = cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
};
273d81d6
DK
5021
5022/*
5023 * cifs_readpages requires the server to support a buffer large enough to
5024 * contain the header plus one complete page of data. Otherwise, we need
5025 * to leave cifs_readpages out of the address space operations.
5026 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no .readpages: the small buffer cannot hold header + full page */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};