/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or  FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
07b92d0d 46
1da177e4
LT
47static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
e10f7b55
JL
60 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
7fc8f4e9 63}
e10f7b55 64
608712fe 65static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 66{
608712fe 67 u32 posix_flags = 0;
e10f7b55 68
7fc8f4e9 69 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 70 posix_flags = SMB_O_RDONLY;
7fc8f4e9 71 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
72 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
07b92d0d 76 if (flags & O_CREAT) {
608712fe 77 posix_flags |= SMB_O_CREAT;
07b92d0d
SF
78 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
f96637be
JP
81 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
07b92d0d 83
608712fe
JL
84 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 87 if (flags & O_DSYNC)
608712fe 88 posix_flags |= SMB_O_SYNC;
7fc8f4e9 89 if (flags & O_DIRECTORY)
608712fe 90 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 91 if (flags & O_NOFOLLOW)
608712fe 92 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 93 if (flags & O_DIRECT)
608712fe 94 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
95
96 return posix_flags;
1da177e4
LT
97}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
55aa2e09
SF
107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
1da177e4
LT
109 else
110 return FILE_OPEN;
111}
112
608712fe
JL
/*
 * Open a file using the CIFS POSIX extensions.
 *
 * On success, *poplock and *pnetfid are filled in by the server reply.
 * If @pinode is non-NULL, a new inode is instantiated (or the existing
 * one refreshed) from the FILE_UNIX_BASIC_INFO returned by the create
 * call.  Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the process umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server did not return file attributes */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the pre-existing inode from the server's view */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
174
eeb910a6
PS
/*
 * Open a file the traditional (non-POSIX-extensions) way via the
 * protocol-specific ->open server operation, then refresh the inode
 * metadata from the server.  Returns 0 on success or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buf receives FILE_ALL_INFO from the open reply for inode refresh */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
253
63b7d3a4
PS
254static bool
255cifs_has_mand_locks(struct cifsInodeInfo *cinode)
256{
257 struct cifs_fid_locks *cur;
258 bool has_locks = false;
259
260 down_read(&cinode->lock_sem);
261 list_for_each_entry(cur, &cinode->llist, llist) {
262 if (!list_empty(&cur->locks)) {
263 has_locks = true;
264 break;
265 }
266 }
267 up_read(&cinode->lock_sem);
268 return has_locks;
269}
270
15ecb436 271struct cifsFileInfo *
fb1214e4 272cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
15ecb436
JL
273 struct tcon_link *tlink, __u32 oplock)
274{
275 struct dentry *dentry = file->f_path.dentry;
276 struct inode *inode = dentry->d_inode;
4b4de76e
PS
277 struct cifsInodeInfo *cinode = CIFS_I(inode);
278 struct cifsFileInfo *cfile;
f45d3416 279 struct cifs_fid_locks *fdlocks;
233839b1 280 struct cifs_tcon *tcon = tlink_tcon(tlink);
63b7d3a4 281 struct TCP_Server_Info *server = tcon->ses->server;
4b4de76e
PS
282
283 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
284 if (cfile == NULL)
285 return cfile;
286
f45d3416
PS
287 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
288 if (!fdlocks) {
289 kfree(cfile);
290 return NULL;
291 }
292
293 INIT_LIST_HEAD(&fdlocks->locks);
294 fdlocks->cfile = cfile;
295 cfile->llist = fdlocks;
1b4b55a1 296 down_write(&cinode->lock_sem);
f45d3416 297 list_add(&fdlocks->llist, &cinode->llist);
1b4b55a1 298 up_write(&cinode->lock_sem);
f45d3416 299
4b4de76e 300 cfile->count = 1;
4b4de76e
PS
301 cfile->pid = current->tgid;
302 cfile->uid = current_fsuid();
303 cfile->dentry = dget(dentry);
304 cfile->f_flags = file->f_flags;
305 cfile->invalidHandle = false;
306 cfile->tlink = cifs_get_tlink(tlink);
4b4de76e 307 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
f45d3416 308 mutex_init(&cfile->fh_mutex);
15ecb436 309
24261fc2
MG
310 cifs_sb_active(inode->i_sb);
311
63b7d3a4
PS
312 /*
313 * If the server returned a read oplock and we have mandatory brlocks,
314 * set oplock level to None.
315 */
53ef1016 316 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
f96637be 317 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
63b7d3a4
PS
318 oplock = 0;
319 }
320
4477288a 321 spin_lock(&cifs_file_list_lock);
63b7d3a4 322 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
233839b1
PS
323 oplock = fid->pending_open->oplock;
324 list_del(&fid->pending_open->olist);
325
42873b0a 326 fid->purge_cache = false;
63b7d3a4 327 server->ops->set_fid(cfile, fid, oplock);
233839b1
PS
328
329 list_add(&cfile->tlist, &tcon->openFileList);
15ecb436
JL
330 /* if readable file instance put first in list*/
331 if (file->f_mode & FMODE_READ)
4b4de76e 332 list_add(&cfile->flist, &cinode->openFileList);
15ecb436 333 else
4b4de76e 334 list_add_tail(&cfile->flist, &cinode->openFileList);
4477288a 335 spin_unlock(&cifs_file_list_lock);
15ecb436 336
42873b0a 337 if (fid->purge_cache)
4f73c7d3 338 cifs_zap_mapping(inode);
42873b0a 339
4b4de76e
PS
340 file->private_data = cfile;
341 return cfile;
15ecb436
JL
342}
343
764a1b1a
JL
/*
 * Take an extra reference on the file private data under
 * cifs_file_list_lock.  Returns the passed-in pointer for convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
352
cdff08e7
SF
353/*
354 * Release a reference on the file private data. This may involve closing
5f6dbc9e
JL
355 * the filehandle out on the server. Must be called without holding
356 * cifs_file_list_lock.
cdff08e7 357 */
b33879aa
JL
358void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
359{
e66673e3 360 struct inode *inode = cifs_file->dentry->d_inode;
96daf2b0 361 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
233839b1 362 struct TCP_Server_Info *server = tcon->ses->server;
e66673e3 363 struct cifsInodeInfo *cifsi = CIFS_I(inode);
24261fc2
MG
364 struct super_block *sb = inode->i_sb;
365 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
cdff08e7 366 struct cifsLockInfo *li, *tmp;
233839b1
PS
367 struct cifs_fid fid;
368 struct cifs_pending_open open;
cdff08e7
SF
369
370 spin_lock(&cifs_file_list_lock);
5f6dbc9e 371 if (--cifs_file->count > 0) {
cdff08e7
SF
372 spin_unlock(&cifs_file_list_lock);
373 return;
374 }
375
233839b1
PS
376 if (server->ops->get_lease_key)
377 server->ops->get_lease_key(inode, &fid);
378
379 /* store open in pending opens to make sure we don't miss lease break */
380 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
381
cdff08e7
SF
382 /* remove it from the lists */
383 list_del(&cifs_file->flist);
384 list_del(&cifs_file->tlist);
385
386 if (list_empty(&cifsi->openFileList)) {
f96637be
JP
387 cifs_dbg(FYI, "closing last open instance for inode %p\n",
388 cifs_file->dentry->d_inode);
25364138
PS
389 /*
390 * In strict cache mode we need invalidate mapping on the last
391 * close because it may cause a error when we open this file
392 * again and get at least level II oplock.
393 */
4f8ba8a0 394 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
aff8d5ca 395 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
c6723628 396 cifs_set_oplock_level(cifsi, 0);
cdff08e7
SF
397 }
398 spin_unlock(&cifs_file_list_lock);
399
ad635942
JL
400 cancel_work_sync(&cifs_file->oplock_break);
401
cdff08e7 402 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
0ff78a22 403 struct TCP_Server_Info *server = tcon->ses->server;
6d5786a3 404 unsigned int xid;
0ff78a22 405
6d5786a3 406 xid = get_xid();
0ff78a22 407 if (server->ops->close)
760ad0ca
PS
408 server->ops->close(xid, tcon, &cifs_file->fid);
409 _free_xid(xid);
cdff08e7
SF
410 }
411
233839b1
PS
412 cifs_del_pending_open(&open);
413
f45d3416
PS
414 /*
415 * Delete any outstanding lock records. We'll lose them when the file
cdff08e7
SF
416 * is closed anyway.
417 */
1b4b55a1 418 down_write(&cifsi->lock_sem);
f45d3416 419 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
cdff08e7 420 list_del(&li->llist);
85160e03 421 cifs_del_lock_waiters(li);
cdff08e7 422 kfree(li);
b33879aa 423 }
f45d3416
PS
424 list_del(&cifs_file->llist->llist);
425 kfree(cifs_file->llist);
1b4b55a1 426 up_write(&cifsi->lock_sem);
cdff08e7
SF
427
428 cifs_put_tlink(cifs_file->tlink);
429 dput(cifs_file->dentry);
24261fc2 430 cifs_sb_deactive(sb);
cdff08e7 431 kfree(cifs_file);
b33879aa
JL
432}
433
1da177e4 434int cifs_open(struct inode *inode, struct file *file)
233839b1 435
1da177e4
LT
436{
437 int rc = -EACCES;
6d5786a3 438 unsigned int xid;
590a3fe0 439 __u32 oplock;
1da177e4 440 struct cifs_sb_info *cifs_sb;
b8c32dbb 441 struct TCP_Server_Info *server;
96daf2b0 442 struct cifs_tcon *tcon;
7ffec372 443 struct tcon_link *tlink;
fb1214e4 444 struct cifsFileInfo *cfile = NULL;
1da177e4 445 char *full_path = NULL;
7e12eddb 446 bool posix_open_ok = false;
fb1214e4 447 struct cifs_fid fid;
233839b1 448 struct cifs_pending_open open;
1da177e4 449
6d5786a3 450 xid = get_xid();
1da177e4
LT
451
452 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
453 tlink = cifs_sb_tlink(cifs_sb);
454 if (IS_ERR(tlink)) {
6d5786a3 455 free_xid(xid);
7ffec372
JL
456 return PTR_ERR(tlink);
457 }
458 tcon = tlink_tcon(tlink);
b8c32dbb 459 server = tcon->ses->server;
1da177e4 460
e6a00296 461 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 462 if (full_path == NULL) {
0f3bc09e 463 rc = -ENOMEM;
232341ba 464 goto out;
1da177e4
LT
465 }
466
f96637be 467 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
b6b38f70 468 inode, file->f_flags, full_path);
276a74a4 469
787aded6
NJ
470 if (file->f_flags & O_DIRECT &&
471 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
472 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
473 file->f_op = &cifs_file_direct_nobrl_ops;
474 else
475 file->f_op = &cifs_file_direct_ops;
476 }
477
233839b1 478 if (server->oplocks)
276a74a4
SF
479 oplock = REQ_OPLOCK;
480 else
481 oplock = 0;
482
64cc2c63 483 if (!tcon->broken_posix_open && tcon->unix_ext &&
29e20f9c
PS
484 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
485 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 486 /* can not refresh inode info since size could be stale */
2422f676 487 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 488 cifs_sb->mnt_file_mode /* ignored */,
fb1214e4 489 file->f_flags, &oplock, &fid.netfid, xid);
276a74a4 490 if (rc == 0) {
f96637be 491 cifs_dbg(FYI, "posix open succeeded\n");
7e12eddb 492 posix_open_ok = true;
64cc2c63
SF
493 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
494 if (tcon->ses->serverNOS)
f96637be
JP
495 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
496 tcon->ses->serverName,
497 tcon->ses->serverNOS);
64cc2c63 498 tcon->broken_posix_open = true;
276a74a4
SF
499 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
500 (rc != -EOPNOTSUPP)) /* path not found or net err */
501 goto out;
fb1214e4
PS
502 /*
503 * Else fallthrough to retry open the old way on network i/o
504 * or DFS errors.
505 */
276a74a4
SF
506 }
507
233839b1
PS
508 if (server->ops->get_lease_key)
509 server->ops->get_lease_key(inode, &fid);
510
511 cifs_add_pending_open(&fid, tlink, &open);
512
7e12eddb 513 if (!posix_open_ok) {
b8c32dbb
PS
514 if (server->ops->get_lease_key)
515 server->ops->get_lease_key(inode, &fid);
516
7e12eddb 517 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
fb1214e4 518 file->f_flags, &oplock, &fid, xid);
233839b1
PS
519 if (rc) {
520 cifs_del_pending_open(&open);
7e12eddb 521 goto out;
233839b1 522 }
7e12eddb 523 }
47c78b7f 524
fb1214e4
PS
525 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
526 if (cfile == NULL) {
b8c32dbb
PS
527 if (server->ops->close)
528 server->ops->close(xid, tcon, &fid);
233839b1 529 cifs_del_pending_open(&open);
1da177e4
LT
530 rc = -ENOMEM;
531 goto out;
532 }
1da177e4 533
9451a9a5
SJ
534 cifs_fscache_set_inode_cookie(inode, file);
535
7e12eddb 536 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
fb1214e4
PS
537 /*
538 * Time to set mode which we can not set earlier due to
539 * problems creating new read-only files.
540 */
7e12eddb
PS
541 struct cifs_unix_set_info_args args = {
542 .mode = inode->i_mode,
49418b2c
EB
543 .uid = INVALID_UID, /* no change */
544 .gid = INVALID_GID, /* no change */
7e12eddb
PS
545 .ctime = NO_CHANGE_64,
546 .atime = NO_CHANGE_64,
547 .mtime = NO_CHANGE_64,
548 .device = 0,
549 };
fb1214e4
PS
550 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
551 cfile->pid);
1da177e4
LT
552 }
553
554out:
1da177e4 555 kfree(full_path);
6d5786a3 556 free_xid(xid);
7ffec372 557 cifs_put_tlink(tlink);
1da177e4
LT
558 return rc;
559}
560
f152fd5f
PS
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	/* push POSIX-style locks when the server supports them and the
	   mount did not disable POSIX brlocks; otherwise push mandatory */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
592
2ae78ba8
PS
/*
 * Re-open a file whose handle was invalidated (e.g. by reconnect or a
 * durable-handle timeout).  If @can_flush, dirty pages are written back
 * and the inode metadata refreshed from the server; otherwise only the
 * handle is re-established.  Cached byte-range locks are re-pushed when
 * needed.  Serialized per-file via cfile->fh_mutex.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* another thread already reopened the handle - nothing to do */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
741
742int cifs_close(struct inode *inode, struct file *file)
743{
77970693
JL
744 if (file->private_data != NULL) {
745 cifsFileInfo_put(file->private_data);
746 file->private_data = NULL;
747 }
7ee1af76 748
cdff08e7
SF
749 /* return code from the ->release op is always ignored */
750 return 0;
1da177e4
LT
751}
752
/*
 * VFS ->release for directories: close any still-open server search
 * handle, free the cached network search buffer, and release the
 * per-open private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cifs_file_list_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* an uncompleted readdir left the handle open on the server */
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
803
85160e03 804static struct cifsLockInfo *
fbd35aca 805cifs_lock_init(__u64 offset, __u64 length, __u8 type)
7ee1af76 806{
a88b4707 807 struct cifsLockInfo *lock =
fb8c4b14 808 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
809 if (!lock)
810 return lock;
811 lock->offset = offset;
812 lock->length = length;
813 lock->type = type;
a88b4707
PS
814 lock->pid = current->tgid;
815 INIT_LIST_HEAD(&lock->blist);
816 init_waitqueue_head(&lock->block_q);
817 return lock;
85160e03
PS
818}
819
f7ba7fe6 820void
85160e03
PS
821cifs_del_lock_waiters(struct cifsLockInfo *lock)
822{
823 struct cifsLockInfo *li, *tmp;
824 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
825 list_del_init(&li->blist);
826 wake_up(&li->block_q);
827 }
828}
829
081c0414
PS
830#define CIFS_LOCK_OP 0
831#define CIFS_READ_OP 1
832#define CIFS_WRITE_OP 2
833
/* @rw_check : 0 - no op, 1 - read, 2 - write */
/*
 * Scan one fid's lock list for a lock overlapping [offset, offset+length)
 * that conflicts with a lock/read/write of the given @type by @cfile.
 * On conflict, store the offending lock in *conf_lock (if non-NULL) and
 * return true.  Caller must hold the inode's lock_sem.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* no byte-range overlap - cannot conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks are compatible with each other and with the
		   owner's own locks through the same fid */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
865
579f9053 866bool
55157dfb 867cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
579f9053 868 __u8 type, struct cifsLockInfo **conf_lock,
081c0414 869 int rw_check)
161ebf9f 870{
fbd35aca 871 bool rc = false;
f45d3416 872 struct cifs_fid_locks *cur;
55157dfb 873 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
fbd35aca 874
f45d3416
PS
875 list_for_each_entry(cur, &cinode->llist, llist) {
876 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
579f9053 877 cfile, conf_lock, rw_check);
fbd35aca
PS
878 if (rc)
879 break;
880 }
fbd35aca
PS
881
882 return rc;
161ebf9f
PS
883}
884
9a5101c8
PS
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock back through flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* locks are not cached locally - must ask the server */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
922
161ebf9f 923static void
fbd35aca 924cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 925{
fbd35aca 926 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1b4b55a1 927 down_write(&cinode->lock_sem);
f45d3416 928 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 929 up_write(&cinode->lock_sem);
7ee1af76
JA
930}
931
9a5101c8
PS
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and we may cache - install locally only */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blocked list and
		 * sleep (without lock_sem) until cifs_del_lock_waiters()
		 * empties our blist entry, then retry the whole check.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - unhook ourselves from the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
978
9a5101c8
PS
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	/* posix_test_lock() may overwrite fl_type; remember the original */
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		/* no local conflict but server state is authoritative */
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
1007
9a5101c8
PS
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* caching disabled - caller must go to the server (rc == 1) */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* blocked on a conflicting lock - wait and retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		/* interrupted: remove ourselves from the blocked list */
		posix_unblock_lock(flock);
	}
	return rc;
}
1040
/*
 * Send all cached mandatory byte-range locks of @cfile to the server,
 * batching as many LOCKING_ANDX_RANGE entries per request as maxBuf
 * allows. Two passes over the list: exclusive locks first, then shared
 * (the two entries of types[]). Returns the last non-zero cifs_lockv
 * status, or 0 if every batch succeeded.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	/* how many lock ranges fit in one SMB after the header */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			/* marshal this lock into the wire format */
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* flush the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1111
4f6bcec9
PS
1112/* copied from fs/locks.c with a name change */
1113#define cifs_for_each_lock(inode, lockp) \
1114 for (lockp = &inode->i_flock; *lockp != NULL; \
1115 lockp = &(*lockp)->fl_next)
1116
d5751469
PS
/*
 * Snapshot of one POSIX byte-range lock copied out of the VFS lock list,
 * so it can be sent to the server after inode->i_lock has been dropped
 * (filled in and consumed by cifs_push_posix_locks).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start of the range (from fl_start) */
	__u64 length;		/* range length: 1 + fl_end - fl_start */
	__u32 pid;		/* lock owner, copied from fl_pid */
	__u16 netfid;		/* server file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1125
/*
 * Push all cached POSIX byte-range locks of @cfile to the server.
 *
 * The VFS lock list may only be walked under inode->i_lock (a spinlock),
 * and we cannot issue blocking network calls there. So: count the FL_POSIX
 * locks, preallocate that many lock_to_push entries with GFP_KERNEL,
 * re-walk the list copying each lock into the preallocated entries, drop
 * i_lock, and only then send them with CIFSSMBPosixLock.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = cfile->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* pass 1: count the POSIX locks we will have to push */
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	spin_unlock(&inode->i_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* pass 2: copy each lock into the preallocated entries */
	el = locks_to_send.next;
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&inode->i_lock);

	/* pass 3: now that no spinlock is held, send the locks */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way - release what we already have */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1214
/*
 * Flush all locally cached byte-range locks of @cfile to the server and
 * stop caching further brlocks on this inode. Chooses the POSIX path when
 * the unix extensions allow it, otherwise the mandatory-lock path.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* someone already pushed the locks - nothing to do */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
1241
03776f45 1242static void
04a6aa8a 1243cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
106dc538 1244 bool *wait_flag, struct TCP_Server_Info *server)
1da177e4 1245{
03776f45 1246 if (flock->fl_flags & FL_POSIX)
f96637be 1247 cifs_dbg(FYI, "Posix\n");
03776f45 1248 if (flock->fl_flags & FL_FLOCK)
f96637be 1249 cifs_dbg(FYI, "Flock\n");
03776f45 1250 if (flock->fl_flags & FL_SLEEP) {
f96637be 1251 cifs_dbg(FYI, "Blocking lock\n");
03776f45 1252 *wait_flag = true;
1da177e4 1253 }
03776f45 1254 if (flock->fl_flags & FL_ACCESS)
f96637be 1255 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
03776f45 1256 if (flock->fl_flags & FL_LEASE)
f96637be 1257 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
03776f45 1258 if (flock->fl_flags &
3d6d854a
JL
1259 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1260 FL_ACCESS | FL_LEASE | FL_CLOSE)))
f96637be 1261 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1da177e4 1262
106dc538 1263 *type = server->vals->large_lock_type;
03776f45 1264 if (flock->fl_type == F_WRLCK) {
f96637be 1265 cifs_dbg(FYI, "F_WRLCK\n");
106dc538 1266 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1267 *lock = 1;
1268 } else if (flock->fl_type == F_UNLCK) {
f96637be 1269 cifs_dbg(FYI, "F_UNLCK\n");
106dc538 1270 *type |= server->vals->unlock_lock_type;
03776f45
PS
1271 *unlock = 1;
1272 /* Check if unlock includes more than one lock range */
1273 } else if (flock->fl_type == F_RDLCK) {
f96637be 1274 cifs_dbg(FYI, "F_RDLCK\n");
106dc538 1275 *type |= server->vals->shared_lock_type;
03776f45
PS
1276 *lock = 1;
1277 } else if (flock->fl_type == F_EXLCK) {
f96637be 1278 cifs_dbg(FYI, "F_EXLCK\n");
106dc538 1279 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1280 *lock = 1;
1281 } else if (flock->fl_type == F_SHLCK) {
f96637be 1282 cifs_dbg(FYI, "F_SHLCK\n");
106dc538 1283 *type |= server->vals->shared_lock_type;
03776f45 1284 *lock = 1;
1da177e4 1285 } else
f96637be 1286 cifs_dbg(FYI, "Unknown type of lock\n");
03776f45 1287}
1da177e4 1288
/*
 * Handle an F_GETLK request: report through @flock whether a conflicting
 * lock exists. For the mandatory path this is probed by actually taking a
 * temporary server lock and releasing it again (first exclusive, then
 * shared), since SMB has no pure "test" operation for this server type.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* local VFS state may already answer the question */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* check the locally cached mandatory locks first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* probe lock succeeded - no conflict; release it again */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* a shared probe failed - an exclusive lock must conflict */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive probe failed - retry with a shared lock to find out
	   whether the conflict is a read or a write lock */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1356
f7ba7fe6 1357void
9ee305b7
PS
1358cifs_move_llist(struct list_head *source, struct list_head *dest)
1359{
1360 struct list_head *li, *tmp;
1361 list_for_each_safe(li, tmp, source)
1362 list_move(li, dest);
1363}
1364
f7ba7fe6 1365void
9ee305b7
PS
1366cifs_free_llist(struct list_head *llist)
1367{
1368 struct cifsLockInfo *li, *tmp;
1369 list_for_each_entry_safe(li, tmp, llist, llist) {
1370 cifs_del_lock_waiters(li);
1371 list_del(&li->llist);
1372 kfree(li);
1373 }
1374}
1375
/*
 * Remove all cached locks of the current thread group that fall entirely
 * inside the range described by @flock, sending batched unlock requests
 * to the server where needed. Locks being unlocked are parked on a
 * temporary list so they can be restored if the server request fails.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	/* how many unlock ranges fit into one SMB after the header */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* only locks fully contained in the range qualify */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* flush the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1482
/*
 * Handle an F_SETLK/F_SETLKW request on either the POSIX (unix
 * extensions) or the mandatory locking path. The local lock list is
 * updated only after the server accepted the lock.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = cfile->dentry->d_inode;

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0: cached locally, no server round trip needed */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/* rc == 0: cached locally; rc == 1: must ask the server */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted the lock - record it locally */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
1562
1563int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1564{
1565 int rc, xid;
1566 int lock = 0, unlock = 0;
1567 bool wait_flag = false;
1568 bool posix_lck = false;
1569 struct cifs_sb_info *cifs_sb;
1570 struct cifs_tcon *tcon;
1571 struct cifsInodeInfo *cinode;
1572 struct cifsFileInfo *cfile;
1573 __u16 netfid;
04a6aa8a 1574 __u32 type;
03776f45
PS
1575
1576 rc = -EACCES;
6d5786a3 1577 xid = get_xid();
03776f45 1578
f96637be
JP
1579 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1580 cmd, flock->fl_flags, flock->fl_type,
1581 flock->fl_start, flock->fl_end);
03776f45 1582
03776f45
PS
1583 cfile = (struct cifsFileInfo *)file->private_data;
1584 tcon = tlink_tcon(cfile->tlink);
106dc538
PS
1585
1586 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1587 tcon->ses->server);
1588
7119e220 1589 cifs_sb = CIFS_FILE_SB(file);
4b4de76e 1590 netfid = cfile->fid.netfid;
496ad9aa 1591 cinode = CIFS_I(file_inode(file));
03776f45 1592
29e20f9c 1593 if (cap_unix(tcon->ses) &&
03776f45
PS
1594 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1595 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1596 posix_lck = true;
1597 /*
1598 * BB add code here to normalize offset and length to account for
1599 * negative length which we can not accept over the wire.
1600 */
1601 if (IS_GETLK(cmd)) {
4f6bcec9 1602 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
6d5786a3 1603 free_xid(xid);
03776f45
PS
1604 return rc;
1605 }
1606
1607 if (!lock && !unlock) {
1608 /*
1609 * if no lock or unlock then nothing to do since we do not
1610 * know what it is
1611 */
6d5786a3 1612 free_xid(xid);
03776f45 1613 return -EOPNOTSUPP;
7ee1af76
JA
1614 }
1615
03776f45
PS
1616 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1617 xid);
6d5786a3 1618 free_xid(xid);
1da177e4
LT
1619 return rc;
1620}
1621
597b027f
JL
1622/*
1623 * update the file size (if needed) after a write. Should be called with
1624 * the inode->i_lock held
1625 */
72432ffc 1626void
fbec9ab9
JL
1627cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1628 unsigned int bytes_written)
1629{
1630 loff_t end_of_write = offset + bytes_written;
1631
1632 if (end_of_write > cifsi->server_eof)
1633 cifsi->server_eof = end_of_write;
1634}
1635
ba9ad725
PS
/*
 * Synchronously write @write_size bytes from @write_data to the server at
 * *@offset, retrying on -EAGAIN (reopening an invalidated handle if
 * needed) and chunking each request to the server's retry size. On
 * success *@offset is advanced and the cached/in-core file size updated.
 * Returns the number of bytes written, or a negative error if nothing
 * was written at all.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the server's retry size */
			len = min(server->ops->wp_retry_size(dentry->d_inode),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* report the error only if nothing got through */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* grow the in-core size if we extended the file */
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1720
6508d904
JL
/*
 * Find an open, valid, readable handle for @cifs_inode. When @fsuid_only
 * is set (and the mount is multiuser), only handles opened by the current
 * fsuid are considered. Returns the handle with a reference held, or
 * NULL. The caller must drop the reference with cifsFileInfo_put().
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
630f3f0c 1754
6508d904
JL
/*
 * Find an open, writable handle for @cifs_inode, preferring one belonging
 * to the current thread group, then any owner, and finally attempting up
 * to MAX_REOPEN_ATT reopens of an invalidated writable handle. Returns
 * the handle with a reference held (caller does cifsFileInfo_put()), or
 * NULL if none could be found or revived.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	/* give up after too many failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass: only handles of the current thread group */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				/* remember one invalidated candidate */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		/* try to revive the invalidated handle (network I/O, so the
		   spinlock must be dropped first) */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* reopen failed - demote this handle to the end of
			   the list and retry with another candidate */
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
1834
1da177e4
LT
/*
 * Write the byte range [from, to) of @page back to the server using any
 * writable handle on the inode. Returns 0 on success, -EIO/-EFAULT on
 * failure, or the negative write error.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity-check the requested sub-page range */
	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1888
90ac1387
PS
/*
 * Allocate a cifs_writedata sized for up to @tofind pages and fill its
 * page array with references to the dirty pages of @mapping starting at
 * *@index.  The total number of pages collected is returned through
 * @found_pages; @index is advanced by find_get_pages_tag() as it scans.
 * Returns NULL on allocation failure.
 */
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_CACHE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}
1922
7e48ff82
PS
/*
 * Lock and claim a contiguous run of the @found_pages candidate pages in
 * @wdata for one write request.  Scanning stops at the first page that is
 * non-contiguous, outside the writeback range, no longer dirty, or (for
 * non-sync writeback) still under writeback.  Claimed pages are marked
 * writeback; pages not taken get their references dropped.  Returns the
 * number of pages claimed (possibly 0).
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		page_cache_release(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2002
619aa48e
PS
/*
 * Finish filling in @wdata (offset, sizes, file handle) and issue the
 * async write covering the @nr_pages claimed pages.  A writable handle
 * is looked up fresh each time (any handle held from a previous attempt
 * is dropped first).  All pages are unlocked before returning.
 * Returns 0 on successful send, -EBADF when no writable handle exists,
 * or the rc from async_writev.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_CACHE_SIZE;
	/* last page may extend past EOF; only write the valid tail */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_CACHE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2037
/*
 * address_space_operations->writepages for CIFS: batch contiguous dirty
 * pages into async write requests of up to wsize bytes, throttled by the
 * server's MTU credits.  Falls back to generic_writepages() when wsize
 * is smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* blocks until enough credits for up to wsize are reserved */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				/* -EAGAIN means retryable: keep page dirty */
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* for data-integrity writeback, retry the same range */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
1da177e4 2150
9ad1506b
PS
/*
 * Write back a single locked page via cifs_partialpagewrite(), retrying
 * -EAGAIN for data-integrity (WB_SYNC_ALL) writeback.  The caller holds
 * the page lock; this function ends writeback but does not unlock.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}
2189
9ad1506b
PS
/* ->writepage: write the page out, then drop the page lock we were given */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
2196
d9414774
NP
/*
 * ->write_end: commit @copied bytes written into @page at @pos.  Pages
 * that are fully up to date are just dirtied for later writeback;
 * otherwise the data is written through synchronously with cifs_write()
 * using this file's own handle.  Updates i_size, unlocks and releases
 * the page.  Returns bytes committed or a negative error.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the opener's pid when mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2257
02c24a82
JB
/*
 * fsync for strict-cache mounts: flush dirty pages, invalidate the page
 * cache when we no longer hold a read-caching oplock/lease, and ask the
 * server to flush the file (unless mounted nostrictsync).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2300
/*
 * Non-strict fsync: flush dirty pages locally, then ask the server to
 * flush the open file (unless mounted nostrictsync).  Unlike
 * cifs_strict_fsync(), the page cache is never invalidated here.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2334
1da177e4
LT
2335/*
2336 * As file closes, flush all cached write data for this inode checking
2337 * for write behind errors.
2338 */
75e1fcc0 2339int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2340{
496ad9aa 2341 struct inode *inode = file_inode(file);
1da177e4
LT
2342 int rc = 0;
2343
eb4b756b 2344 if (file->f_mode & FMODE_WRITE)
d3f1322a 2345 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2346
f96637be 2347 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
1da177e4
LT
2348
2349 return rc;
2350}
2351
72432ffc
PS
2352static int
2353cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2354{
2355 int rc = 0;
2356 unsigned long i;
2357
2358 for (i = 0; i < num_pages; i++) {
e94f7ba1 2359 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
72432ffc
PS
2360 if (!pages[i]) {
2361 /*
2362 * save number of pages we have already allocated and
2363 * return with ENOMEM error
2364 */
2365 num_pages = i;
2366 rc = -ENOMEM;
e94f7ba1 2367 break;
72432ffc
PS
2368 }
2369 }
2370
e94f7ba1
JL
2371 if (rc) {
2372 for (i = 0; i < num_pages; i++)
2373 put_page(pages[i]);
2374 }
72432ffc
PS
2375 return rc;
2376}
2377
2378static inline
2379size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2380{
2381 size_t num_pages;
2382 size_t clen;
2383
2384 clen = min_t(const size_t, len, wsize);
a7103b99 2385 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2386
2387 if (cur_len)
2388 *cur_len = clen;
2389
2390 return num_pages;
2391}
2392
da82f7e7 2393static void
4a5c80d7 2394cifs_uncached_writedata_release(struct kref *refcount)
da82f7e7
JL
2395{
2396 int i;
4a5c80d7
SF
2397 struct cifs_writedata *wdata = container_of(refcount,
2398 struct cifs_writedata, refcount);
2399
2400 for (i = 0; i < wdata->nr_pages; i++)
2401 put_page(wdata->pages[i]);
2402 cifs_writedata_release(refcount);
2403}
2404
/*
 * Work-queue completion for an uncached async write: advance the cached
 * server EOF and the local i_size under i_lock, wake the waiter in
 * cifs_iovec_write(), then drop the request's reference.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2423
/*
 * Copy up to *@len bytes of user data from @from into the pages of
 * @wdata.  On return *@len holds the number of bytes actually copied and
 * *@num_pages the number of pages used.  Returns -EFAULT when nothing at
 * all could be copied (bogus user address), 0 otherwise.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2465
43de94ea
PS
/*
 * Split an uncached write of @len bytes at @offset into wsize-bounded
 * async write requests.  Each successfully sent request is appended to
 * @wdata_list for the caller to wait on; on -EAGAIN the iterator is
 * rewound and the chunk retried.  Returns 0 or the first fatal rc.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;
	/* keep a copy so -EAGAIN retries can rewind the iterator */
	memcpy(&saved_from, from, sizeof(struct iov_iter));

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		/* reopen a stale handle before sending, if needed */
		if (!wdata->cfile->invalidHandle ||
		    !cifs_reopen_file(wdata->cfile, false))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				memcpy(from, &saved_from,
				       sizeof(struct iov_iter));
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2563
/*
 * Uncached (O_DIRECT-style) write: fan the data out into async write
 * requests, then collect the completions in offset order, resending any
 * chunk that fails with -EAGAIN.  Returns the total bytes written, or a
 * negative error when nothing was written.
 */
static ssize_t
cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
{
	size_t len;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	struct iov_iter saved_from;
	int rc;

	len = iov_iter_count(from);
	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	if (!len)
		return 0;

	iov_iter_truncate(from, len);

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	/* saved for rewinding the iterator when a chunk must be resent */
	memcpy(&saved_from, from, sizeof(struct iov_iter));

	rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb,
				  &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				memcpy(&tmp_from, &saved_from,
				       sizeof(struct iov_iter));
				iov_iter_advance(&tmp_from,
						 wdata->offset - *poffset);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2660
/*
 * ->write_iter entry point for uncached writes: perform the write, then
 * mark the page cache for invalidation so cached readers don't see
 * stale data, and advance the file position.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t written;
	struct inode *inode;
	loff_t pos = iocb->ki_pos;

	inode = file_inode(iocb->ki_filp);

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, from, &pos);
	if (written > 0) {
		set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
		iocb->ki_pos = pos;
	}

	return written;
}
2683
/*
 * Cached write used when we hold a write oplock but mandatory byte-range
 * locks may exist: the write proceeds only if it does not conflict with
 * an exclusive brlock.  lock_sem is held for read across the check and
 * the write so no conflicting lock can be added meanwhile.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;
	loff_t lock_pos = iocb->ki_pos;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	mutex_lock(&inode->i_mutex);
	/* O_APPEND writes start at EOF, so check conflicts there */
	if (file->f_flags & O_APPEND)
		lock_pos = i_size_read(inode);
	if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP)) {
		rc = __generic_file_write_iter(iocb, from);
		mutex_unlock(&inode->i_mutex);

		if (rc > 0) {
			ssize_t err;

			err = generic_write_sync(file, iocb->ki_pos - rc, rc);
			if (err < 0)
				rc = err;
		}
	} else {
		mutex_unlock(&inode->i_mutex);
	}
	up_read(&cinode->lock_sem);
	return rc;
}
2722
/*
 * ->write_iter for strict-cache mounts.  With a write-caching oplock the
 * write goes through the page cache (generic path when POSIX brlocks
 * apply, cifs_writev otherwise); without one the data is written through
 * uncached, and any level2 (read) oplock is broken locally so cached
 * pages cannot serve stale data afterwards.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* serialize against oplock-break handling; nonzero means bail out */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2770
0471ca3f 2771static struct cifs_readdata *
f4e49cd2 2772cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
0471ca3f
JL
2773{
2774 struct cifs_readdata *rdata;
f4e49cd2 2775
c5fab6f4
JL
2776 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2777 GFP_KERNEL);
0471ca3f 2778 if (rdata != NULL) {
6993f74a 2779 kref_init(&rdata->refcount);
1c892549
JL
2780 INIT_LIST_HEAD(&rdata->list);
2781 init_completion(&rdata->done);
0471ca3f 2782 INIT_WORK(&rdata->work, complete);
0471ca3f 2783 }
f4e49cd2 2784
0471ca3f
JL
2785 return rdata;
2786}
2787
6993f74a
JL
2788void
2789cifs_readdata_release(struct kref *refcount)
0471ca3f 2790{
6993f74a
JL
2791 struct cifs_readdata *rdata = container_of(refcount,
2792 struct cifs_readdata, refcount);
2793
2794 if (rdata->cfile)
2795 cifsFileInfo_put(rdata->cfile);
2796
0471ca3f
JL
2797 kfree(rdata);
2798}
2799
1c892549 2800static int
c5fab6f4 2801cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
1c892549
JL
2802{
2803 int rc = 0;
c5fab6f4 2804 struct page *page;
1c892549
JL
2805 unsigned int i;
2806
c5fab6f4 2807 for (i = 0; i < nr_pages; i++) {
1c892549
JL
2808 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2809 if (!page) {
2810 rc = -ENOMEM;
2811 break;
2812 }
c5fab6f4 2813 rdata->pages[i] = page;
1c892549
JL
2814 }
2815
2816 if (rc) {
c5fab6f4
JL
2817 for (i = 0; i < nr_pages; i++) {
2818 put_page(rdata->pages[i]);
2819 rdata->pages[i] = NULL;
1c892549
JL
2820 }
2821 }
2822 return rc;
2823}
2824
2825static void
2826cifs_uncached_readdata_release(struct kref *refcount)
2827{
1c892549
JL
2828 struct cifs_readdata *rdata = container_of(refcount,
2829 struct cifs_readdata, refcount);
c5fab6f4 2830 unsigned int i;
1c892549 2831
c5fab6f4
JL
2832 for (i = 0; i < rdata->nr_pages; i++) {
2833 put_page(rdata->pages[i]);
2834 rdata->pages[i] = NULL;
1c892549
JL
2835 }
2836 cifs_readdata_release(refcount);
2837}
2838
1c892549
JL
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iter: destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 when all of rdata->got_bytes was copied, -EFAULT otherwise.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		/* short copy with room left in iter means a fault; stop */
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
2864
/*
 * Work-queue completion for an uncached async read: wake the waiter,
 * then drop the reference held for the in-flight request.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
2874
/*
 * Pull up to @len bytes of read response data off the socket into the
 * pages of @rdata, one page per cifs_readv_from_socket() call.
 * Pages beyond the data length are released (not held hostage); a short
 * final page is zero-padded and its length recorded in rdata->tailsz.
 *
 * Returns the number of bytes read, or a negative error.  Partial data
 * is still reported as progress unless the connection was aborted.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial progress, except when the connection died */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
2924
/*
 * Split an uncached read of [offset, offset+len) into rsize-bounded async
 * read requests and queue each resulting cifs_readdata on @rdata_list.
 *
 * Each iteration reserves server credits first; on any failure the credits
 * are returned and the rdata reference dropped.  -EAGAIN from the send is
 * retried by looping; other errors abort the loop.  Returns 0 on success
 * or the last error (caller resets rc if anything was queued).
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	/* forward the original opener's pid if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/* may block until enough credits are available */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		/* reopen a stale handle before sending, if needed */
		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2994
/*
 * Uncached (O_DIRECT-style) read entry point: fan the request out into
 * async reads via cifs_send_async_read(), then wait for each piece in
 * increasing-offset order and copy the results into @to.
 *
 * Pieces that come back -EAGAIN (reconnect) are resent from where they
 * left off; partial data already received is copied first.  A short read
 * of a piece discards everything after it (-ENODATA, masked to 0 below).
 * Advances iocb->ki_pos by the number of bytes actually copied.
 */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				/* resend only the part not yet received */
				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				/* restart: the list was modified */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3098
/*
 * strictcache read entry point: use the page cache only when we hold a
 * read oplock/lease and no mandatory brlock conflicts with the range;
 * otherwise fall back to the uncached path.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* POSIX byte-range lock semantics: no mandatory-lock check needed */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
1da177e4 3138
/*
 * Synchronous read helper (used by cifs_readpage_worker and friends):
 * read up to @read_size bytes at *@offset into @read_data using the
 * server's sync_read op, looping over rsize-limited chunks and retrying
 * each chunk on -EAGAIN (handle reopen after reconnect).
 *
 * Returns the number of bytes read if any progress was made, otherwise
 * the error from the first failing chunk.  *@offset is advanced by the
 * bytes consumed.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			/* reopen stale handle (e.g. after reconnect) */
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid,
						    &io_parms, &bytes_read,
						    &cur_offset, &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				/* partial success: report what we have */
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3229
ca83ce3d
JL
3230/*
3231 * If the page is mmap'ed into a process' page tables, then we need to make
3232 * sure that it doesn't change while being written back.
3233 */
3234static int
3235cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3236{
3237 struct page *page = vmf->page;
3238
3239 lock_page(page);
3240 return VM_FAULT_LOCKED;
3241}
3242
3243static struct vm_operations_struct cifs_file_vm_ops = {
3244 .fault = filemap_fault,
f1820361 3245 .map_pages = filemap_map_pages,
ca83ce3d 3246 .page_mkwrite = cifs_page_mkwrite,
0b173bc4 3247 .remap_pages = generic_file_remap_pages,
ca83ce3d
JL
3248};
3249
7a6a19b1
PS
3250int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3251{
3252 int rc, xid;
496ad9aa 3253 struct inode *inode = file_inode(file);
7a6a19b1 3254
6d5786a3 3255 xid = get_xid();
7a6a19b1 3256
18cceb6a 3257 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
4f73c7d3 3258 rc = cifs_zap_mapping(inode);
6feb9891
PS
3259 if (rc)
3260 return rc;
3261 }
7a6a19b1
PS
3262
3263 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3264 if (rc == 0)
3265 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3266 free_xid(xid);
7a6a19b1
PS
3267 return rc;
3268}
3269
1da177e4
LT
3270int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3271{
1da177e4
LT
3272 int rc, xid;
3273
6d5786a3 3274 xid = get_xid();
abab095d 3275 rc = cifs_revalidate_file(file);
1da177e4 3276 if (rc) {
f96637be
JP
3277 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3278 rc);
6d5786a3 3279 free_xid(xid);
1da177e4
LT
3280 return rc;
3281 }
3282 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3283 if (rc == 0)
3284 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3285 free_xid(xid);
1da177e4
LT
3286 return rc;
3287}
3288
/*
 * Work item run when a readpages async read finishes: publish each page
 * (mark uptodate on success or on partial data before a reconnect), add
 * it to the LRU, push it to fscache, unlock it and drop our reference.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		/* -EAGAIN with data means the pages filled so far are good */
		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		/* count down so trailing pages past got_bytes stay !uptodate */
		got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3321
/*
 * Pull @len bytes of readpages response data off the socket into the
 * pages of @rdata.  Like the uncached variant, but pages beyond the data
 * are handled with the page cache in mind: pages past the server's
 * (probable) EOF are published as zero-filled to stop the VFS retrying
 * them, other surplus pages are just unlocked and released.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial progress, except when the connection died */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3399
/*
 * Peel a run of contiguous pages off the tail of @page_list (which is in
 * declining index order) onto @tmplist, locking each page and inserting
 * it into the page cache, until the run would exceed @rsize, an index
 * discontinuity is hit, or the list is exhausted.
 *
 * On success fills *@nr_pages, *@offset (byte offset of the first page)
 * and *@bytes (total length of the run).  Fails only if the very first
 * page cannot be added to the page cache.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__set_page_locked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, GFP_KERNEL);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__clear_page_locked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	*bytes = PAGE_CACHE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_CACHE_SIZE > rsize)
			break;

		__set_page_locked(page);
		if (add_to_page_cache_locked(page, mapping, page->index,
								GFP_KERNEL)) {
			__clear_page_locked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_CACHE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
3458
/*
 * ->readpages address_space op: satisfy what we can from fscache, then
 * batch the remaining pages into rsize-bounded async read requests.
 * Completion (publishing pages, dropping references) happens in
 * cifs_readv_complete().
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_CACHE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		/* reopen a stale handle before sending, if needed */
		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
3587
/*
 * cifs_readpage_worker must be called with the page pinned
 *
 * Fill one locked page: try fscache first, otherwise do a synchronous
 * read from the server, zero-pad past EOF and publish the page.  The
 * page is unlocked on exit (except on the fscache hit path, where the
 * fscache code handles it).  Returns 0 on success or a negative error.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	/* zero the tail if the read came up short (EOF) */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
3633
3634static int cifs_readpage(struct file *file, struct page *page)
3635{
3636 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3637 int rc = -EACCES;
6d5786a3 3638 unsigned int xid;
1da177e4 3639
6d5786a3 3640 xid = get_xid();
1da177e4
LT
3641
3642 if (file->private_data == NULL) {
0f3bc09e 3643 rc = -EBADF;
6d5786a3 3644 free_xid(xid);
0f3bc09e 3645 return rc;
1da177e4
LT
3646 }
3647
f96637be 3648 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
b6b38f70 3649 page, (int)offset, (int)offset);
1da177e4
LT
3650
3651 rc = cifs_readpage_worker(file, page, &offset);
3652
6d5786a3 3653 free_xid(xid);
1da177e4
LT
3654 return rc;
3655}
3656
a403a0a3
SF
3657static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3658{
3659 struct cifsFileInfo *open_file;
3660
4477288a 3661 spin_lock(&cifs_file_list_lock);
a403a0a3 3662 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 3663 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 3664 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3665 return 1;
3666 }
3667 }
4477288a 3668 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3669 return 0;
3670}
3671
1da177e4
LT
3672/* We do not want to update the file size from server for inodes
3673 open for write - to avoid races with writepage extending
3674 the file - in the future we could consider allowing
fb8c4b14 3675 refreshing the inode only on increases in the file size
1da177e4
LT
3676 but this is tricky to do without racing with writebehind
3677 page caching in the current Linux kernel design */
4b18f2a9 3678bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 3679{
a403a0a3 3680 if (!cifsInode)
4b18f2a9 3681 return true;
50c2f753 3682
a403a0a3
SF
3683 if (is_inode_writable(cifsInode)) {
3684 /* This inode is open for write at least once */
c32a0b68
SF
3685 struct cifs_sb_info *cifs_sb;
3686
c32a0b68 3687 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 3688 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3689 /* since no page cache to corrupt on directio
c32a0b68 3690 we can change size safely */
4b18f2a9 3691 return true;
c32a0b68
SF
3692 }
3693
fb8c4b14 3694 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 3695 return true;
7ba52631 3696
4b18f2a9 3697 return false;
23e7dd7d 3698 } else
4b18f2a9 3699 return true;
1da177e4
LT
3700}
3701
/*
 * ->write_begin address_space op: grab (and, when profitable, pre-fill)
 * the page that will receive the write.  A full-page write or an
 * oplocked write beyond/covering EOF skips the read entirely; otherwise
 * the page is read in once via cifs_readpage_worker() and re-grabbed.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;	/* guards the single read-in retry below */
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		page_cache_release(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3778
85f2d6b4
SJ
3779static int cifs_release_page(struct page *page, gfp_t gfp)
3780{
3781 if (PagePrivate(page))
3782 return 0;
3783
3784 return cifs_fscache_release_page(page, gfp);
3785}
3786
d47992f8
LC
3787static void cifs_invalidate_page(struct page *page, unsigned int offset,
3788 unsigned int length)
85f2d6b4
SJ
3789{
3790 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3791
d47992f8 3792 if (offset == 0 && length == PAGE_CACHE_SIZE)
85f2d6b4
SJ
3793 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3794}
3795
9ad1506b
PS
3796static int cifs_launder_page(struct page *page)
3797{
3798 int rc = 0;
3799 loff_t range_start = page_offset(page);
3800 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3801 struct writeback_control wbc = {
3802 .sync_mode = WB_SYNC_ALL,
3803 .nr_to_write = 0,
3804 .range_start = range_start,
3805 .range_end = range_end,
3806 };
3807
f96637be 3808 cifs_dbg(FYI, "Launder page: %p\n", page);
9ad1506b
PS
3809
3810 if (clear_page_dirty_for_io(page))
3811 rc = cifs_writepage_locked(page, &wbc);
3812
3813 cifs_fscache_invalidate_page(page, page->mapping->host);
3814 return rc;
3815}
3816
/*
 * Work handler run when the server breaks (revokes or downgrades) an
 * oplock/lease on an open file.  Waits for in-flight writers, downgrades
 * the cached oplock state, flushes/zaps the page cache as required,
 * re-pushes byte-range locks, and finally acknowledges the break to the
 * server (unless the break was cancelled, e.g. by a reconnect).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Don't downgrade until pending writers on this inode have drained. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	/* Drop cached oplock to level II or none, per the server's request. */
	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * Read caching cannot be kept while mandatory byte-range locks are
	 * held, since cached reads could bypass the locks; drop to none.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
					cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local leases on the inode. */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/*
			 * No read caching left: wait for writeback and
			 * invalidate all cached pages for this inode.
			 */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* Re-send cached byte-range locks now that caching was downgraded. */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	/* Clear CIFS_INODE_PENDING_OPLOCK_BREAK and wake any waiters. */
	cifs_done_oplock_break(cinode);
}
3871
dca69288
SF
/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct read
 * and write requests so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
	       loff_t pos)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}
3891
3892
/*
 * Address space operations for the default (cached) case, used when the
 * server buffer is large enough for readpages (see cifs_addr_ops_smallbuf
 * below for the fallback that omits .readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
273d81d6
DK
3906
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no .readpages: server buffer too small, see comment above */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	/* no .direct_IO either in the small-buffer case */
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};