]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - fs/cifs/file.c
CIFS: Handle SMB2 lock flags
[mirror_ubuntu-artful-kernel.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
108int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
6d5786a3 110 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
608712fe
JL
111{
112 int rc;
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
96daf2b0 118 struct cifs_tcon *tcon;
608712fe
JL
119
120 cFYI(1, "posix open %s", full_path);
121
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
124 return -ENOMEM;
125
126 tlink = cifs_sb_tlink(cifs_sb);
127 if (IS_ERR(tlink)) {
128 rc = PTR_ERR(tlink);
129 goto posix_open_ret;
130 }
131
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
134
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
141
142 if (rc)
143 goto posix_open_ret;
144
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
147
148 if (!pinode)
149 goto posix_open_ret; /* caller does not need info */
150
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
152
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
157 if (!*pinode) {
158 rc = -ENOMEM;
159 goto posix_open_ret;
160 }
161 } else {
162 cifs_fattr_to_inode(*pinode, &fattr);
163 }
164
165posix_open_ret:
166 kfree(presp_data);
167 return rc;
168}
169
eeb910a6
PS
170static int
171cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
fb1214e4
PS
172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
173 struct cifs_fid *fid, unsigned int xid)
eeb910a6
PS
174{
175 int rc;
fb1214e4 176 int desired_access;
eeb910a6 177 int disposition;
3d3ea8e6 178 int create_options = CREATE_NOT_DIR;
eeb910a6
PS
179 FILE_ALL_INFO *buf;
180
fb1214e4
PS
181 if (!tcon->ses->server->ops->open)
182 return -ENOSYS;
183
184 desired_access = cifs_convert_flags(f_flags);
eeb910a6
PS
185
186/*********************************************************************
187 * open flag mapping table:
188 *
189 * POSIX Flag CIFS Disposition
190 * ---------- ----------------
191 * O_CREAT FILE_OPEN_IF
192 * O_CREAT | O_EXCL FILE_CREATE
193 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
194 * O_TRUNC FILE_OVERWRITE
195 * none of the above FILE_OPEN
196 *
197 * Note that there is not a direct match between disposition
198 * FILE_SUPERSEDE (ie create whether or not file exists although
199 * O_CREAT | O_TRUNC is similar but truncates the existing
200 * file rather than creating a new file as FILE_SUPERSEDE does
201 * (which uses the attributes / metadata passed in on open call)
202 *?
203 *? O_SYNC is a reasonable match to CIFS writethrough flag
204 *? and the read write flags match reasonably. O_LARGEFILE
205 *? is irrelevant because largefile support is always used
206 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
207 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
208 *********************************************************************/
209
210 disposition = cifs_get_disposition(f_flags);
211
212 /* BB pass O_SYNC flag through on file attributes .. BB */
213
214 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
215 if (!buf)
216 return -ENOMEM;
217
3d3ea8e6
SP
218 if (backup_cred(cifs_sb))
219 create_options |= CREATE_OPEN_BACKUP_INTENT;
220
fb1214e4
PS
221 rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
222 desired_access, create_options, fid,
223 oplock, buf, cifs_sb);
eeb910a6
PS
224
225 if (rc)
226 goto out;
227
228 if (tcon->unix_ext)
229 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
230 xid);
231 else
232 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
fb1214e4 233 xid, &fid->netfid);
eeb910a6
PS
234
235out:
236 kfree(buf);
237 return rc;
238}
239
15ecb436 240struct cifsFileInfo *
fb1214e4 241cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
15ecb436
JL
242 struct tcon_link *tlink, __u32 oplock)
243{
244 struct dentry *dentry = file->f_path.dentry;
245 struct inode *inode = dentry->d_inode;
4b4de76e
PS
246 struct cifsInodeInfo *cinode = CIFS_I(inode);
247 struct cifsFileInfo *cfile;
f45d3416 248 struct cifs_fid_locks *fdlocks;
4b4de76e
PS
249
250 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
251 if (cfile == NULL)
252 return cfile;
253
f45d3416
PS
254 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
255 if (!fdlocks) {
256 kfree(cfile);
257 return NULL;
258 }
259
260 INIT_LIST_HEAD(&fdlocks->locks);
261 fdlocks->cfile = cfile;
262 cfile->llist = fdlocks;
263 mutex_lock(&cinode->lock_mutex);
264 list_add(&fdlocks->llist, &cinode->llist);
265 mutex_unlock(&cinode->lock_mutex);
266
4b4de76e 267 cfile->count = 1;
4b4de76e
PS
268 cfile->pid = current->tgid;
269 cfile->uid = current_fsuid();
270 cfile->dentry = dget(dentry);
271 cfile->f_flags = file->f_flags;
272 cfile->invalidHandle = false;
273 cfile->tlink = cifs_get_tlink(tlink);
4b4de76e 274 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
f45d3416 275 mutex_init(&cfile->fh_mutex);
fb1214e4 276 tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);
15ecb436 277
4477288a 278 spin_lock(&cifs_file_list_lock);
4b4de76e 279 list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
15ecb436
JL
280 /* if readable file instance put first in list*/
281 if (file->f_mode & FMODE_READ)
4b4de76e 282 list_add(&cfile->flist, &cinode->openFileList);
15ecb436 283 else
4b4de76e 284 list_add_tail(&cfile->flist, &cinode->openFileList);
4477288a 285 spin_unlock(&cifs_file_list_lock);
15ecb436 286
4b4de76e
PS
287 file->private_data = cfile;
288 return cfile;
15ecb436
JL
289}
290
85160e03
PS
291static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
292
764a1b1a
JL
293struct cifsFileInfo *
294cifsFileInfo_get(struct cifsFileInfo *cifs_file)
295{
296 spin_lock(&cifs_file_list_lock);
297 cifsFileInfo_get_locked(cifs_file);
298 spin_unlock(&cifs_file_list_lock);
299 return cifs_file;
300}
301
cdff08e7
SF
302/*
303 * Release a reference on the file private data. This may involve closing
5f6dbc9e
JL
304 * the filehandle out on the server. Must be called without holding
305 * cifs_file_list_lock.
cdff08e7 306 */
b33879aa
JL
307void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
308{
e66673e3 309 struct inode *inode = cifs_file->dentry->d_inode;
96daf2b0 310 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
e66673e3 311 struct cifsInodeInfo *cifsi = CIFS_I(inode);
4f8ba8a0 312 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
cdff08e7
SF
313 struct cifsLockInfo *li, *tmp;
314
315 spin_lock(&cifs_file_list_lock);
5f6dbc9e 316 if (--cifs_file->count > 0) {
cdff08e7
SF
317 spin_unlock(&cifs_file_list_lock);
318 return;
319 }
320
321 /* remove it from the lists */
322 list_del(&cifs_file->flist);
323 list_del(&cifs_file->tlist);
324
325 if (list_empty(&cifsi->openFileList)) {
326 cFYI(1, "closing last open instance for inode %p",
327 cifs_file->dentry->d_inode);
25364138
PS
328 /*
329 * In strict cache mode we need invalidate mapping on the last
330 * close because it may cause a error when we open this file
331 * again and get at least level II oplock.
332 */
4f8ba8a0
PS
333 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
334 CIFS_I(inode)->invalid_mapping = true;
c6723628 335 cifs_set_oplock_level(cifsi, 0);
cdff08e7
SF
336 }
337 spin_unlock(&cifs_file_list_lock);
338
ad635942
JL
339 cancel_work_sync(&cifs_file->oplock_break);
340
cdff08e7 341 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
0ff78a22 342 struct TCP_Server_Info *server = tcon->ses->server;
6d5786a3 343 unsigned int xid;
0ff78a22
PS
344 int rc = -ENOSYS;
345
6d5786a3 346 xid = get_xid();
0ff78a22
PS
347 if (server->ops->close)
348 rc = server->ops->close(xid, tcon, &cifs_file->fid);
6d5786a3 349 free_xid(xid);
cdff08e7
SF
350 }
351
f45d3416
PS
352 /*
353 * Delete any outstanding lock records. We'll lose them when the file
cdff08e7
SF
354 * is closed anyway.
355 */
d59dad2b 356 mutex_lock(&cifsi->lock_mutex);
f45d3416 357 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
cdff08e7 358 list_del(&li->llist);
85160e03 359 cifs_del_lock_waiters(li);
cdff08e7 360 kfree(li);
b33879aa 361 }
f45d3416
PS
362 list_del(&cifs_file->llist->llist);
363 kfree(cifs_file->llist);
d59dad2b 364 mutex_unlock(&cifsi->lock_mutex);
cdff08e7
SF
365
366 cifs_put_tlink(cifs_file->tlink);
367 dput(cifs_file->dentry);
368 kfree(cifs_file);
b33879aa
JL
369}
370
1da177e4
LT
371int cifs_open(struct inode *inode, struct file *file)
372{
373 int rc = -EACCES;
6d5786a3 374 unsigned int xid;
590a3fe0 375 __u32 oplock;
1da177e4 376 struct cifs_sb_info *cifs_sb;
96daf2b0 377 struct cifs_tcon *tcon;
7ffec372 378 struct tcon_link *tlink;
fb1214e4 379 struct cifsFileInfo *cfile = NULL;
1da177e4 380 char *full_path = NULL;
7e12eddb 381 bool posix_open_ok = false;
fb1214e4 382 struct cifs_fid fid;
1da177e4 383
6d5786a3 384 xid = get_xid();
1da177e4
LT
385
386 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
387 tlink = cifs_sb_tlink(cifs_sb);
388 if (IS_ERR(tlink)) {
6d5786a3 389 free_xid(xid);
7ffec372
JL
390 return PTR_ERR(tlink);
391 }
392 tcon = tlink_tcon(tlink);
1da177e4 393
e6a00296 394 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 395 if (full_path == NULL) {
0f3bc09e 396 rc = -ENOMEM;
232341ba 397 goto out;
1da177e4
LT
398 }
399
b6b38f70
JP
400 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
401 inode, file->f_flags, full_path);
276a74a4 402
10b9b98e 403 if (tcon->ses->server->oplocks)
276a74a4
SF
404 oplock = REQ_OPLOCK;
405 else
406 oplock = 0;
407
64cc2c63 408 if (!tcon->broken_posix_open && tcon->unix_ext &&
29e20f9c
PS
409 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
410 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 411 /* can not refresh inode info since size could be stale */
2422f676 412 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 413 cifs_sb->mnt_file_mode /* ignored */,
fb1214e4 414 file->f_flags, &oplock, &fid.netfid, xid);
276a74a4 415 if (rc == 0) {
b6b38f70 416 cFYI(1, "posix open succeeded");
7e12eddb 417 posix_open_ok = true;
64cc2c63
SF
418 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
419 if (tcon->ses->serverNOS)
b6b38f70 420 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
421 " unexpected error on SMB posix open"
422 ", disabling posix open support."
423 " Check if server update available.",
424 tcon->ses->serverName,
b6b38f70 425 tcon->ses->serverNOS);
64cc2c63 426 tcon->broken_posix_open = true;
276a74a4
SF
427 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
428 (rc != -EOPNOTSUPP)) /* path not found or net err */
429 goto out;
fb1214e4
PS
430 /*
431 * Else fallthrough to retry open the old way on network i/o
432 * or DFS errors.
433 */
276a74a4
SF
434 }
435
7e12eddb
PS
436 if (!posix_open_ok) {
437 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
fb1214e4 438 file->f_flags, &oplock, &fid, xid);
7e12eddb
PS
439 if (rc)
440 goto out;
441 }
47c78b7f 442
fb1214e4
PS
443 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
444 if (cfile == NULL) {
0ff78a22
PS
445 if (tcon->ses->server->ops->close)
446 tcon->ses->server->ops->close(xid, tcon, &fid);
1da177e4
LT
447 rc = -ENOMEM;
448 goto out;
449 }
1da177e4 450
9451a9a5
SJ
451 cifs_fscache_set_inode_cookie(inode, file);
452
7e12eddb 453 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
fb1214e4
PS
454 /*
455 * Time to set mode which we can not set earlier due to
456 * problems creating new read-only files.
457 */
7e12eddb
PS
458 struct cifs_unix_set_info_args args = {
459 .mode = inode->i_mode,
460 .uid = NO_CHANGE_64,
461 .gid = NO_CHANGE_64,
462 .ctime = NO_CHANGE_64,
463 .atime = NO_CHANGE_64,
464 .mtime = NO_CHANGE_64,
465 .device = 0,
466 };
fb1214e4
PS
467 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
468 cfile->pid);
1da177e4
LT
469 }
470
471out:
1da177e4 472 kfree(full_path);
6d5786a3 473 free_xid(xid);
7ffec372 474 cifs_put_tlink(tlink);
1da177e4
LT
475 return rc;
476}
477
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
490
2ae78ba8
PS
491static int
492cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1da177e4
LT
493{
494 int rc = -EACCES;
6d5786a3 495 unsigned int xid;
590a3fe0 496 __u32 oplock;
1da177e4 497 struct cifs_sb_info *cifs_sb;
96daf2b0 498 struct cifs_tcon *tcon;
2ae78ba8
PS
499 struct TCP_Server_Info *server;
500 struct cifsInodeInfo *cinode;
fb8c4b14 501 struct inode *inode;
1da177e4 502 char *full_path = NULL;
2ae78ba8 503 int desired_access;
1da177e4 504 int disposition = FILE_OPEN;
3d3ea8e6 505 int create_options = CREATE_NOT_DIR;
2ae78ba8 506 struct cifs_fid fid;
1da177e4 507
6d5786a3 508 xid = get_xid();
2ae78ba8
PS
509 mutex_lock(&cfile->fh_mutex);
510 if (!cfile->invalidHandle) {
511 mutex_unlock(&cfile->fh_mutex);
0f3bc09e 512 rc = 0;
6d5786a3 513 free_xid(xid);
0f3bc09e 514 return rc;
1da177e4
LT
515 }
516
2ae78ba8 517 inode = cfile->dentry->d_inode;
1da177e4 518 cifs_sb = CIFS_SB(inode->i_sb);
2ae78ba8
PS
519 tcon = tlink_tcon(cfile->tlink);
520 server = tcon->ses->server;
521
522 /*
523 * Can not grab rename sem here because various ops, including those
524 * that already have the rename sem can end up causing writepage to get
525 * called and if the server was down that means we end up here, and we
526 * can never tell if the caller already has the rename_sem.
527 */
528 full_path = build_path_from_dentry(cfile->dentry);
1da177e4 529 if (full_path == NULL) {
3a9f462f 530 rc = -ENOMEM;
2ae78ba8 531 mutex_unlock(&cfile->fh_mutex);
6d5786a3 532 free_xid(xid);
3a9f462f 533 return rc;
1da177e4
LT
534 }
535
2ae78ba8
PS
536 cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
537 full_path);
1da177e4 538
10b9b98e 539 if (tcon->ses->server->oplocks)
1da177e4
LT
540 oplock = REQ_OPLOCK;
541 else
4b18f2a9 542 oplock = 0;
1da177e4 543
29e20f9c 544 if (tcon->unix_ext && cap_unix(tcon->ses) &&
7fc8f4e9 545 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
29e20f9c 546 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
547 /*
548 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
549 * original open. Must mask them off for a reopen.
550 */
2ae78ba8 551 unsigned int oflags = cfile->f_flags &
15886177 552 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 553
2422f676 554 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
2ae78ba8
PS
555 cifs_sb->mnt_file_mode /* ignored */,
556 oflags, &oplock, &fid.netfid, xid);
7fc8f4e9 557 if (rc == 0) {
b6b38f70 558 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
559 goto reopen_success;
560 }
2ae78ba8
PS
561 /*
562 * fallthrough to retry open the old way on errors, especially
563 * in the reconnect path it is important to retry hard
564 */
7fc8f4e9
SF
565 }
566
2ae78ba8 567 desired_access = cifs_convert_flags(cfile->f_flags);
7fc8f4e9 568
3d3ea8e6
SP
569 if (backup_cred(cifs_sb))
570 create_options |= CREATE_OPEN_BACKUP_INTENT;
571
2ae78ba8
PS
572 /*
573 * Can not refresh inode by passing in file_info buf to be returned by
574 * CIFSSMBOpen and then calling get_inode_info with returned buf since
575 * file might have write behind data that needs to be flushed and server
576 * version of file size can be stale. If we knew for sure that inode was
577 * not dirty locally we could do this.
578 */
579 rc = server->ops->open(xid, tcon, full_path, disposition,
580 desired_access, create_options, &fid, &oplock,
581 NULL, cifs_sb);
1da177e4 582 if (rc) {
2ae78ba8
PS
583 mutex_unlock(&cfile->fh_mutex);
584 cFYI(1, "cifs_reopen returned 0x%x", rc);
b6b38f70 585 cFYI(1, "oplock: %d", oplock);
15886177
JL
586 goto reopen_error_exit;
587 }
588
7fc8f4e9 589reopen_success:
2ae78ba8
PS
590 cfile->invalidHandle = false;
591 mutex_unlock(&cfile->fh_mutex);
592 cinode = CIFS_I(inode);
15886177
JL
593
594 if (can_flush) {
595 rc = filemap_write_and_wait(inode->i_mapping);
eb4b756b 596 mapping_set_error(inode->i_mapping, rc);
15886177 597
15886177 598 if (tcon->unix_ext)
2ae78ba8
PS
599 rc = cifs_get_inode_info_unix(&inode, full_path,
600 inode->i_sb, xid);
15886177 601 else
2ae78ba8
PS
602 rc = cifs_get_inode_info(&inode, full_path, NULL,
603 inode->i_sb, xid, NULL);
604 }
605 /*
606 * Else we are writing out data to server already and could deadlock if
607 * we tried to flush data, and since we do not know if we have data that
608 * would invalidate the current end of file on the server we can not go
609 * to the server to get the new inode info.
610 */
611
612 server->ops->set_fid(cfile, &fid, oplock);
613 cifs_relock_file(cfile);
15886177
JL
614
615reopen_error_exit:
1da177e4 616 kfree(full_path);
6d5786a3 617 free_xid(xid);
1da177e4
LT
618 return rc;
619}
620
621int cifs_close(struct inode *inode, struct file *file)
622{
77970693
JL
623 if (file->private_data != NULL) {
624 cifsFileInfo_put(file->private_data);
625 file->private_data = NULL;
626 }
7ee1af76 627
cdff08e7
SF
628 /* return code from the ->release op is always ignored */
629 return 0;
1da177e4
LT
630}
631
632int cifs_closedir(struct inode *inode, struct file *file)
633{
634 int rc = 0;
6d5786a3 635 unsigned int xid;
4b4de76e 636 struct cifsFileInfo *cfile = file->private_data;
92fc65a7
PS
637 struct cifs_tcon *tcon;
638 struct TCP_Server_Info *server;
639 char *buf;
1da177e4 640
b6b38f70 641 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4 642
92fc65a7
PS
643 if (cfile == NULL)
644 return rc;
645
6d5786a3 646 xid = get_xid();
92fc65a7
PS
647 tcon = tlink_tcon(cfile->tlink);
648 server = tcon->ses->server;
1da177e4 649
92fc65a7
PS
650 cFYI(1, "Freeing private data in close dir");
651 spin_lock(&cifs_file_list_lock);
652 if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
653 cfile->invalidHandle = true;
654 spin_unlock(&cifs_file_list_lock);
655 if (server->ops->close_dir)
656 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
657 else
658 rc = -ENOSYS;
659 cFYI(1, "Closing uncompleted readdir with rc %d", rc);
660 /* not much we can do if it fails anyway, ignore rc */
661 rc = 0;
662 } else
663 spin_unlock(&cifs_file_list_lock);
664
665 buf = cfile->srch_inf.ntwrk_buf_start;
666 if (buf) {
667 cFYI(1, "closedir free smb buf in srch struct");
668 cfile->srch_inf.ntwrk_buf_start = NULL;
669 if (cfile->srch_inf.smallBuf)
670 cifs_small_buf_release(buf);
671 else
672 cifs_buf_release(buf);
1da177e4 673 }
92fc65a7
PS
674
675 cifs_put_tlink(cfile->tlink);
676 kfree(file->private_data);
677 file->private_data = NULL;
1da177e4 678 /* BB can we lock the filestruct while this is going on? */
6d5786a3 679 free_xid(xid);
1da177e4
LT
680 return rc;
681}
682
85160e03 683static struct cifsLockInfo *
fbd35aca 684cifs_lock_init(__u64 offset, __u64 length, __u8 type)
7ee1af76 685{
a88b4707 686 struct cifsLockInfo *lock =
fb8c4b14 687 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
688 if (!lock)
689 return lock;
690 lock->offset = offset;
691 lock->length = length;
692 lock->type = type;
a88b4707
PS
693 lock->pid = current->tgid;
694 INIT_LIST_HEAD(&lock->blist);
695 init_waitqueue_head(&lock->block_q);
696 return lock;
85160e03
PS
697}
698
699static void
700cifs_del_lock_waiters(struct cifsLockInfo *lock)
701{
702 struct cifsLockInfo *li, *tmp;
703 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
704 list_del_init(&li->blist);
705 wake_up(&li->block_q);
706 }
707}
708
709static bool
f45d3416
PS
710cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
711 __u64 length, __u8 type, struct cifsFileInfo *cfile,
fbd35aca 712 struct cifsLockInfo **conf_lock)
85160e03 713{
fbd35aca 714 struct cifsLockInfo *li;
f45d3416 715 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
106dc538 716 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03 717
f45d3416 718 list_for_each_entry(li, &fdlocks->locks, llist) {
85160e03
PS
719 if (offset + length <= li->offset ||
720 offset >= li->offset + li->length)
721 continue;
f45d3416
PS
722 if ((type & server->vals->shared_lock_type) &&
723 ((server->ops->compare_fids(cfile, cur_cfile) &&
724 current->tgid == li->pid) || type == li->type))
85160e03 725 continue;
f45d3416
PS
726 *conf_lock = li;
727 return true;
85160e03
PS
728 }
729 return false;
730}
731
161ebf9f 732static bool
55157dfb
PS
733cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
734 __u8 type, struct cifsLockInfo **conf_lock)
161ebf9f 735{
fbd35aca 736 bool rc = false;
f45d3416 737 struct cifs_fid_locks *cur;
55157dfb 738 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
fbd35aca 739
f45d3416
PS
740 list_for_each_entry(cur, &cinode->llist, llist) {
741 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
55157dfb 742 cfile, conf_lock);
fbd35aca
PS
743 if (rc)
744 break;
745 }
fbd35aca
PS
746
747 return rc;
161ebf9f
PS
748}
749
9a5101c8
PS
750/*
751 * Check if there is another lock that prevents us to set the lock (mandatory
752 * style). If such a lock exists, update the flock structure with its
753 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
754 * or leave it the same if we can't. Returns 0 if we don't need to request to
755 * the server or 1 otherwise.
756 */
85160e03 757static int
fbd35aca
PS
758cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
759 __u8 type, struct file_lock *flock)
85160e03
PS
760{
761 int rc = 0;
762 struct cifsLockInfo *conf_lock;
fbd35aca 763 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
106dc538 764 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03
PS
765 bool exist;
766
767 mutex_lock(&cinode->lock_mutex);
768
55157dfb
PS
769 exist = cifs_find_lock_conflict(cfile, offset, length, type,
770 &conf_lock);
85160e03
PS
771 if (exist) {
772 flock->fl_start = conf_lock->offset;
773 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
774 flock->fl_pid = conf_lock->pid;
106dc538 775 if (conf_lock->type & server->vals->shared_lock_type)
85160e03
PS
776 flock->fl_type = F_RDLCK;
777 else
778 flock->fl_type = F_WRLCK;
779 } else if (!cinode->can_cache_brlcks)
780 rc = 1;
781 else
782 flock->fl_type = F_UNLCK;
783
784 mutex_unlock(&cinode->lock_mutex);
785 return rc;
786}
787
161ebf9f 788static void
fbd35aca 789cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 790{
fbd35aca 791 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
d59dad2b 792 mutex_lock(&cinode->lock_mutex);
f45d3416 793 list_add_tail(&lock->llist, &cfile->llist->locks);
d59dad2b 794 mutex_unlock(&cinode->lock_mutex);
7ee1af76
JA
795}
796
9a5101c8
PS
797/*
798 * Set the byte-range lock (mandatory style). Returns:
799 * 1) 0, if we set the lock and don't need to request to the server;
800 * 2) 1, if no locks prevent us but we need to request to the server;
801 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
802 */
85160e03 803static int
fbd35aca 804cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
161ebf9f 805 bool wait)
85160e03 806{
161ebf9f 807 struct cifsLockInfo *conf_lock;
fbd35aca 808 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
85160e03
PS
809 bool exist;
810 int rc = 0;
811
85160e03
PS
812try_again:
813 exist = false;
814 mutex_lock(&cinode->lock_mutex);
815
55157dfb
PS
816 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
817 lock->type, &conf_lock);
85160e03 818 if (!exist && cinode->can_cache_brlcks) {
f45d3416 819 list_add_tail(&lock->llist, &cfile->llist->locks);
85160e03
PS
820 mutex_unlock(&cinode->lock_mutex);
821 return rc;
822 }
823
824 if (!exist)
825 rc = 1;
826 else if (!wait)
827 rc = -EACCES;
828 else {
829 list_add_tail(&lock->blist, &conf_lock->blist);
830 mutex_unlock(&cinode->lock_mutex);
831 rc = wait_event_interruptible(lock->block_q,
832 (lock->blist.prev == &lock->blist) &&
833 (lock->blist.next == &lock->blist));
834 if (!rc)
835 goto try_again;
a88b4707
PS
836 mutex_lock(&cinode->lock_mutex);
837 list_del_init(&lock->blist);
85160e03
PS
838 }
839
85160e03
PS
840 mutex_unlock(&cinode->lock_mutex);
841 return rc;
842}
843
9a5101c8
PS
844/*
845 * Check if there is another lock that prevents us to set the lock (posix
846 * style). If such a lock exists, update the flock structure with its
847 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
848 * or leave it the same if we can't. Returns 0 if we don't need to request to
849 * the server or 1 otherwise.
850 */
85160e03 851static int
4f6bcec9
PS
852cifs_posix_lock_test(struct file *file, struct file_lock *flock)
853{
854 int rc = 0;
855 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
856 unsigned char saved_type = flock->fl_type;
857
50792760
PS
858 if ((flock->fl_flags & FL_POSIX) == 0)
859 return 1;
860
4f6bcec9
PS
861 mutex_lock(&cinode->lock_mutex);
862 posix_test_lock(file, flock);
863
864 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
865 flock->fl_type = saved_type;
866 rc = 1;
867 }
868
869 mutex_unlock(&cinode->lock_mutex);
870 return rc;
871}
872
9a5101c8
PS
873/*
874 * Set the byte-range lock (posix style). Returns:
875 * 1) 0, if we set the lock and don't need to request to the server;
876 * 2) 1, if we need to request to the server;
877 * 3) <0, if the error occurs while setting the lock.
878 */
4f6bcec9
PS
879static int
880cifs_posix_lock_set(struct file *file, struct file_lock *flock)
881{
882 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
50792760
PS
883 int rc = 1;
884
885 if ((flock->fl_flags & FL_POSIX) == 0)
886 return rc;
4f6bcec9 887
66189be7 888try_again:
4f6bcec9
PS
889 mutex_lock(&cinode->lock_mutex);
890 if (!cinode->can_cache_brlcks) {
891 mutex_unlock(&cinode->lock_mutex);
50792760 892 return rc;
4f6bcec9 893 }
66189be7
PS
894
895 rc = posix_lock_file(file, flock, NULL);
9ebb389d 896 mutex_unlock(&cinode->lock_mutex);
66189be7
PS
897 if (rc == FILE_LOCK_DEFERRED) {
898 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
899 if (!rc)
900 goto try_again;
901 locks_delete_block(flock);
902 }
9ebb389d 903 return rc;
4f6bcec9
PS
904}
905
d39a4f71 906int
4f6bcec9 907cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
85160e03 908{
6d5786a3
PS
909 unsigned int xid;
910 int rc = 0, stored_rc;
85160e03
PS
911 struct cifsLockInfo *li, *tmp;
912 struct cifs_tcon *tcon;
913 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
0013fb4c 914 unsigned int num, max_num, max_buf;
32b9aaf1
PS
915 LOCKING_ANDX_RANGE *buf, *cur;
916 int types[] = {LOCKING_ANDX_LARGE_FILES,
917 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
918 int i;
85160e03 919
6d5786a3 920 xid = get_xid();
85160e03
PS
921 tcon = tlink_tcon(cfile->tlink);
922
923 mutex_lock(&cinode->lock_mutex);
924 if (!cinode->can_cache_brlcks) {
925 mutex_unlock(&cinode->lock_mutex);
6d5786a3 926 free_xid(xid);
85160e03
PS
927 return rc;
928 }
929
0013fb4c
PS
930 /*
931 * Accessing maxBuf is racy with cifs_reconnect - need to store value
932 * and check it for zero before using.
933 */
934 max_buf = tcon->ses->server->maxBuf;
935 if (!max_buf) {
936 mutex_unlock(&cinode->lock_mutex);
6d5786a3 937 free_xid(xid);
0013fb4c
PS
938 return -EINVAL;
939 }
940
941 max_num = (max_buf - sizeof(struct smb_hdr)) /
942 sizeof(LOCKING_ANDX_RANGE);
32b9aaf1
PS
943 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
944 if (!buf) {
945 mutex_unlock(&cinode->lock_mutex);
6d5786a3 946 free_xid(xid);
e2f2886a 947 return -ENOMEM;
32b9aaf1
PS
948 }
949
950 for (i = 0; i < 2; i++) {
951 cur = buf;
952 num = 0;
f45d3416 953 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
32b9aaf1
PS
954 if (li->type != types[i])
955 continue;
956 cur->Pid = cpu_to_le16(li->pid);
957 cur->LengthLow = cpu_to_le32((u32)li->length);
958 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
959 cur->OffsetLow = cpu_to_le32((u32)li->offset);
960 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
961 if (++num == max_num) {
4b4de76e
PS
962 stored_rc = cifs_lockv(xid, tcon,
963 cfile->fid.netfid,
04a6aa8a
PS
964 (__u8)li->type, 0, num,
965 buf);
32b9aaf1
PS
966 if (stored_rc)
967 rc = stored_rc;
968 cur = buf;
969 num = 0;
970 } else
971 cur++;
972 }
973
974 if (num) {
4b4de76e 975 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
04a6aa8a 976 (__u8)types[i], 0, num, buf);
32b9aaf1
PS
977 if (stored_rc)
978 rc = stored_rc;
979 }
85160e03
PS
980 }
981
982 cinode->can_cache_brlcks = false;
983 mutex_unlock(&cinode->lock_mutex);
984
32b9aaf1 985 kfree(buf);
6d5786a3 986 free_xid(xid);
85160e03
PS
987 return rc;
988}
989
4f6bcec9
PS
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

/*
 * Snapshot of a single POSIX byte-range lock, queued on a local list so the
 * cached locks can be sent to the server after the flock spinlock is dropped
 * (see cifs_push_posix_locks).
 */
struct lock_to_push {
	struct list_head llist;	/* linkage on the locks_to_send list */
	__u64 offset;		/* range start (from fl_start) */
	__u64 length;		/* range length (1 + fl_end - fl_start) */
	__u32 pid;		/* lock owner (from fl_pid) */
	__u16 netfid;		/* handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1003
4f6bcec9
PS
1004static int
1005cifs_push_posix_locks(struct cifsFileInfo *cfile)
1006{
1007 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1008 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1009 struct file_lock *flock, **before;
d5751469 1010 unsigned int count = 0, i = 0;
4f6bcec9 1011 int rc = 0, xid, type;
d5751469
PS
1012 struct list_head locks_to_send, *el;
1013 struct lock_to_push *lck, *tmp;
4f6bcec9 1014 __u64 length;
4f6bcec9 1015
6d5786a3 1016 xid = get_xid();
4f6bcec9
PS
1017
1018 mutex_lock(&cinode->lock_mutex);
1019 if (!cinode->can_cache_brlcks) {
1020 mutex_unlock(&cinode->lock_mutex);
6d5786a3 1021 free_xid(xid);
4f6bcec9
PS
1022 return rc;
1023 }
1024
d5751469
PS
1025 lock_flocks();
1026 cifs_for_each_lock(cfile->dentry->d_inode, before) {
1027 if ((*before)->fl_flags & FL_POSIX)
1028 count++;
1029 }
1030 unlock_flocks();
1031
4f6bcec9
PS
1032 INIT_LIST_HEAD(&locks_to_send);
1033
d5751469 1034 /*
ce85852b
PS
1035 * Allocating count locks is enough because no FL_POSIX locks can be
1036 * added to the list while we are holding cinode->lock_mutex that
1037 * protects locking operations of this inode.
d5751469
PS
1038 */
1039 for (; i < count; i++) {
1040 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1041 if (!lck) {
1042 rc = -ENOMEM;
1043 goto err_out;
1044 }
1045 list_add_tail(&lck->llist, &locks_to_send);
1046 }
1047
d5751469 1048 el = locks_to_send.next;
4f6bcec9
PS
1049 lock_flocks();
1050 cifs_for_each_lock(cfile->dentry->d_inode, before) {
ce85852b
PS
1051 flock = *before;
1052 if ((flock->fl_flags & FL_POSIX) == 0)
1053 continue;
d5751469 1054 if (el == &locks_to_send) {
ce85852b
PS
1055 /*
1056 * The list ended. We don't have enough allocated
1057 * structures - something is really wrong.
1058 */
d5751469
PS
1059 cERROR(1, "Can't push all brlocks!");
1060 break;
1061 }
4f6bcec9
PS
1062 length = 1 + flock->fl_end - flock->fl_start;
1063 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1064 type = CIFS_RDLCK;
1065 else
1066 type = CIFS_WRLCK;
d5751469 1067 lck = list_entry(el, struct lock_to_push, llist);
4f6bcec9 1068 lck->pid = flock->fl_pid;
4b4de76e 1069 lck->netfid = cfile->fid.netfid;
d5751469
PS
1070 lck->length = length;
1071 lck->type = type;
1072 lck->offset = flock->fl_start;
d5751469 1073 el = el->next;
4f6bcec9 1074 }
4f6bcec9
PS
1075 unlock_flocks();
1076
1077 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
4f6bcec9
PS
1078 int stored_rc;
1079
4f6bcec9 1080 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
c5fd363d 1081 lck->offset, lck->length, NULL,
4f6bcec9
PS
1082 lck->type, 0);
1083 if (stored_rc)
1084 rc = stored_rc;
1085 list_del(&lck->llist);
1086 kfree(lck);
1087 }
1088
d5751469 1089out:
4f6bcec9
PS
1090 cinode->can_cache_brlcks = false;
1091 mutex_unlock(&cinode->lock_mutex);
1092
6d5786a3 1093 free_xid(xid);
4f6bcec9 1094 return rc;
d5751469
PS
1095err_out:
1096 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1097 list_del(&lck->llist);
1098 kfree(lck);
1099 }
1100 goto out;
4f6bcec9
PS
1101}
1102
1103static int
1104cifs_push_locks(struct cifsFileInfo *cfile)
1105{
1106 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1107 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1108
29e20f9c 1109 if (cap_unix(tcon->ses) &&
4f6bcec9
PS
1110 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1111 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1112 return cifs_push_posix_locks(cfile);
1113
d39a4f71 1114 return tcon->ses->server->ops->push_mand_locks(cfile);
4f6bcec9
PS
1115}
1116
03776f45 1117static void
04a6aa8a 1118cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
106dc538 1119 bool *wait_flag, struct TCP_Server_Info *server)
1da177e4 1120{
03776f45 1121 if (flock->fl_flags & FL_POSIX)
b6b38f70 1122 cFYI(1, "Posix");
03776f45 1123 if (flock->fl_flags & FL_FLOCK)
b6b38f70 1124 cFYI(1, "Flock");
03776f45 1125 if (flock->fl_flags & FL_SLEEP) {
b6b38f70 1126 cFYI(1, "Blocking lock");
03776f45 1127 *wait_flag = true;
1da177e4 1128 }
03776f45 1129 if (flock->fl_flags & FL_ACCESS)
b6b38f70 1130 cFYI(1, "Process suspended by mandatory locking - "
03776f45
PS
1131 "not implemented yet");
1132 if (flock->fl_flags & FL_LEASE)
b6b38f70 1133 cFYI(1, "Lease on file - not implemented yet");
03776f45 1134 if (flock->fl_flags &
1da177e4 1135 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
03776f45 1136 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1da177e4 1137
106dc538 1138 *type = server->vals->large_lock_type;
03776f45 1139 if (flock->fl_type == F_WRLCK) {
b6b38f70 1140 cFYI(1, "F_WRLCK ");
106dc538 1141 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1142 *lock = 1;
1143 } else if (flock->fl_type == F_UNLCK) {
b6b38f70 1144 cFYI(1, "F_UNLCK");
106dc538 1145 *type |= server->vals->unlock_lock_type;
03776f45
PS
1146 *unlock = 1;
1147 /* Check if unlock includes more than one lock range */
1148 } else if (flock->fl_type == F_RDLCK) {
b6b38f70 1149 cFYI(1, "F_RDLCK");
106dc538 1150 *type |= server->vals->shared_lock_type;
03776f45
PS
1151 *lock = 1;
1152 } else if (flock->fl_type == F_EXLCK) {
b6b38f70 1153 cFYI(1, "F_EXLCK");
106dc538 1154 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1155 *lock = 1;
1156 } else if (flock->fl_type == F_SHLCK) {
b6b38f70 1157 cFYI(1, "F_SHLCK");
106dc538 1158 *type |= server->vals->shared_lock_type;
03776f45 1159 *lock = 1;
1da177e4 1160 } else
b6b38f70 1161 cFYI(1, "Unknown type of lock");
03776f45 1162}
1da177e4 1163
/*
 * Handle an F_GETLK-style query: report whether the range described by
 * @flock could be locked.  For POSIX (unix extensions) mounts this asks the
 * local lock table and then the server.  For mandatory locking it probes the
 * server by actually taking and immediately releasing the lock, downgrading
 * from exclusive to shared on conflict to report the strongest holder.
 * On return flock->fl_type is updated to F_UNLCK (range free) or to the
 * conflicting lock type.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* a local conflict answers the query without a round trip */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* check the locally cached mandatory locks first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to take the requested lock, then undo it */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	/* a shared probe failed - an exclusive lock must be in the way */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive probe failed - retry as shared to classify the holder */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1231
9ee305b7
PS
1232static void
1233cifs_move_llist(struct list_head *source, struct list_head *dest)
1234{
1235 struct list_head *li, *tmp;
1236 list_for_each_safe(li, tmp, source)
1237 list_move(li, dest);
1238}
1239
1240static void
1241cifs_free_llist(struct list_head *llist)
1242{
1243 struct cifsLockInfo *li, *tmp;
1244 list_for_each_entry_safe(li, tmp, llist, llist) {
1245 cifs_del_lock_waiters(li);
1246 list_del(&li->llist);
1247 kfree(li);
1248 }
1249}
1250
d39a4f71 1251int
6d5786a3
PS
1252cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1253 unsigned int xid)
9ee305b7
PS
1254{
1255 int rc = 0, stored_rc;
1256 int types[] = {LOCKING_ANDX_LARGE_FILES,
1257 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1258 unsigned int i;
0013fb4c 1259 unsigned int max_num, num, max_buf;
9ee305b7
PS
1260 LOCKING_ANDX_RANGE *buf, *cur;
1261 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1262 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1263 struct cifsLockInfo *li, *tmp;
1264 __u64 length = 1 + flock->fl_end - flock->fl_start;
1265 struct list_head tmp_llist;
1266
1267 INIT_LIST_HEAD(&tmp_llist);
1268
0013fb4c
PS
1269 /*
1270 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1271 * and check it for zero before using.
1272 */
1273 max_buf = tcon->ses->server->maxBuf;
1274 if (!max_buf)
1275 return -EINVAL;
1276
1277 max_num = (max_buf - sizeof(struct smb_hdr)) /
1278 sizeof(LOCKING_ANDX_RANGE);
9ee305b7
PS
1279 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1280 if (!buf)
1281 return -ENOMEM;
1282
1283 mutex_lock(&cinode->lock_mutex);
1284 for (i = 0; i < 2; i++) {
1285 cur = buf;
1286 num = 0;
f45d3416 1287 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
9ee305b7
PS
1288 if (flock->fl_start > li->offset ||
1289 (flock->fl_start + length) <
1290 (li->offset + li->length))
1291 continue;
1292 if (current->tgid != li->pid)
1293 continue;
9ee305b7
PS
1294 if (types[i] != li->type)
1295 continue;
ea319d57 1296 if (cinode->can_cache_brlcks) {
9ee305b7
PS
1297 /*
1298 * We can cache brlock requests - simply remove
fbd35aca 1299 * a lock from the file's list.
9ee305b7
PS
1300 */
1301 list_del(&li->llist);
1302 cifs_del_lock_waiters(li);
1303 kfree(li);
ea319d57 1304 continue;
9ee305b7 1305 }
ea319d57
PS
1306 cur->Pid = cpu_to_le16(li->pid);
1307 cur->LengthLow = cpu_to_le32((u32)li->length);
1308 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1309 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1310 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1311 /*
1312 * We need to save a lock here to let us add it again to
1313 * the file's list if the unlock range request fails on
1314 * the server.
1315 */
1316 list_move(&li->llist, &tmp_llist);
1317 if (++num == max_num) {
4b4de76e
PS
1318 stored_rc = cifs_lockv(xid, tcon,
1319 cfile->fid.netfid,
ea319d57
PS
1320 li->type, num, 0, buf);
1321 if (stored_rc) {
1322 /*
1323 * We failed on the unlock range
1324 * request - add all locks from the tmp
1325 * list to the head of the file's list.
1326 */
1327 cifs_move_llist(&tmp_llist,
f45d3416 1328 &cfile->llist->locks);
ea319d57
PS
1329 rc = stored_rc;
1330 } else
1331 /*
1332 * The unlock range request succeed -
1333 * free the tmp list.
1334 */
1335 cifs_free_llist(&tmp_llist);
1336 cur = buf;
1337 num = 0;
1338 } else
1339 cur++;
9ee305b7
PS
1340 }
1341 if (num) {
4b4de76e 1342 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
9ee305b7
PS
1343 types[i], num, 0, buf);
1344 if (stored_rc) {
f45d3416
PS
1345 cifs_move_llist(&tmp_llist,
1346 &cfile->llist->locks);
9ee305b7
PS
1347 rc = stored_rc;
1348 } else
1349 cifs_free_llist(&tmp_llist);
1350 }
1351 }
1352
1353 mutex_unlock(&cinode->lock_mutex);
1354 kfree(buf);
1355 return rc;
1356}
1357
03776f45 1358static int
f45d3416 1359cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
6d5786a3
PS
1360 bool wait_flag, bool posix_lck, int lock, int unlock,
1361 unsigned int xid)
03776f45
PS
1362{
1363 int rc = 0;
1364 __u64 length = 1 + flock->fl_end - flock->fl_start;
1365 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1366 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
106dc538 1367 struct TCP_Server_Info *server = tcon->ses->server;
03776f45
PS
1368
1369 if (posix_lck) {
08547b03 1370 int posix_lock_type;
4f6bcec9
PS
1371
1372 rc = cifs_posix_lock_set(file, flock);
1373 if (!rc || rc < 0)
1374 return rc;
1375
106dc538 1376 if (type & server->vals->shared_lock_type)
08547b03
SF
1377 posix_lock_type = CIFS_RDLCK;
1378 else
1379 posix_lock_type = CIFS_WRLCK;
50c2f753 1380
03776f45 1381 if (unlock == 1)
beb84dc8 1382 posix_lock_type = CIFS_UNLCK;
7ee1af76 1383
f45d3416
PS
1384 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1385 current->tgid, flock->fl_start, length,
1386 NULL, posix_lock_type, wait_flag);
03776f45
PS
1387 goto out;
1388 }
7ee1af76 1389
03776f45 1390 if (lock) {
161ebf9f
PS
1391 struct cifsLockInfo *lock;
1392
fbd35aca 1393 lock = cifs_lock_init(flock->fl_start, length, type);
161ebf9f
PS
1394 if (!lock)
1395 return -ENOMEM;
1396
fbd35aca 1397 rc = cifs_lock_add_if(cfile, lock, wait_flag);
85160e03 1398 if (rc < 0)
161ebf9f
PS
1399 kfree(lock);
1400 if (rc <= 0)
85160e03
PS
1401 goto out;
1402
d39a4f71
PS
1403 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1404 type, 1, 0, wait_flag);
161ebf9f
PS
1405 if (rc) {
1406 kfree(lock);
1407 goto out;
03776f45 1408 }
161ebf9f 1409
fbd35aca 1410 cifs_lock_add(cfile, lock);
9ee305b7 1411 } else if (unlock)
d39a4f71 1412 rc = server->ops->mand_unlock_range(cfile, flock, xid);
03776f45 1413
03776f45
PS
1414out:
1415 if (flock->fl_flags & FL_POSIX)
9ebb389d 1416 posix_lock_file_wait(file, flock);
03776f45
PS
1417 return rc;
1418}
1419
1420int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1421{
1422 int rc, xid;
1423 int lock = 0, unlock = 0;
1424 bool wait_flag = false;
1425 bool posix_lck = false;
1426 struct cifs_sb_info *cifs_sb;
1427 struct cifs_tcon *tcon;
1428 struct cifsInodeInfo *cinode;
1429 struct cifsFileInfo *cfile;
1430 __u16 netfid;
04a6aa8a 1431 __u32 type;
03776f45
PS
1432
1433 rc = -EACCES;
6d5786a3 1434 xid = get_xid();
03776f45
PS
1435
1436 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1437 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1438 flock->fl_start, flock->fl_end);
1439
03776f45
PS
1440 cfile = (struct cifsFileInfo *)file->private_data;
1441 tcon = tlink_tcon(cfile->tlink);
106dc538
PS
1442
1443 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1444 tcon->ses->server);
1445
1446 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
4b4de76e 1447 netfid = cfile->fid.netfid;
03776f45
PS
1448 cinode = CIFS_I(file->f_path.dentry->d_inode);
1449
29e20f9c 1450 if (cap_unix(tcon->ses) &&
03776f45
PS
1451 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1452 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1453 posix_lck = true;
1454 /*
1455 * BB add code here to normalize offset and length to account for
1456 * negative length which we can not accept over the wire.
1457 */
1458 if (IS_GETLK(cmd)) {
4f6bcec9 1459 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
6d5786a3 1460 free_xid(xid);
03776f45
PS
1461 return rc;
1462 }
1463
1464 if (!lock && !unlock) {
1465 /*
1466 * if no lock or unlock then nothing to do since we do not
1467 * know what it is
1468 */
6d5786a3 1469 free_xid(xid);
03776f45 1470 return -EOPNOTSUPP;
7ee1af76
JA
1471 }
1472
03776f45
PS
1473 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1474 xid);
6d5786a3 1475 free_xid(xid);
1da177e4
LT
1476 return rc;
1477}
1478
597b027f
JL
1479/*
1480 * update the file size (if needed) after a write. Should be called with
1481 * the inode->i_lock held
1482 */
72432ffc 1483void
fbec9ab9
JL
1484cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1485 unsigned int bytes_written)
1486{
1487 loff_t end_of_write = offset + bytes_written;
1488
1489 if (end_of_write > cifsi->server_eof)
1490 cifsi->server_eof = end_of_write;
1491}
1492
ba9ad725
PS
/*
 * Synchronously write @write_size bytes from @write_data to the file at
 * *@offset using the server's sync_write op, splitting the data into
 * wsize-limited chunks and retrying each chunk on -EAGAIN (reopening the
 * handle first when it has been invalidated).  On success *@offset is
 * advanced, the cached server EOF and in-core i_size are updated under
 * i_lock, and the number of bytes written is returned; a failure before any
 * byte is written returns the error instead.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* one chunk per request, capped at the mount wsize */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* report a partial write; error out only if nothing
			   was written at all */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1577
6508d904
JL
/*
 * Find an open, valid handle on @cifs_inode that allows reading.  When
 * @fsuid_only is set (honored only on multiuser mounts) the handle must
 * belong to the current fsuid.  A matching handle is returned with an extra
 * reference taken under cifs_file_list_lock (caller must cifsFileInfo_put);
 * returns NULL when none is found.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
630f3f0c 1611
6508d904
JL
1612struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1613 bool fsuid_only)
6148a742 1614{
2c0c2a08 1615 struct cifsFileInfo *open_file, *inv_file = NULL;
d3892294 1616 struct cifs_sb_info *cifs_sb;
2846d386 1617 bool any_available = false;
dd99cd80 1618 int rc;
2c0c2a08 1619 unsigned int refind = 0;
6148a742 1620
60808233
SF
1621 /* Having a null inode here (because mapping->host was set to zero by
1622 the VFS or MM) should not happen but we had reports of on oops (due to
1623 it being zero) during stress testcases so we need to check for it */
1624
fb8c4b14 1625 if (cifs_inode == NULL) {
b6b38f70 1626 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1627 dump_stack();
1628 return NULL;
1629 }
1630
d3892294
JL
1631 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1632
6508d904
JL
1633 /* only filter by fsuid on multiuser mounts */
1634 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1635 fsuid_only = false;
1636
4477288a 1637 spin_lock(&cifs_file_list_lock);
9b22b0b7 1638refind_writable:
2c0c2a08
SP
1639 if (refind > MAX_REOPEN_ATT) {
1640 spin_unlock(&cifs_file_list_lock);
1641 return NULL;
1642 }
6148a742 1643 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1644 if (!any_available && open_file->pid != current->tgid)
1645 continue;
1646 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1647 continue;
2e396b83 1648 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
9b22b0b7
SF
1649 if (!open_file->invalidHandle) {
1650 /* found a good writable file */
764a1b1a 1651 cifsFileInfo_get_locked(open_file);
4477288a 1652 spin_unlock(&cifs_file_list_lock);
9b22b0b7 1653 return open_file;
2c0c2a08
SP
1654 } else {
1655 if (!inv_file)
1656 inv_file = open_file;
9b22b0b7 1657 }
6148a742
SF
1658 }
1659 }
2846d386
JL
1660 /* couldn't find useable FH with same pid, try any available */
1661 if (!any_available) {
1662 any_available = true;
1663 goto refind_writable;
1664 }
2c0c2a08
SP
1665
1666 if (inv_file) {
1667 any_available = false;
764a1b1a 1668 cifsFileInfo_get_locked(inv_file);
2c0c2a08
SP
1669 }
1670
4477288a 1671 spin_unlock(&cifs_file_list_lock);
2c0c2a08
SP
1672
1673 if (inv_file) {
1674 rc = cifs_reopen_file(inv_file, false);
1675 if (!rc)
1676 return inv_file;
1677 else {
1678 spin_lock(&cifs_file_list_lock);
1679 list_move_tail(&inv_file->flist,
1680 &cifs_inode->openFileList);
1681 spin_unlock(&cifs_file_list_lock);
1682 cifsFileInfo_put(inv_file);
1683 spin_lock(&cifs_file_list_lock);
1684 ++refind;
1685 goto refind_writable;
1686 }
1687 }
1688
6148a742
SF
1689 return NULL;
1690}
1691
1da177e4
LT
/*
 * Write the bytes [@from, @to) of @page back to the server through any
 * writable handle on the inode.  The range is clamped so the write never
 * extends the file, and a write whose offset is already past i_size (racing
 * truncate) is silently skipped.  Returns 0 on success, a negative errno
 * otherwise.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* reject a range that is outside the page or inverted */
	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1745
/*
 * ->writepages for CIFS: gather runs of consecutive dirty pages (up to
 * wsize worth) into a cifs_writedata and submit them with the server's
 * async_writev op.  Falls back to generic_writepages (page-at-a-time) when
 * wsize is smaller than a page.  Pages are marked writeback before submit;
 * on failure they are redirtied (-EAGAIN) or flagged with an error.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;
	loff_t isize = i_size_read(mapping->host);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most one wsize worth of pages per request */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* keep only a leading run of lockable, consecutive pages */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= isize) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		/* last page may be partial if it straddles EOF */
		wdata->tailsz =
			min(isize - page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
					wdata->tailsz;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
1da177e4 1955
9ad1506b
PS
1956static int
1957cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1958{
9ad1506b 1959 int rc;
6d5786a3 1960 unsigned int xid;
1da177e4 1961
6d5786a3 1962 xid = get_xid();
1da177e4
LT
1963/* BB add check for wbc flags */
1964 page_cache_get(page);
ad7a2926 1965 if (!PageUptodate(page))
b6b38f70 1966 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1967
1968 /*
1969 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1970 *
1971 * A writepage() implementation always needs to do either this,
1972 * or re-dirty the page with "redirty_page_for_writepage()" in
1973 * the case of a failure.
1974 *
1975 * Just unlocking the page will cause the radix tree tag-bits
1976 * to fail to update with the state of the page correctly.
1977 */
fb8c4b14 1978 set_page_writeback(page);
9ad1506b 1979retry_write:
1da177e4 1980 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
1981 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1982 goto retry_write;
1983 else if (rc == -EAGAIN)
1984 redirty_page_for_writepage(wbc, page);
1985 else if (rc != 0)
1986 SetPageError(page);
1987 else
1988 SetPageUptodate(page);
cb876f45
LT
1989 end_page_writeback(page);
1990 page_cache_release(page);
6d5786a3 1991 free_xid(xid);
1da177e4
LT
1992 return rc;
1993}
/* ->writepage entry point: write the page out, then drop the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
/*
 * ->write_end handler: commit @copied bytes at @pos to the page.
 *
 * If the page is fully up to date the data is simply marked dirty for later
 * writeback; otherwise the copied range is written synchronously to the
 * server via cifs_write(). Returns the number of bytes committed (or a
 * negative error), and always releases the page lock and the reference
 * taken by ->write_begin.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* pick the pid the server will see as the writer */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	/*
	 * PageChecked set by ->write_begin means the page was not read in;
	 * it is only fully valid now if the whole requested range was copied.
	 */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		/* page fully valid: defer to writeback */
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	/* extend the in-core file size if this write went past EOF */
	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
/*
 * Strict-cache fsync: flush dirty pages in the given range, invalidate the
 * page cache if we do not hold a read oplock (the server copy may be newer),
 * then ask the server to flush its own buffers for this handle unless the
 * "nostrictsync" mount option suppresses that.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* push dirty pages before taking i_mutex */
	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	/* without a read oplock the cached pages may be stale — drop them */
	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2105
02c24a82 2106int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba 2107{
6d5786a3 2108 unsigned int xid;
8be7e6ba 2109 int rc = 0;
96daf2b0 2110 struct cifs_tcon *tcon;
1d8c4c00 2111 struct TCP_Server_Info *server;
8be7e6ba
PS
2112 struct cifsFileInfo *smbfile = file->private_data;
2113 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
02c24a82
JB
2114 struct inode *inode = file->f_mapping->host;
2115
2116 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2117 if (rc)
2118 return rc;
2119 mutex_lock(&inode->i_mutex);
8be7e6ba 2120
6d5786a3 2121 xid = get_xid();
8be7e6ba
PS
2122
2123 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2124 file->f_path.dentry->d_name.name, datasync);
2125
2126 tcon = tlink_tcon(smbfile->tlink);
1d8c4c00
PS
2127 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2128 server = tcon->ses->server;
2129 if (server->ops->flush)
2130 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2131 else
2132 rc = -ENOSYS;
2133 }
b298f223 2134
6d5786a3 2135 free_xid(xid);
02c24a82 2136 mutex_unlock(&inode->i_mutex);
1da177e4
LT
2137 return rc;
2138}
2139
1da177e4
LT
2140/*
2141 * As file closes, flush all cached write data for this inode checking
2142 * for write behind errors.
2143 */
75e1fcc0 2144int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2145{
fb8c4b14 2146 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
2147 int rc = 0;
2148
eb4b756b 2149 if (file->f_mode & FMODE_WRITE)
d3f1322a 2150 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2151
b6b38f70 2152 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
2153
2154 return rc;
2155}
2156
72432ffc
PS
2157static int
2158cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2159{
2160 int rc = 0;
2161 unsigned long i;
2162
2163 for (i = 0; i < num_pages; i++) {
e94f7ba1 2164 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
72432ffc
PS
2165 if (!pages[i]) {
2166 /*
2167 * save number of pages we have already allocated and
2168 * return with ENOMEM error
2169 */
2170 num_pages = i;
2171 rc = -ENOMEM;
e94f7ba1 2172 break;
72432ffc
PS
2173 }
2174 }
2175
e94f7ba1
JL
2176 if (rc) {
2177 for (i = 0; i < num_pages; i++)
2178 put_page(pages[i]);
2179 }
72432ffc
PS
2180 return rc;
2181}
2182
2183static inline
2184size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2185{
2186 size_t num_pages;
2187 size_t clen;
2188
2189 clen = min_t(const size_t, len, wsize);
a7103b99 2190 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2191
2192 if (cur_len)
2193 *cur_len = clen;
2194
2195 return num_pages;
2196}
/*
 * Work-queue completion handler for an uncached (O_DIRECT-style) write.
 * Updates the cached server EOF and in-core i_size under i_lock, wakes the
 * submitter waiting on wdata->done, releases the data pages (unless the
 * write will be retried with -EAGAIN) and drops the work's reference.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/* on -EAGAIN the pages are kept so the submitter can resend */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			/*
			 * continue re-evaluates the loop condition: the loop
			 * only repeats if the reopen itself returned -EAGAIN;
			 * any other error is returned to the caller.
			 */
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}
/*
 * Uncached vectored write: split the user iovec into wsize-bounded chunks,
 * copy each chunk into freshly allocated pages, and submit them as
 * asynchronous write requests. Then collect the completions in offset
 * order, resending any chunk that fails with -EAGAIN.
 *
 * Returns the number of bytes written (updating *poffset), or a negative
 * error if nothing at all was written.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		/* copy this chunk of user data into the pages */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		/* cur_len now holds the number of bytes actually copied */
		cur_len = save_len - cur_len;

		/*
		 * NOTE(review): if the copy faulted partway (cur_len <
		 * save_len), nr_pages still counts pages that hold no data,
		 * and the tailsz arithmetic below assumes all but the last
		 * page are full — verify the short-copy case.
		 */
		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2373
0b81c1c4 2374ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
2375 unsigned long nr_segs, loff_t pos)
2376{
2377 ssize_t written;
2378 struct inode *inode;
2379
2380 inode = iocb->ki_filp->f_path.dentry->d_inode;
2381
2382 /*
2383 * BB - optimize the way when signing is disabled. We can drop this
2384 * extra memory-to-memory copying and use iovec buffers for constructing
2385 * write request.
2386 */
2387
2388 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2389 if (written > 0) {
2390 CIFS_I(inode)->invalid_mapping = true;
2391 iocb->ki_pos = pos;
2392 }
2393
2394 return written;
2395}
2396
2397ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2398 unsigned long nr_segs, loff_t pos)
2399{
2400 struct inode *inode;
2401
2402 inode = iocb->ki_filp->f_path.dentry->d_inode;
2403
2404 if (CIFS_I(inode)->clientCanCacheAll)
2405 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2406
2407 /*
2408 * In strict cache mode we need to write the data to the server exactly
2409 * from the pos to pos+len-1 rather than flush all affected pages
2410 * because it may cause a error with mandatory locks on these pages but
2411 * not on the region from pos to ppos+len-1.
2412 */
2413
2414 return cifs_user_writev(iocb, iov, nr_segs, pos);
2415}
2416
0471ca3f 2417static struct cifs_readdata *
f4e49cd2 2418cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
0471ca3f
JL
2419{
2420 struct cifs_readdata *rdata;
f4e49cd2 2421
c5fab6f4
JL
2422 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2423 GFP_KERNEL);
0471ca3f 2424 if (rdata != NULL) {
6993f74a 2425 kref_init(&rdata->refcount);
1c892549
JL
2426 INIT_LIST_HEAD(&rdata->list);
2427 init_completion(&rdata->done);
0471ca3f 2428 INIT_WORK(&rdata->work, complete);
0471ca3f 2429 }
f4e49cd2 2430
0471ca3f
JL
2431 return rdata;
2432}
2433
6993f74a
JL
2434void
2435cifs_readdata_release(struct kref *refcount)
0471ca3f 2436{
6993f74a
JL
2437 struct cifs_readdata *rdata = container_of(refcount,
2438 struct cifs_readdata, refcount);
2439
2440 if (rdata->cfile)
2441 cifsFileInfo_put(rdata->cfile);
2442
0471ca3f
JL
2443 kfree(rdata);
2444}
2445
1c892549 2446static int
c5fab6f4 2447cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
1c892549
JL
2448{
2449 int rc = 0;
c5fab6f4 2450 struct page *page;
1c892549
JL
2451 unsigned int i;
2452
c5fab6f4 2453 for (i = 0; i < nr_pages; i++) {
1c892549
JL
2454 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2455 if (!page) {
2456 rc = -ENOMEM;
2457 break;
2458 }
c5fab6f4 2459 rdata->pages[i] = page;
1c892549
JL
2460 }
2461
2462 if (rc) {
c5fab6f4
JL
2463 for (i = 0; i < nr_pages; i++) {
2464 put_page(rdata->pages[i]);
2465 rdata->pages[i] = NULL;
1c892549
JL
2466 }
2467 }
2468 return rc;
2469}
/*
 * Final kref release for an uncached read: these reads own their data pages
 * outright, so drop every page before handing off to the common
 * cifs_readdata_release() for the rest of the teardown.
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}
/*
 * Submit an async read, reopening the file handle if it has been
 * invalidated (e.g. after reconnect) and retrying on -EAGAIN.
 */
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			/*
			 * continue re-checks the loop condition: only an
			 * -EAGAIN from the reopen repeats the loop; any other
			 * error is returned to the caller.
			 */
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	/* where this response's data lands relative to the start of the iov */
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}
/*
 * Work-queue completion for an uncached read: wake the waiter in
 * cifs_iovec_read(), then drop the reference held by this work item.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
/*
 * Receive @len bytes of read-response data from the socket into the pages
 * of an uncached read. Each page is filled in turn; a final short page is
 * zero-padded and recorded as rdata->tailsz, and pages beyond the received
 * length are released immediately. Returns the number of bytes read, or a
 * negative error from the socket.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2617
a70307ee
PS
2618static ssize_t
2619cifs_iovec_read(struct file *file, const struct iovec *iov,
2620 unsigned long nr_segs, loff_t *poffset)
1da177e4 2621{
1c892549 2622 ssize_t rc;
a70307ee 2623 size_t len, cur_len;
1c892549
JL
2624 ssize_t total_read = 0;
2625 loff_t offset = *poffset;
2626 unsigned int npages;
1da177e4 2627 struct cifs_sb_info *cifs_sb;
1c892549 2628 struct cifs_tcon *tcon;
1da177e4 2629 struct cifsFileInfo *open_file;
1c892549
JL
2630 struct cifs_readdata *rdata, *tmp;
2631 struct list_head rdata_list;
2632 pid_t pid;
a70307ee
PS
2633
2634 if (!nr_segs)
2635 return 0;
2636
2637 len = iov_length(iov, nr_segs);
2638 if (!len)
2639 return 0;
1da177e4 2640
1c892549 2641 INIT_LIST_HEAD(&rdata_list);
e6a00296 2642 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
c21dfb69 2643 open_file = file->private_data;
1c892549 2644 tcon = tlink_tcon(open_file->tlink);
1da177e4 2645
fc9c5966
PS
2646 if (!tcon->ses->server->ops->async_readv)
2647 return -ENOSYS;
2648
d4ffff1f
PS
2649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2650 pid = open_file->pid;
2651 else
2652 pid = current->tgid;
2653
ad7a2926 2654 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 2655 cFYI(1, "attempting read on write only file instance");
ad7a2926 2656
1c892549
JL
2657 do {
2658 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2659 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
a70307ee 2660
1c892549
JL
2661 /* allocate a readdata struct */
2662 rdata = cifs_readdata_alloc(npages,
2663 cifs_uncached_readv_complete);
2664 if (!rdata) {
2665 rc = -ENOMEM;
2666 goto error;
1da177e4 2667 }
a70307ee 2668
c5fab6f4 2669 rc = cifs_read_allocate_pages(rdata, npages);
1c892549
JL
2670 if (rc)
2671 goto error;
2672
2673 rdata->cfile = cifsFileInfo_get(open_file);
c5fab6f4 2674 rdata->nr_pages = npages;
1c892549
JL
2675 rdata->offset = offset;
2676 rdata->bytes = cur_len;
2677 rdata->pid = pid;
8321fec4
JL
2678 rdata->pagesz = PAGE_SIZE;
2679 rdata->read_into_pages = cifs_uncached_read_into_pages;
1c892549
JL
2680
2681 rc = cifs_retry_async_readv(rdata);
2682error:
2683 if (rc) {
2684 kref_put(&rdata->refcount,
2685 cifs_uncached_readdata_release);
2686 break;
2687 }
2688
2689 list_add_tail(&rdata->list, &rdata_list);
2690 offset += cur_len;
2691 len -= cur_len;
2692 } while (len > 0);
2693
2694 /* if at least one read request send succeeded, then reset rc */
2695 if (!list_empty(&rdata_list))
2696 rc = 0;
2697
2698 /* the loop below should proceed in the order of increasing offsets */
2699restart_loop:
2700 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2701 if (!rc) {
2702 ssize_t copied;
2703
2704 /* FIXME: freezable sleep too? */
2705 rc = wait_for_completion_killable(&rdata->done);
2706 if (rc)
2707 rc = -EINTR;
2708 else if (rdata->result)
2709 rc = rdata->result;
2710 else {
2711 rc = cifs_readdata_to_iov(rdata, iov,
2712 nr_segs, *poffset,
2713 &copied);
2714 total_read += copied;
2715 }
2716
2717 /* resend call if it's a retryable error */
2718 if (rc == -EAGAIN) {
2719 rc = cifs_retry_async_readv(rdata);
2720 goto restart_loop;
1da177e4 2721 }
1da177e4 2722 }
1c892549
JL
2723 list_del_init(&rdata->list);
2724 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
1da177e4 2725 }
a70307ee 2726
1c892549
JL
2727 cifs_stats_bytes_read(tcon, total_read);
2728 *poffset += total_read;
2729
09a4707e
PS
2730 /* mask nodata case */
2731 if (rc == -ENODATA)
2732 rc = 0;
2733
1c892549 2734 return total_read ? total_read : rc;
1da177e4
LT
2735}
2736
0b81c1c4 2737ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2738 unsigned long nr_segs, loff_t pos)
2739{
2740 ssize_t read;
2741
2742 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2743 if (read > 0)
2744 iocb->ki_pos = pos;
2745
2746 return read;
2747}
2748
2749ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2750 unsigned long nr_segs, loff_t pos)
2751{
2752 struct inode *inode;
2753
2754 inode = iocb->ki_filp->f_path.dentry->d_inode;
2755
2756 if (CIFS_I(inode)->clientCanCacheRead)
2757 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2758
2759 /*
2760 * In strict cache mode we need to read from the server all the time
2761 * if we don't have level II oplock because the server can delay mtime
2762 * change - so we can't make a decision about inode invalidating.
2763 * And we can also fail with pagereading if there are mandatory locks
2764 * on pages affected by this read but not on the region from pos to
2765 * pos+len-1.
2766 */
2767
2768 return cifs_user_readv(iocb, iov, nr_segs, pos);
2769}
/*
 * Synchronous buffered read: fetch @read_size bytes at *@offset into
 * @read_data in rsize-bounded chunks, retrying each chunk on -EAGAIN
 * (reopening the handle if it was invalidated). Advances *offset by the
 * bytes read and returns the total, or a negative error if nothing was
 * read at all.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
			tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): the running total (not bytes_read) is
			 * added to the byte counters on every pass, which
			 * over-counts — looks like bytes_read was intended;
			 * verify against the stats consumers.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
2859
ca83ce3d
JL
2860/*
2861 * If the page is mmap'ed into a process' page tables, then we need to make
2862 * sure that it doesn't change while being written back.
2863 */
2864static int
2865cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2866{
2867 struct page *page = vmf->page;
2868
2869 lock_page(page);
2870 return VM_FAULT_LOCKED;
2871}
2872
2873static struct vm_operations_struct cifs_file_vm_ops = {
2874 .fault = filemap_fault,
2875 .page_mkwrite = cifs_page_mkwrite,
2876};
2877
7a6a19b1
PS
2878int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2879{
2880 int rc, xid;
2881 struct inode *inode = file->f_path.dentry->d_inode;
2882
6d5786a3 2883 xid = get_xid();
7a6a19b1 2884
6feb9891
PS
2885 if (!CIFS_I(inode)->clientCanCacheRead) {
2886 rc = cifs_invalidate_mapping(inode);
2887 if (rc)
2888 return rc;
2889 }
7a6a19b1
PS
2890
2891 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2892 if (rc == 0)
2893 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 2894 free_xid(xid);
7a6a19b1
PS
2895 return rc;
2896}
2897
1da177e4
LT
2898int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2899{
1da177e4
LT
2900 int rc, xid;
2901
6d5786a3 2902 xid = get_xid();
abab095d 2903 rc = cifs_revalidate_file(file);
1da177e4 2904 if (rc) {
b6b38f70 2905 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
6d5786a3 2906 free_xid(xid);
1da177e4
LT
2907 return rc;
2908 }
2909 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2910 if (rc == 0)
2911 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 2912 free_xid(xid);
1da177e4
LT
2913 return rc;
2914}
/*
 * Work-queue completion for a readpages (cached) read. The pages were
 * locked and referenced at submit time; here each one is added to the LRU,
 * marked up to date (and handed to fscache) on success, then unlocked and
 * released. Finally the work's reference on the readdata is dropped.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
/*
 * Receive @len bytes of read-response data from the socket into the page
 * cache pages of a readpages request. Full pages are filled directly; a
 * final short page is zero-padded and recorded as rdata->tailsz. Pages
 * past the received length are either zero-filled and marked up to date
 * (when they lie beyond the server's EOF — see comment below) or simply
 * released. Returns bytes read, or a negative socket error.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
				'\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
3019
/*
 * ->readpages for the CIFS address space: batch contiguous pages from
 * @page_list into async read requests of at most rsize bytes each.
 * Tries fscache first; otherwise locks the pages into the page cache,
 * builds a cifs_readdata per batch and hands it to the async readv
 * machinery, which completes via cifs_readv_complete.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the opener's pid when the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		/* transfer batch from tmplist into rdata->pages, ascending */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			/* dispatch failed: release the batch ourselves */
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our local ref; completion holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}
3166
/*
 * Fill one page with file data at *poffset. Tries fscache first; on a
 * cache miss performs a synchronous cifs_read, zero-pads any short tail,
 * marks the page uptodate and pushes it to fscache. Also refreshes the
 * inode's atime. Returns 0 on success or a negative error. The caller
 * still owns the page lock.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	/* extra page ref balanced by page_cache_release below */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the part of the page past what the server returned */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3210
3211static int cifs_readpage(struct file *file, struct page *page)
3212{
3213 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3214 int rc = -EACCES;
6d5786a3 3215 unsigned int xid;
1da177e4 3216
6d5786a3 3217 xid = get_xid();
1da177e4
LT
3218
3219 if (file->private_data == NULL) {
0f3bc09e 3220 rc = -EBADF;
6d5786a3 3221 free_xid(xid);
0f3bc09e 3222 return rc;
1da177e4
LT
3223 }
3224
ac3aa2f8 3225 cFYI(1, "readpage %p at offset %d 0x%x",
b6b38f70 3226 page, (int)offset, (int)offset);
1da177e4
LT
3227
3228 rc = cifs_readpage_worker(file, page, &offset);
3229
3230 unlock_page(page);
3231
6d5786a3 3232 free_xid(xid);
1da177e4
LT
3233 return rc;
3234}
3235
a403a0a3
SF
3236static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3237{
3238 struct cifsFileInfo *open_file;
3239
4477288a 3240 spin_lock(&cifs_file_list_lock);
a403a0a3 3241 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 3242 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 3243 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3244 return 1;
3245 }
3246 }
4477288a 3247 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3248 return 0;
3249}
3250
1da177e4
LT
3251/* We do not want to update the file size from server for inodes
3252 open for write - to avoid races with writepage extending
3253 the file - in the future we could consider allowing
fb8c4b14 3254 refreshing the inode only on increases in the file size
1da177e4
LT
3255 but this is tricky to do without racing with writebehind
3256 page caching in the current Linux kernel design */
4b18f2a9 3257bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 3258{
a403a0a3 3259 if (!cifsInode)
4b18f2a9 3260 return true;
50c2f753 3261
a403a0a3
SF
3262 if (is_inode_writable(cifsInode)) {
3263 /* This inode is open for write at least once */
c32a0b68
SF
3264 struct cifs_sb_info *cifs_sb;
3265
c32a0b68 3266 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 3267 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3268 /* since no page cache to corrupt on directio
c32a0b68 3269 we can change size safely */
4b18f2a9 3270 return true;
c32a0b68
SF
3271 }
3272
fb8c4b14 3273 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 3274 return true;
7ba52631 3275
4b18f2a9 3276 return false;
23e7dd7d 3277 } else
4b18f2a9 3278 return true;
1da177e4
LT
3279}
3280
/*
 * ->write_begin: grab (and possibly pre-fill) the page cache page that a
 * buffered write at @pos/@len will land in. Avoids a server read when the
 * page will be fully overwritten, or when we hold a read oplock and the
 * page lies at/past EOF; otherwise reads the page in synchronously so the
 * untouched parts are valid. *pagep is set on return; the page is locked.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3352
85f2d6b4
SJ
3353static int cifs_release_page(struct page *page, gfp_t gfp)
3354{
3355 if (PagePrivate(page))
3356 return 0;
3357
3358 return cifs_fscache_release_page(page, gfp);
3359}
3360
3361static void cifs_invalidate_page(struct page *page, unsigned long offset)
3362{
3363 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3364
3365 if (offset == 0)
3366 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3367}
3368
/*
 * ->launder_page: synchronously write back one dirty page (WB_SYNC_ALL
 * over just this page's byte range) before it is invalidated, then drop
 * the fscache copy. Returns the writeback result, or 0 if the page was
 * not dirty.
 */
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	/* restrict the sync to exactly this page */
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
3389
/*
 * Work-item handler run when the server breaks our oplock. Breaks any
 * matching lease on the inode, flushes dirty pages (and, if we are losing
 * read caching, waits for the flush and invalidates the cached data),
 * re-pushes byte-range locks to the server, and finally acknowledges the
 * break unless it was cancelled (e.g. by a reconnect).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* match the lease break mode to the caching we are losing */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* losing read cache: wait out the flush and drop it */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
3429
/* Address space operations for mounts whose rsize can hold a full page. */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};