/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	if (!tcon->ses->server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
					  desired_access, create_options, fid,
					  oplock, buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	mutex_lock(&cinode->lock_mutex);
	list_add(&fdlocks->llist, &cinode->llist);
	mutex_unlock(&cinode->lock_mutex);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (tcon->ses->server->ops->close)
			tcon->ses->server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		*conf_lock = li;
		return true;
	}
	return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

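	/*
	 * If a conflicting lock is found and the caller is willing to wait,
	 * block on the conflicting lock's queue and retry once woken.
	 */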
try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
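	/*
	 * FILE_LOCK_DEFERRED means the request was queued behind a
	 * conflicting lock: wait until it is granted, then retry from the top.
	 */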
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -ENOMEM;
	}

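	/*
	 * Two passes, one per lock type in types[]: batch matching cached
	 * locks into the buffer and flush every max_num ranges via cifs_lockv.
	 */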
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

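	/* translate the VFS lock type into the server's lock type bits */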
	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

static int
cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
		    __u64 length, __u32 type, int lock, int unlock, bool wait)
{
	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
			   current->tgid, length, offset, unlock, lock,
			   (__u8)type, wait, 0);
}

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
				 1, 0, false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
				 type | server->vals->shared_lock_type, 1, 0,
				 false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type | server->vals->shared_lock_type,
					 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
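	/*
	 * As in cifs_push_mandatory_locks: one pass per lock type, batching
	 * the ranges covered by the unlock request into cifs_lockv calls.
	 */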
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = cifs_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

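	/*
	 * Only invalidated handles matched: try to reopen one and hand it
	 * back; on failure, demote it to the list tail and search again.
	 */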
	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;
	loff_t isize = i_size_read(mapping->host);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= isize) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(isize - page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
				wdata->tailsz;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

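/*
 * Worked example for the sizing logic above (illustrative, assuming
 * PAGE_CACHE_SIZE = 4096 and wsize = 65536): tofind = min(16 - 1,
 * end - index) + 1, so at most 16 dirty pages are gathered per wdata.
 * If i_size ends 1024 bytes into the last gathered page, tailsz = 1024
 * and bytes = 15 * 4096 + 1024 = 62464, which is what async_writev
 * sends in a single request.
 */
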
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As the file closes, flush all cached write data for this inode and
 * check for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

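/*
 * Worked example for get_numpages() (illustrative, assuming a 4096-byte
 * PAGE_SIZE): with wsize = 65536 and len = 10000, clen = min(10000, 65536)
 * = 10000 and num_pages = DIV_ROUND_UP(10000, 4096) = 3, so the uncached
 * write path below allocates three pages for a 10000-byte chunk.
 */
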
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If those writes succeed, we'll end up
	 * returning whatever was written. If one fails, we'll get a new rc
	 * value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

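/*
 * Illustrative numbers for the wdata fields set above (assuming a
 * 4096-byte PAGE_SIZE): a chunk with cur_len = 10000 spans nr_pages = 3,
 * so tailsz = 10000 - 2 * 4096 = 1808; the first two pages are sent in
 * full and only 1808 bytes of the third are put on the wire.
 */
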
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server exactly
	 * from pos to pos+len-1 rather than flush all affected pages, because
	 * flushing may cause an error with mandatory locks on those pages but
	 * not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/* only release the pages we actually allocated */
		unsigned int nr_allocated = i;

		for (i = 0; i < nr_allocated; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

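/*
 * Illustrative example for the offset math above: if the caller's read
 * started at offset 8192 and this rdata covers offset 12288, then
 * pos = 12288 - 8192 = 4096 and the iov_iter is advanced one page into
 * the user's buffer before any page data is copied.
 */
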
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			rc = -ENOMEM;
			break;	/* nothing allocated yet to clean up */
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock, because the server can delay
	 * the mtime change - so we can't make a decision about invalidating
	 * the inode. Reads through the page cache can also fail if there are
	 * mandatory locks on pages affected by this read but not on the
	 * region from pos to pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

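/*
 * Illustrative dispatch (not from this file): on a cache=strict mount, an
 * application pread() lands in cifs_strict_readv(); with a level II oplock
 * the generic page cache path is used, otherwise the request goes to the
 * server via cifs_user_readv() even if the pages are already cached
 * locally.
 */
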
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * it negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
			tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

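/*
 * Worked example for the loop above (illustrative, assuming an effective
 * rsize of 16384 bytes): a 1 MB cifs_read() request is issued as
 * 1048576 / 16384 = 64 sequential sync_read calls, each advancing
 * *offset and cur_offset by the number of bytes the server returned.
 */
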
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early-error path */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

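/*
 * Worked example for the batching above (illustrative, assuming 4096-byte
 * pages and rsize = 16384): readahead hands us pages 7,6,5,4,3 in
 * declining index order; the loop pulls page 3 first, then 4, 5 and 6
 * (bytes would reach 20480 > rsize with a fifth page), issues one async
 * read for pages 3-6, and starts over with page 7.
 */
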
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

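/*
 * Illustrative scenario for the check above: if a file is open for write
 * locally with i_size = 100000 and the server reports an end-of-file of
 * 65536, is_size_safe_to_change() returns false and the inode size is
 * left alone, since shrinking it would race with our own writeback.
 */
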
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}

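/*
 * Illustrative cases for cifs_write_begin() (not from this file): a
 * 4096-byte write at a page-aligned pos skips the read entirely; with a
 * read oplock, a 512-byte write at offset 0 of a page past EOF zeroes the
 * remainder and sets PageChecked instead of fetching stale data; without
 * an oplock, a short write into an existing page first reads it from the
 * server via cifs_readpage_worker().
 */
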
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now-incorrect file handle, is not a data
	 * integrity issue, but do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * oplock has already been released by the server.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};