1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/fs.h>
24#include <linux/backing-dev.h>
25#include <linux/stat.h>
26#include <linux/fcntl.h>
27#include <linux/mpage.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/smp_lock.h>
31#include <linux/writeback.h>
32#include <linux/delay.h>
33#include <asm/div64.h>
34#include "cifsfs.h"
35#include "cifspdu.h"
36#include "cifsglob.h"
37#include "cifsproto.h"
38#include "cifs_unicode.h"
39#include "cifs_debug.h"
40#include "cifs_fs_sb.h"
41
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE;
53 private_data->closePend = FALSE;
54 /* we have to track num writers to the inode, since writepages
55 does not tell us which handle the write is for so there can
56 be a close (overlapping with write) of the filehandle that
57 cifs_writepages chose to use */
58 atomic_set(&private_data->wrtPending,0);
59
60 return private_data;
61}
62
63static inline int cifs_convert_flags(unsigned int flags)
64{
65 if ((flags & O_ACCMODE) == O_RDONLY)
66 return GENERIC_READ;
67 else if ((flags & O_ACCMODE) == O_WRONLY)
68 return GENERIC_WRITE;
69 else if ((flags & O_ACCMODE) == O_RDWR) {
70 /* GENERIC_ALL is too much permission to request
71 can cause unnecessary access denied on create */
72 /* return GENERIC_ALL; */
73 return (GENERIC_READ | GENERIC_WRITE);
74 }
75
76 return 0x20197;
77}
78
79static inline int cifs_get_disposition(unsigned int flags)
80{
81 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
82 return FILE_CREATE;
83 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
84 return FILE_OVERWRITE_IF;
85 else if ((flags & O_CREAT) == O_CREAT)
86 return FILE_OPEN_IF;
87 else
88 return FILE_OPEN;
89}
90
91/* all arguments to this function must be checked for validity in caller */
92static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
93 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
94 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
95 char *full_path, int xid)
96{
97 struct timespec temp;
98 int rc;
99
100 /* want handles we can use to read with first
101 in the list so we do not have to walk the
102 list to search for one in prepare_write */
103 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
104 list_add_tail(&pCifsFile->flist,
105 &pCifsInode->openFileList);
106 } else {
107 list_add(&pCifsFile->flist,
108 &pCifsInode->openFileList);
109 }
110 write_unlock(&GlobalSMBSeslock);
111 write_unlock(&file->f_owner.lock);
112 if (pCifsInode->clientCanCacheRead) {
113 /* we have the inode open somewhere else
114 no need to discard cache data */
115 goto client_can_cache;
116 }
117
118 /* BB need same check in cifs_create too? */
119 /* if not oplocked, invalidate inode pages if mtime or file
120 size changed */
121 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
122 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
123 (file->f_dentry->d_inode->i_size ==
124 (loff_t)le64_to_cpu(buf->EndOfFile))) {
125 cFYI(1, ("inode unchanged on server"));
126 } else {
127 if (file->f_dentry->d_inode->i_mapping) {
128 /* BB no need to lock inode until after invalidate
129 since namei code should already have it locked? */
130 filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
131 }
132 cFYI(1, ("invalidating remote inode since open detected it "
133 "changed"));
134 invalidate_remote_inode(file->f_dentry->d_inode);
135 }
136
137client_can_cache:
138 if (pTcon->ses->capabilities & CAP_UNIX)
139 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
140 full_path, inode->i_sb, xid);
141 else
142 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
143 full_path, buf, inode->i_sb, xid);
144
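 /* the low nibble of the oplock word returned by the server holds the
 oplock level that was granted: an exclusive oplock lets this client
 cache both reads and writes for the inode, while a level II (read)
 oplock only allows read caching - which is what the two
 clientCanCache flags below record */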
145 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
146 pCifsInode->clientCanCacheAll = TRUE;
147 pCifsInode->clientCanCacheRead = TRUE;
148 cFYI(1, ("Exclusive Oplock granted on inode %p",
149 file->f_dentry->d_inode));
150 } else if ((*oplock & 0xF) == OPLOCK_READ)
151 pCifsInode->clientCanCacheRead = TRUE;
152
153 return rc;
154}
155
156int cifs_open(struct inode *inode, struct file *file)
157{
158 int rc = -EACCES;
159 int xid, oplock;
160 struct cifs_sb_info *cifs_sb;
161 struct cifsTconInfo *pTcon;
162 struct cifsFileInfo *pCifsFile;
163 struct cifsInodeInfo *pCifsInode;
164 struct list_head *tmp;
165 char *full_path = NULL;
166 int desiredAccess;
167 int disposition;
168 __u16 netfid;
169 FILE_ALL_INFO *buf = NULL;
170
171 xid = GetXid();
172
173 cifs_sb = CIFS_SB(inode->i_sb);
174 pTcon = cifs_sb->tcon;
175
176 if (file->f_flags & O_CREAT) {
177 /* search inode for this file and fill in file->private_data */
178 pCifsInode = CIFS_I(file->f_dentry->d_inode);
179 read_lock(&GlobalSMBSeslock);
180 list_for_each(tmp, &pCifsInode->openFileList) {
181 pCifsFile = list_entry(tmp, struct cifsFileInfo,
182 flist);
183 if ((pCifsFile->pfile == NULL) &&
184 (pCifsFile->pid == current->tgid)) {
185 /* mode set in cifs_create */
186
187 /* needed for writepage */
188 pCifsFile->pfile = file;
189
190 file->private_data = pCifsFile;
191 break;
192 }
193 }
194 read_unlock(&GlobalSMBSeslock);
195 if (file->private_data != NULL) {
196 rc = 0;
197 FreeXid(xid);
198 return rc;
199 } else {
200 if (file->f_flags & O_EXCL)
201 cERROR(1, ("could not find file instance for "
202 "new file %p ", file));
203 }
204 }
205
206 down(&inode->i_sb->s_vfs_rename_sem);
207 full_path = build_path_from_dentry(file->f_dentry);
208 up(&inode->i_sb->s_vfs_rename_sem);
209 if (full_path == NULL) {
210 FreeXid(xid);
211 return -ENOMEM;
212 }
213
214 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
215 inode, file->f_flags, full_path));
216 desiredAccess = cifs_convert_flags(file->f_flags);
217
218/*********************************************************************
219 * open flag mapping table:
220 *
221 * POSIX Flag CIFS Disposition
222 * ---------- ----------------
223 * O_CREAT FILE_OPEN_IF
224 * O_CREAT | O_EXCL FILE_CREATE
225 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
226 * O_TRUNC FILE_OVERWRITE
227 * none of the above FILE_OPEN
228 *
229 * Note that there is not a direct match between disposition
230 * FILE_SUPERSEDE (ie create whether or not file exists although
231 * O_CREAT | O_TRUNC is similar but truncates the existing
232 * file rather than creating a new file as FILE_SUPERSEDE does
233 * (which uses the attributes / metadata passed in on open call)
234 *?
235 *? O_SYNC is a reasonable match to CIFS writethrough flag
236 *? and the read write flags match reasonably. O_LARGEFILE
237 *? is irrelevant because largefile support is always used
238 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
239 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
240 *********************************************************************/
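/* Example: an open(2) with O_CREAT | O_TRUNC | O_WRONLY maps to disposition
 FILE_OVERWRITE_IF (via cifs_get_disposition) and desiredAccess GENERIC_WRITE
 (via cifs_convert_flags), while a plain O_RDONLY open maps to FILE_OPEN and
 GENERIC_READ */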
241
242 disposition = cifs_get_disposition(file->f_flags);
243
244 if (oplockEnabled)
245 oplock = REQ_OPLOCK;
246 else
247 oplock = FALSE;
248
249 /* BB pass O_SYNC flag through on file attributes .. BB */
250
251 /* Also refresh inode by passing in file_info buf returned by SMBOpen
252 and calling get_inode_info with returned buf (at least helps
253 non-Unix server case) */
254
255 /* BB we can not do this if this is the second open of a file
256 and the first handle has writebehind data, we might be
257 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
258 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
259 if (!buf) {
260 rc = -ENOMEM;
261 goto out;
262 }
263 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
264 CREATE_NOT_DIR, &netfid, &oplock, buf,
265 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
266 & CIFS_MOUNT_MAP_SPECIAL_CHR);
267 if (rc == -EIO) {
268 /* Old server, try legacy style OpenX */
269 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
270 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
271 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
272 & CIFS_MOUNT_MAP_SPECIAL_CHR);
273 }
274 if (rc) {
275 cFYI(1, ("cifs_open returned 0x%x ", rc));
276 goto out;
277 }
278 file->private_data =
279 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
280 if (file->private_data == NULL) {
281 rc = -ENOMEM;
282 goto out;
283 }
284 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
285 write_lock(&file->f_owner.lock);
286 write_lock(&GlobalSMBSeslock);
287 list_add(&pCifsFile->tlist, &pTcon->openFileList);
288
289 pCifsInode = CIFS_I(file->f_dentry->d_inode);
290 if (pCifsInode) {
291 rc = cifs_open_inode_helper(inode, file, pCifsInode,
292 pCifsFile, pTcon,
293 &oplock, buf, full_path, xid);
294 } else {
295 write_unlock(&GlobalSMBSeslock);
296 write_unlock(&file->f_owner.lock);
297 }
298
299 if (oplock & CIFS_CREATE_ACTION) {
300 /* time to set mode which we can not set earlier due to
301 problems creating new read-only files */
302 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
303 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
304 inode->i_mode,
305 (__u64)-1, (__u64)-1, 0 /* dev */,
306 cifs_sb->local_nls,
307 cifs_sb->mnt_cifs_flags &
308 CIFS_MOUNT_MAP_SPECIAL_CHR);
309 } else {
310 /* BB implement via Windows security descriptors eg
311 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
312 -1, -1, local_nls);
313 in the meantime could set r/o dos attribute when
314 perms are eg: mode & 0222 == 0 */
315 }
316 }
317
318out:
319 kfree(buf);
320 kfree(full_path);
321 FreeXid(xid);
322 return rc;
323}
324
325/* Try to reacquire byte range locks that were released when session */
326/* to server was lost */
327static int cifs_relock_file(struct cifsFileInfo *cifsFile)
328{
329 int rc = 0;
330
331/* BB list all locks open on this file and relock */
332
333 return rc;
334}
335
336static int cifs_reopen_file(struct inode *inode, struct file *file,
337 int can_flush)
338{
339 int rc = -EACCES;
340 int xid, oplock;
341 struct cifs_sb_info *cifs_sb;
342 struct cifsTconInfo *pTcon;
343 struct cifsFileInfo *pCifsFile;
344 struct cifsInodeInfo *pCifsInode;
345 char *full_path = NULL;
346 int desiredAccess;
347 int disposition = FILE_OPEN;
348 __u16 netfid;
349
350 if (inode == NULL)
351 return -EBADF;
352 if (file->private_data) {
353 pCifsFile = (struct cifsFileInfo *)file->private_data;
354 } else
355 return -EBADF;
356
357 xid = GetXid();
358 down(&pCifsFile->fh_sem);
359 if (pCifsFile->invalidHandle == FALSE) {
360 up(&pCifsFile->fh_sem);
361 FreeXid(xid);
362 return 0;
363 }
364
365 if (file->f_dentry == NULL) {
366 up(&pCifsFile->fh_sem);
367 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
368 FreeXid(xid);
369 return -EBADF;
370 }
371 cifs_sb = CIFS_SB(inode->i_sb);
372 pTcon = cifs_sb->tcon;
373/* can not grab rename sem here because various ops, including
374 those that already have the rename sem can end up causing writepage
375 to get called and if the server was down that means we end up here,
376 and we can never tell if the caller already has the rename_sem */
377 full_path = build_path_from_dentry(file->f_dentry);
378 if (full_path == NULL) {
379 up(&pCifsFile->fh_sem);
380 FreeXid(xid);
381 return -ENOMEM;
382 }
383
384 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
385 inode, file->f_flags,full_path));
386 desiredAccess = cifs_convert_flags(file->f_flags);
387
388 if (oplockEnabled)
389 oplock = REQ_OPLOCK;
390 else
391 oplock = FALSE;
392
393 /* Can not refresh inode by passing in file_info buf to be returned
394 by SMBOpen and then calling get_inode_info with returned buf
395 since file might have write behind data that needs to be flushed
396 and server version of file size can be stale. If we knew for sure
397 that inode was not dirty locally we could do this */
398
399/* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
400 if (buf == 0) {
401 up(&pCifsFile->fh_sem);
402 kfree(full_path);
403 FreeXid(xid);
404 return -ENOMEM;
405 } */
406 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
407 CREATE_NOT_DIR, &netfid, &oplock, NULL,
408 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
409 CIFS_MOUNT_MAP_SPECIAL_CHR);
410 if (rc) {
411 up(&pCifsFile->fh_sem);
412 cFYI(1, ("cifs_open returned 0x%x ", rc));
413 cFYI(1, ("oplock: %d ", oplock));
414 } else {
415 pCifsFile->netfid = netfid;
416 pCifsFile->invalidHandle = FALSE;
417 up(&pCifsFile->fh_sem);
418 pCifsInode = CIFS_I(inode);
419 if (pCifsInode) {
420 if (can_flush) {
421 filemap_write_and_wait(inode->i_mapping);
422 /* temporarily disable caching while we
423 go to server to get inode info */
424 pCifsInode->clientCanCacheAll = FALSE;
425 pCifsInode->clientCanCacheRead = FALSE;
426 if (pTcon->ses->capabilities & CAP_UNIX)
427 rc = cifs_get_inode_info_unix(&inode,
428 full_path, inode->i_sb, xid);
429 else
430 rc = cifs_get_inode_info(&inode,
431 full_path, NULL, inode->i_sb,
432 xid);
433 } /* else we are writing out data to server already
434 and could deadlock if we tried to flush data, and
435 since we do not know if we have data that would
436 invalidate the current end of file on the server
437 we can not go to the server to get the new inode
438 info */
439 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
440 pCifsInode->clientCanCacheAll = TRUE;
441 pCifsInode->clientCanCacheRead = TRUE;
442 cFYI(1, ("Exclusive Oplock granted on inode %p",
443 file->f_dentry->d_inode));
444 } else if ((oplock & 0xF) == OPLOCK_READ) {
445 pCifsInode->clientCanCacheRead = TRUE;
446 pCifsInode->clientCanCacheAll = FALSE;
447 } else {
448 pCifsInode->clientCanCacheRead = FALSE;
449 pCifsInode->clientCanCacheAll = FALSE;
450 }
451 cifs_relock_file(pCifsFile);
452 }
453 }
454
455 kfree(full_path);
456 FreeXid(xid);
457 return rc;
458}
459
460int cifs_close(struct inode *inode, struct file *file)
461{
462 int rc = 0;
463 int xid;
464 struct cifs_sb_info *cifs_sb;
465 struct cifsTconInfo *pTcon;
466 struct cifsFileInfo *pSMBFile =
467 (struct cifsFileInfo *)file->private_data;
468
469 xid = GetXid();
470
471 cifs_sb = CIFS_SB(inode->i_sb);
472 pTcon = cifs_sb->tcon;
473 if (pSMBFile) {
474 pSMBFile->closePend = TRUE;
475 write_lock(&file->f_owner.lock);
476 if (pTcon) {
477 /* no sense reconnecting to close a file that is
478 already closed */
479 if (pTcon->tidStatus != CifsNeedReconnect) {
480 int timeout = 2;
481 while((atomic_read(&pSMBFile->wrtPending) != 0)
482 && (timeout < 1000) ) {
483 /* Give write a better chance to get to
484 server ahead of the close. We do not
485 want to add a wait_q here as it would
486 increase the memory utilization as
487 the struct would be in each open file,
488 but this should give enough time to
489 clear the socket */
490 write_unlock(&file->f_owner.lock);
491 cERROR(1,("close with pending writes"));
492 msleep(timeout);
493 write_lock(&file->f_owner.lock);
494 timeout *= 4;
495 }
496 write_unlock(&file->f_owner.lock);
497 rc = CIFSSMBClose(xid, pTcon,
498 pSMBFile->netfid);
499 write_lock(&file->f_owner.lock);
500 }
501 }
502 write_lock(&GlobalSMBSeslock);
503 list_del(&pSMBFile->flist);
504 list_del(&pSMBFile->tlist);
505 write_unlock(&GlobalSMBSeslock);
506 write_unlock(&file->f_owner.lock);
507 kfree(pSMBFile->search_resume_name);
508 kfree(file->private_data);
509 file->private_data = NULL;
510 } else
511 rc = -EBADF;
512
513 if (list_empty(&(CIFS_I(inode)->openFileList))) {
514 cFYI(1, ("closing last open instance for inode %p", inode));
515 /* if the file is not open we do not know if we can cache info
516 on this inode, much less write behind and read ahead */
517 CIFS_I(inode)->clientCanCacheRead = FALSE;
518 CIFS_I(inode)->clientCanCacheAll = FALSE;
519 }
520 if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
521 rc = CIFS_I(inode)->write_behind_rc;
522 FreeXid(xid);
523 return rc;
524}
525
526int cifs_closedir(struct inode *inode, struct file *file)
527{
528 int rc = 0;
529 int xid;
530 struct cifsFileInfo *pCFileStruct =
531 (struct cifsFileInfo *)file->private_data;
532 char *ptmp;
533
534 cFYI(1, ("Closedir inode = 0x%p with ", inode));
535
536 xid = GetXid();
537
538 if (pCFileStruct) {
539 struct cifsTconInfo *pTcon;
540 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
541
542 pTcon = cifs_sb->tcon;
543
544 cFYI(1, ("Freeing private data in close dir"));
545 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
546 (pCFileStruct->invalidHandle == FALSE)) {
547 pCFileStruct->invalidHandle = TRUE;
548 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
549 cFYI(1, ("Closing uncompleted readdir with rc %d",
550 rc));
551 /* not much we can do if it fails anyway, ignore rc */
552 rc = 0;
553 }
554 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
555 if (ptmp) {
556 cFYI(1, ("closedir free smb buf in srch struct"));
557 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
558 cifs_buf_release(ptmp);
559 }
560 ptmp = pCFileStruct->search_resume_name;
561 if (ptmp) {
562 cFYI(1, ("closedir free resume name"));
563 pCFileStruct->search_resume_name = NULL;
564 kfree(ptmp);
565 }
566 kfree(file->private_data);
567 file->private_data = NULL;
568 }
569 /* BB can we lock the filestruct while this is going on? */
570 FreeXid(xid);
571 return rc;
572}
573
574int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
575{
576 int rc, xid;
577 __u32 lockType = LOCKING_ANDX_LARGE_FILES;
578 __u32 numLock = 0;
579 __u32 numUnlock = 0;
580 __u64 length;
581 int wait_flag = FALSE;
582 struct cifs_sb_info *cifs_sb;
583 struct cifsTconInfo *pTcon;
584
585 length = 1 + pfLock->fl_end - pfLock->fl_start;
586 rc = -EACCES;
587 xid = GetXid();
588
589 cFYI(1, ("Lock parm: 0x%x flockflags: "
590 "0x%x flocktype: 0x%x start: %lld end: %lld",
591 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
592 pfLock->fl_end));
593
594 if (pfLock->fl_flags & FL_POSIX)
595 cFYI(1, ("Posix "));
596 if (pfLock->fl_flags & FL_FLOCK)
597 cFYI(1, ("Flock "));
598 if (pfLock->fl_flags & FL_SLEEP) {
599 cFYI(1, ("Blocking lock "));
600 wait_flag = TRUE;
601 }
602 if (pfLock->fl_flags & FL_ACCESS)
603 cFYI(1, ("Process suspended by mandatory locking - "
604 "not implemented yet "));
605 if (pfLock->fl_flags & FL_LEASE)
606 cFYI(1, ("Lease on file - not implemented yet"));
607 if (pfLock->fl_flags &
608 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
609 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
610
611 if (pfLock->fl_type == F_WRLCK) {
612 cFYI(1, ("F_WRLCK "));
613 numLock = 1;
614 } else if (pfLock->fl_type == F_UNLCK) {
615 cFYI(1, ("F_UNLCK "));
616 numUnlock = 1;
617 } else if (pfLock->fl_type == F_RDLCK) {
618 cFYI(1, ("F_RDLCK "));
619 lockType |= LOCKING_ANDX_SHARED_LOCK;
620 numLock = 1;
621 } else if (pfLock->fl_type == F_EXLCK) {
622 cFYI(1, ("F_EXLCK "));
623 numLock = 1;
624 } else if (pfLock->fl_type == F_SHLCK) {
625 cFYI(1, ("F_SHLCK "));
626 lockType |= LOCKING_ANDX_SHARED_LOCK;
627 numLock = 1;
628 } else
629 cFYI(1, ("Unknown type of lock "));
630
631 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
632 pTcon = cifs_sb->tcon;
633
634 if (file->private_data == NULL) {
635 FreeXid(xid);
636 return -EBADF;
637 }
638
639 if (IS_GETLK(cmd)) {
640 rc = CIFSSMBLock(xid, pTcon,
641 ((struct cifsFileInfo *)file->
642 private_data)->netfid,
643 length,
644 pfLock->fl_start, 0, 1, lockType,
645 0 /* wait flag */ );
646 if (rc == 0) {
647 rc = CIFSSMBLock(xid, pTcon,
648 ((struct cifsFileInfo *) file->
649 private_data)->netfid,
650 length,
651 pfLock->fl_start, 1 /* numUnlock */ ,
652 0 /* numLock */ , lockType,
653 0 /* wait flag */ );
654 pfLock->fl_type = F_UNLCK;
655 if (rc != 0)
656 cERROR(1, ("Error unlocking previously locked "
657 "range %d during test of lock ",
658 rc));
659 rc = 0;
660
661 } else {
662 /* if rc == ERR_SHARING_VIOLATION ? */
663 rc = 0; /* do not change lock type to unlock
664 since range in use */
665 }
666
667 FreeXid(xid);
668 return rc;
669 }
670
671 rc = CIFSSMBLock(xid, pTcon,
672 ((struct cifsFileInfo *) file->private_data)->
673 netfid, length,
674 pfLock->fl_start, numUnlock, numLock, lockType,
675 wait_flag);
676 if (pfLock->fl_flags & FL_POSIX)
677 posix_lock_file_wait(file, pfLock);
678 FreeXid(xid);
679 return rc;
680}
681
682ssize_t cifs_user_write(struct file *file, const char __user *write_data,
683 size_t write_size, loff_t *poffset)
684{
685 int rc = 0;
686 unsigned int bytes_written = 0;
687 unsigned int total_written;
688 struct cifs_sb_info *cifs_sb;
689 struct cifsTconInfo *pTcon;
690 int xid, long_op;
691 struct cifsFileInfo *open_file;
692
693 if (file->f_dentry == NULL)
694 return -EBADF;
695
696 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
697 if (cifs_sb == NULL)
698 return -EBADF;
699
700 pTcon = cifs_sb->tcon;
701
702 /* cFYI(1,
703 (" write %d bytes to offset %lld of %s", write_size,
704 *poffset, file->f_dentry->d_name.name)); */
705
706 if (file->private_data == NULL)
707 return -EBADF;
708 else
709 open_file = (struct cifsFileInfo *) file->private_data;
710
711 xid = GetXid();
712 if (file->f_dentry->d_inode == NULL) {
713 FreeXid(xid);
714 return -EBADF;
715 }
716
717 if (*poffset > file->f_dentry->d_inode->i_size)
718 long_op = 2; /* writes past end of file can take a long time */
719 else
720 long_op = 1;
721
722 for (total_written = 0; write_size > total_written;
723 total_written += bytes_written) {
724 rc = -EAGAIN;
725 while (rc == -EAGAIN) {
726 if (file->private_data == NULL) {
727 /* file has been closed on us */
728 FreeXid(xid);
729 /* if we have gotten here we have written some data
730 and blocked, and the file has been freed on us while
731 we blocked so return what we managed to write */
732 return total_written;
733 }
734 if (open_file->closePend) {
735 FreeXid(xid);
736 if (total_written)
737 return total_written;
738 else
739 return -EBADF;
740 }
741 if (open_file->invalidHandle) {
742 if ((file->f_dentry == NULL) ||
743 (file->f_dentry->d_inode == NULL)) {
744 FreeXid(xid);
745 return total_written;
746 }
747 /* we could deadlock if we called
748 filemap_fdatawait from here so tell
749 reopen_file not to flush data to server
750 now */
751 rc = cifs_reopen_file(file->f_dentry->d_inode,
752 file, FALSE);
753 if (rc != 0)
754 break;
755 }
756
757 rc = CIFSSMBWrite(xid, pTcon,
758 open_file->netfid,
759 min_t(const int, cifs_sb->wsize,
760 write_size - total_written),
761 *poffset, &bytes_written,
762 NULL, write_data + total_written, long_op);
763 }
764 if (rc || (bytes_written == 0)) {
765 if (total_written)
766 break;
767 else {
768 FreeXid(xid);
769 return rc;
770 }
771 } else
772 *poffset += bytes_written;
773 long_op = FALSE; /* subsequent writes fast -
774 15 seconds is plenty */
775 }
776
777 cifs_stats_bytes_written(pTcon, total_written);
778
779 /* since the write may have blocked check these pointers again */
780 if (file->f_dentry) {
781 if (file->f_dentry->d_inode) {
782 struct inode *inode = file->f_dentry->d_inode;
783 inode->i_ctime = inode->i_mtime =
784 current_fs_time(inode->i_sb);
785 if (total_written > 0) {
786 if (*poffset > file->f_dentry->d_inode->i_size)
787 i_size_write(file->f_dentry->d_inode,
788 *poffset);
789 }
790 mark_inode_dirty_sync(file->f_dentry->d_inode);
791 }
792 }
793 FreeXid(xid);
794 return total_written;
795}
796
797static ssize_t cifs_write(struct file *file, const char *write_data,
798 size_t write_size, loff_t *poffset)
799{
800 int rc = 0;
801 unsigned int bytes_written = 0;
802 unsigned int total_written;
803 struct cifs_sb_info *cifs_sb;
804 struct cifsTconInfo *pTcon;
805 int xid, long_op;
806 struct cifsFileInfo *open_file;
807
808 if (file->f_dentry == NULL)
809 return -EBADF;
810
811 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
812 if (cifs_sb == NULL)
813 return -EBADF;
814
815 pTcon = cifs_sb->tcon;
816
817 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
818 *poffset, file->f_dentry->d_name.name));
819
820 if (file->private_data == NULL)
821 return -EBADF;
822 else
823 open_file = (struct cifsFileInfo *)file->private_data;
824
825 xid = GetXid();
826 if (file->f_dentry->d_inode == NULL) {
827 FreeXid(xid);
828 return -EBADF;
829 }
830
831 if (*poffset > file->f_dentry->d_inode->i_size)
832 long_op = 2; /* writes past end of file can take a long time */
833 else
834 long_op = 1;
835
836 for (total_written = 0; write_size > total_written;
837 total_written += bytes_written) {
838 rc = -EAGAIN;
839 while (rc == -EAGAIN) {
840 if (file->private_data == NULL) {
841 /* file has been closed on us */
842 FreeXid(xid);
843 /* if we have gotten here we have written some data
844 and blocked, and the file has been freed on us
845 while we blocked so return what we managed to
846 write */
847 return total_written;
848 }
849 if (open_file->closePend) {
850 FreeXid(xid);
851 if (total_written)
852 return total_written;
853 else
854 return -EBADF;
855 }
856 if (open_file->invalidHandle) {
857 if ((file->f_dentry == NULL) ||
858 (file->f_dentry->d_inode == NULL)) {
859 FreeXid(xid);
860 return total_written;
861 }
862 /* we could deadlock if we called
863 filemap_fdatawait from here so tell
864 reopen_file not to flush data to
865 server now */
866 rc = cifs_reopen_file(file->f_dentry->d_inode,
867 file, FALSE);
868 if (rc != 0)
869 break;
870 }
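 /* when the session does not require signing, the data can be handed
 to the transport as a separate kvec (iov[1]) via CIFSSMBWrite2,
 avoiding a copy into one contiguous buffer; with signing enabled
 we fall back to the plain CIFSSMBWrite path below */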
871 /* BB FIXME We can not sign across two buffers yet */
872 if((pTcon->ses->server->secMode &
873 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0) {
874 struct kvec iov[2];
875 unsigned int len;
876
877 len = min((size_t)cifs_sb->wsize,
878 write_size - total_written);
879 /* iov[0] is reserved for smb header */
880 iov[1].iov_base = (char *)write_data +
881 total_written;
882 iov[1].iov_len = len;
883 rc = CIFSSMBWrite2(xid, pTcon,
884 open_file->netfid, len,
885 *poffset, &bytes_written,
886 iov, 1, long_op);
887 } else
888 /* BB FIXME fixup indentation of line below */
889 rc = CIFSSMBWrite(xid, pTcon,
890 open_file->netfid,
891 min_t(const int, cifs_sb->wsize,
892 write_size - total_written),
893 *poffset, &bytes_written,
894 write_data + total_written, NULL, long_op);
895 }
896 if (rc || (bytes_written == 0)) {
897 if (total_written)
898 break;
899 else {
900 FreeXid(xid);
901 return rc;
902 }
903 } else
904 *poffset += bytes_written;
905 long_op = FALSE; /* subsequent writes fast -
906 15 seconds is plenty */
907 }
908
909 cifs_stats_bytes_written(pTcon, total_written);
910
911 /* since the write may have blocked check these pointers again */
912 if (file->f_dentry) {
913 if (file->f_dentry->d_inode) {
914 file->f_dentry->d_inode->i_ctime =
915 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
916 if (total_written > 0) {
917 if (*poffset > file->f_dentry->d_inode->i_size)
918 i_size_write(file->f_dentry->d_inode,
919 *poffset);
920 }
921 mark_inode_dirty_sync(file->f_dentry->d_inode);
922 }
923 }
924 FreeXid(xid);
925 return total_written;
926}
927
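/* Walk the inode's list of open file handles and return one that was opened
 for writing (O_RDWR or O_WRONLY), reopening it first if the handle has been
 invalidated. The returned handle has wrtPending already incremented, so the
 caller must atomic_dec() it when the write is done. Returns NULL if no
 writable handle exists. */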
928struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
929{
930 struct cifsFileInfo *open_file;
931 int rc;
932
933 read_lock(&GlobalSMBSeslock);
934 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
935 if (open_file->closePend)
936 continue;
937 if (open_file->pfile &&
938 ((open_file->pfile->f_flags & O_RDWR) ||
939 (open_file->pfile->f_flags & O_WRONLY))) {
940 atomic_inc(&open_file->wrtPending);
941 read_unlock(&GlobalSMBSeslock);
942 if((open_file->invalidHandle) &&
943 (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
944 rc = cifs_reopen_file(&cifs_inode->vfs_inode,
945 open_file->pfile, FALSE);
946 /* if it fails, try another handle - might be */
947 /* dangerous to hold up writepages with retry */
948 if(rc) {
949 cFYI(1,("failed on reopen file in wp"));
950 read_lock(&GlobalSMBSeslock);
951 /* can not use this handle, no write
952 pending on this one after all */
953 atomic_dec
954 (&open_file->wrtPending);
955 continue;
956 }
957 }
958 return open_file;
959 }
960 }
961 read_unlock(&GlobalSMBSeslock);
962 return NULL;
963}
964
965static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
966{
967 struct address_space *mapping = page->mapping;
968 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
969 char *write_data;
970 int rc = -EFAULT;
971 int bytes_written = 0;
972 struct cifs_sb_info *cifs_sb;
973 struct cifsTconInfo *pTcon;
974 struct inode *inode;
975 struct cifsFileInfo *open_file;
976
977 if (!mapping || !mapping->host)
978 return -EFAULT;
979
980 inode = page->mapping->host;
981 cifs_sb = CIFS_SB(inode->i_sb);
982 pTcon = cifs_sb->tcon;
983
984 offset += (loff_t)from;
985 write_data = kmap(page);
986 write_data += from;
987
988 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
989 kunmap(page);
990 return -EIO;
991 }
992
993 /* racing with truncate? */
994 if (offset > mapping->host->i_size) {
995 kunmap(page);
996 return 0; /* don't care */
997 }
998
999 /* check to make sure that we are not extending the file */
1000 if (mapping->host->i_size - offset < (loff_t)to)
1001 to = (unsigned)(mapping->host->i_size - offset);
1002
1003 open_file = find_writable_file(CIFS_I(mapping->host));
1004 if (open_file) {
1005 bytes_written = cifs_write(open_file->pfile, write_data,
1006 to-from, &offset);
1007 atomic_dec(&open_file->wrtPending);
1008 /* Does mm or vfs already set times? */
1009 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1010 if ((bytes_written > 0) && (offset)) {
1011 rc = 0;
1012 } else if (bytes_written < 0) {
1013 if (rc != -EBADF)
1014 rc = bytes_written;
1015 }
1016 } else {
1017 cFYI(1, ("No writeable filehandles for inode"));
1018 rc = -EIO;
1019 }
1020
1021 kunmap(page);
1022 return rc;
1023}
1024
1da177e4 1025static int cifs_writepages(struct address_space *mapping,
37c0eb46 1026 struct writeback_control *wbc)
1da177e4 1027{
37c0eb46
SF
1028 struct backing_dev_info *bdi = mapping->backing_dev_info;
1029 unsigned int bytes_to_write;
1030 unsigned int bytes_written;
1031 struct cifs_sb_info *cifs_sb;
1032 int done = 0;
1033 pgoff_t end = -1;
1034 pgoff_t index;
1035 int is_range = 0;
1036 struct kvec iov[32];
1037 int len;
1038 int n_iov = 0;
1039 pgoff_t next;
1040 int nr_pages;
1041 __u64 offset = 0;
1042 struct cifsFileInfo *open_file;
1043 struct page *page;
1044 struct pagevec pvec;
1045 int rc = 0;
1046 int scanned = 0;
1047 int xid;
1048
1049 cifs_sb = CIFS_SB(mapping->host->i_sb);
1050
1051 /*
1052 * If wsize is smaller that the page cache size, default to writing
1053 * one page at a time via cifs_writepage
1054 */
1055 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1056 return generic_writepages(mapping, wbc);
1057
1058 /* BB FIXME we do not have code to sign across multiple buffers yet,
1059 so go to older writepage style write which we can sign if needed */
1060 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1061 if(cifs_sb->tcon->ses->server->secMode &
1062 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1063 return generic_writepages(mapping, wbc);
1064
1065 /*
1066 * BB: Is this meaningful for a non-block-device file system?
1067 * If it is, we should test it again after we do I/O
1068 */
1069 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1070 wbc->encountered_congestion = 1;
1071 return 0;
1072 }
1073
1074 xid = GetXid();
1075
1076 pagevec_init(&pvec, 0);
1077 if (wbc->sync_mode == WB_SYNC_NONE)
1078 index = mapping->writeback_index; /* Start from prev offset */
1079 else {
1080 index = 0;
1081 scanned = 1;
1082 }
1083 if (wbc->start || wbc->end) {
1084 index = wbc->start >> PAGE_CACHE_SHIFT;
1085 end = wbc->end >> PAGE_CACHE_SHIFT;
1086 is_range = 1;
1087 scanned = 1;
1088 }
1089retry:
1090 while (!done && (index <= end) &&
1091 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1092 PAGECACHE_TAG_DIRTY,
1093 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1094 int first;
1095 unsigned int i;
1096
1097 first = -1;
1098 next = 0;
1099 n_iov = 0;
1100 bytes_to_write = 0;
1101
1102 for (i = 0; i < nr_pages; i++) {
1103 page = pvec.pages[i];
1104 /*
1105 * At this point we hold neither mapping->tree_lock nor
1106 * lock on the page itself: the page may be truncated or
1107 * invalidated (changing page->mapping to NULL), or even
1108 * swizzled back from swapper_space to tmpfs file
1109 * mapping
1110 */
1111
1112 if (first < 0)
1113 lock_page(page);
1114 else if (TestSetPageLocked(page))
1115 break;
1116
1117 if (unlikely(page->mapping != mapping)) {
1118 unlock_page(page);
1119 break;
1120 }
1121
1122 if (unlikely(is_range) && (page->index > end)) {
1123 done = 1;
1124 unlock_page(page);
1125 break;
1126 }
1127
1128 if (next && (page->index != next)) {
1129 /* Not next consecutive page */
1130 unlock_page(page);
1131 break;
1132 }
1133
1134 if (wbc->sync_mode != WB_SYNC_NONE)
1135 wait_on_page_writeback(page);
1136
1137 if (PageWriteback(page) ||
1138 !test_clear_page_dirty(page)) {
1139 unlock_page(page);
1140 break;
1141 }
1142
1143 if (page_offset(page) >= mapping->host->i_size) {
1144 done = 1;
1145 unlock_page(page);
1146 break;
1147 }
1148
1149 /*
1150 * BB can we get rid of this? pages are held by pvec
1151 */
1152 page_cache_get(page);
1153
1154 len = min(mapping->host->i_size - page_offset(page),
1155 (loff_t)PAGE_CACHE_SIZE);
1156
1157 /* reserve iov[0] for the smb header */
1158 n_iov++;
1159 iov[n_iov].iov_base = kmap(page);
1160 iov[n_iov].iov_len = len;
1161 bytes_to_write += len;
1162
1163 if (first < 0) {
1164 first = i;
1165 offset = page_offset(page);
1166 }
1167 next = page->index + 1;
1168 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1169 break;
1170 }
1171 if (n_iov) {
1172 /* Search for a writable handle every time we call
1173 * CIFSSMBWrite2. We can't rely on the last handle
1174 * we used to still be valid
1175 */
1176 open_file = find_writable_file(CIFS_I(mapping->host));
1177 if (!open_file) {
1178 cERROR(1, ("No writable handles for inode"));
1179 rc = -EBADF;
1180 } else {
1181 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1182 open_file->netfid,
1183 bytes_to_write, offset,
1184 &bytes_written, iov, n_iov,
1185 1);
1186 atomic_dec(&open_file->wrtPending);
1187 if (rc || bytes_written < bytes_to_write) {
1188 cERROR(1,("Write2 ret %d, written = %d",
1189 rc, bytes_written));
1190 /* BB what if continued retry is
1191 requested via mount flags? */
1192 set_bit(AS_EIO, &mapping->flags);
1193 } else {
1194 cifs_stats_bytes_written(cifs_sb->tcon,
1195 bytes_written);
1196 }
1197 }
1198 for (i = 0; i < n_iov; i++) {
1199 page = pvec.pages[first + i];
1200 /* Should we also set page error on
1201 success rc but too little data written? */
1202 /* BB investigate retry logic on temporary
1203 server crash cases and how recovery works
1204 when page marked as error */
1205 if(rc)
1206 SetPageError(page);
1207 kunmap(page);
1208 unlock_page(page);
1209 page_cache_release(page);
1210 }
1211 if ((wbc->nr_to_write -= n_iov) <= 0)
1212 done = 1;
1213 index = next;
1214 }
1215 pagevec_release(&pvec);
1216 }
1217 if (!scanned && !done) {
1218 /*
1219 * We hit the last page and there is more work to be done: wrap
1220 * back to the start of the file
1221 */
1222 scanned = 1;
1223 index = 0;
1224 goto retry;
1225 }
1226 if (!is_range)
1227 mapping->writeback_index = index;
1228
1229 FreeXid(xid);
1230
1231 return rc;
1232}
1233
1234static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1235{
1236 int rc = -EFAULT;
1237 int xid;
1238
1239 xid = GetXid();
1240/* BB add check for wbc flags */
1241 page_cache_get(page);
1242 if (!PageUptodate(page)) {
1243 cFYI(1, ("ppw - page not up to date"));
1244 }
1245
1246 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1247 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1248 unlock_page(page);
1249 page_cache_release(page);
1250 FreeXid(xid);
1251 return rc;
1252}
1253
1254static int cifs_commit_write(struct file *file, struct page *page,
1255 unsigned offset, unsigned to)
1256{
1257 int xid;
1258 int rc = 0;
1259 struct inode *inode = page->mapping->host;
1260 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1261 char *page_data;
1262
1263 xid = GetXid();
1264 cFYI(1, ("commit write for page %p up to position %lld for %d",
1265 page, position, to));
1266 if (position > inode->i_size) {
1267 i_size_write(inode, position);
1268 /* if (file->private_data == NULL) {
1269 rc = -EBADF;
1270 } else {
1271 open_file = (struct cifsFileInfo *)file->private_data;
1272 cifs_sb = CIFS_SB(inode->i_sb);
1273 rc = -EAGAIN;
1274 while (rc == -EAGAIN) {
1275 if ((open_file->invalidHandle) &&
1276 (!open_file->closePend)) {
1277 rc = cifs_reopen_file(
1278 file->f_dentry->d_inode, file);
1279 if (rc != 0)
1280 break;
1281 }
1282 if (!open_file->closePend) {
1283 rc = CIFSSMBSetFileSize(xid,
1284 cifs_sb->tcon, position,
1285 open_file->netfid,
1286 open_file->pid, FALSE);
1287 } else {
1288 rc = -EBADF;
1289 break;
1290 }
1291 }
1292 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1293 } */
1294 }
1295 if (!PageUptodate(page)) {
1296 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1297 /* can not rely on (or let) writepage write this data */
1298 if (to < offset) {
1299 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1300 offset, to));
1301 FreeXid(xid);
1302 return rc;
1303 }
1304 /* this is probably better than directly calling
1305 partialpage_write since in this function the file handle is
1306 known which we might as well leverage */
1307 /* BB check if anything else missing out of ppw
1308 such as updating last write time */
1309 page_data = kmap(page);
1310 rc = cifs_write(file, page_data + offset, to-offset,
1311 &position);
1312 if (rc > 0)
1313 rc = 0;
1314 /* else if (rc < 0) should we set writebehind rc? */
1315 kunmap(page);
1316 } else {
1317 set_page_dirty(page);
1318 }
1319
1320 FreeXid(xid);
1321 return rc;
1322}
1323
1324int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1325{
1326 int xid;
1327 int rc = 0;
1328 struct inode *inode = file->f_dentry->d_inode;
1329
1330 xid = GetXid();
1331
1332 cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
1333 dentry->d_name.name, datasync));
1334
1335 rc = filemap_fdatawrite(inode->i_mapping);
1336 if (rc == 0)
1337 CIFS_I(inode)->write_behind_rc = 0;
1338 FreeXid(xid);
1339 return rc;
1340}
1341
1342/* static int cifs_sync_page(struct page *page)
1343{
1344 struct address_space *mapping;
1345 struct inode *inode;
1346 unsigned long index = page->index;
1347 unsigned int rpages = 0;
1348 int rc = 0;
1349
1350 cFYI(1, ("sync page %p",page));
1351 mapping = page->mapping;
1352 if (!mapping)
1353 return 0;
1354 inode = mapping->host;
1355 if (!inode)
1356 return 0; */
1357
1358/* fill in rpages then
1359 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1360
1361/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
1362
1363 if (rc < 0)
1364 return rc;
1365 return 0;
1366} */
1367
1368/*
1369 * As file closes, flush all cached write data for this inode checking
1370 * for write behind errors.
1371 */
1372int cifs_flush(struct file *file)
1373{
1374 struct inode * inode = file->f_dentry->d_inode;
1375 int rc = 0;
1376
1377 /* Rather than do the steps manually:
1378 lock the inode for writing
1379 loop through pages looking for write behind data (dirty pages)
1380 coalesce into contiguous 16K (or smaller) chunks to write to server
1381 send to server (prefer in parallel)
1382 deal with writebehind errors
1383 unlock inode for writing
1384 filemapfdatawrite appears easier for the time being */
1385
1386 rc = filemap_fdatawrite(inode->i_mapping);
1387 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1388 CIFS_I(inode)->write_behind_rc = 0;
1389
1390 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1391
1392 return rc;
1393}
1394
1395ssize_t cifs_user_read(struct file *file, char __user *read_data,
1396 size_t read_size, loff_t *poffset)
1397{
1398 int rc = -EACCES;
1399 unsigned int bytes_read = 0;
1400 unsigned int total_read = 0;
1401 unsigned int current_read_size;
1402 struct cifs_sb_info *cifs_sb;
1403 struct cifsTconInfo *pTcon;
1404 int xid;
1405 struct cifsFileInfo *open_file;
1406 char *smb_read_data;
1407 char __user *current_offset;
1408 struct smb_com_read_rsp *pSMBr;
1409
1410 xid = GetXid();
1411 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1412 pTcon = cifs_sb->tcon;
1413
1414 if (file->private_data == NULL) {
1415 FreeXid(xid);
1416 return -EBADF;
1417 }
1418 open_file = (struct cifsFileInfo *)file->private_data;
1419
1420 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1421 cFYI(1, ("attempting read on write only file instance"));
1422 }
1423 for (total_read = 0, current_offset = read_data;
1424 read_size > total_read;
1425 total_read += bytes_read, current_offset += bytes_read) {
1426 current_read_size = min_t(const int, read_size - total_read,
1427 cifs_sb->rsize);
1428 rc = -EAGAIN;
1429 smb_read_data = NULL;
1430 while (rc == -EAGAIN) {
1431 int buf_type = CIFS_NO_BUFFER;
1432 if ((open_file->invalidHandle) &&
1433 (!open_file->closePend)) {
1434 rc = cifs_reopen_file(file->f_dentry->d_inode,
1435 file, TRUE);
1436 if (rc != 0)
1437 break;
1438 }
1439 rc = CIFSSMBRead(xid, pTcon,
1440 open_file->netfid,
1441 current_read_size, *poffset,
1442 &bytes_read, &smb_read_data,
1443 &buf_type);
1444 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1445 if (smb_read_data) {
1446 if (copy_to_user(current_offset,
1447 smb_read_data +
1448 4 /* RFC1001 length field */ +
1449 le16_to_cpu(pSMBr->DataOffset),
1450 bytes_read)) {
1451 rc = -EFAULT;
1452 }
1453
1454 if(buf_type == CIFS_SMALL_BUFFER)
1455 cifs_small_buf_release(smb_read_data);
1456 else if(buf_type == CIFS_LARGE_BUFFER)
1457 cifs_buf_release(smb_read_data);
1458 smb_read_data = NULL;
1459 }
1460 }
1461 if (rc || (bytes_read == 0)) {
1462 if (total_read) {
1463 break;
1464 } else {
1465 FreeXid(xid);
1466 return rc;
1467 }
1468 } else {
1469 cifs_stats_bytes_read(pTcon, bytes_read);
1470 *poffset += bytes_read;
1471 }
1472 }
1473 FreeXid(xid);
1474 return total_read;
1475}
1476
1477
1478static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1479 loff_t *poffset)
1480{
1481 int rc = -EACCES;
1482 unsigned int bytes_read = 0;
1483 unsigned int total_read;
1484 unsigned int current_read_size;
1485 struct cifs_sb_info *cifs_sb;
1486 struct cifsTconInfo *pTcon;
1487 int xid;
1488 char *current_offset;
1489 struct cifsFileInfo *open_file;
1490 int buf_type = CIFS_NO_BUFFER;
1491
1492 xid = GetXid();
1493 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1494 pTcon = cifs_sb->tcon;
1495
1496 if (file->private_data == NULL) {
1497 FreeXid(xid);
1498 return -EBADF;
1499 }
1500 open_file = (struct cifsFileInfo *)file->private_data;
1501
1502 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1503 cFYI(1, ("attempting read on write only file instance"));
1504
1505 for (total_read = 0, current_offset = read_data;
1506 read_size > total_read;
1507 total_read += bytes_read, current_offset += bytes_read) {
1508 current_read_size = min_t(const int, read_size - total_read,
1509 cifs_sb->rsize);
1510 /* For windows me and 9x we do not want to request more
1511 than it negotiated since it will refuse the read then */
1512 if((pTcon->ses) &&
1513 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1514 current_read_size = min_t(const int, current_read_size,
1515 pTcon->ses->server->maxBuf - 128);
1516 }
1517 rc = -EAGAIN;
1518 while (rc == -EAGAIN) {
1519 if ((open_file->invalidHandle) &&
1520 (!open_file->closePend)) {
1521 rc = cifs_reopen_file(file->f_dentry->d_inode,
1522 file, TRUE);
1523 if (rc != 0)
1524 break;
1525 }
1526 rc = CIFSSMBRead(xid, pTcon,
1527 open_file->netfid,
1528 current_read_size, *poffset,
1529 &bytes_read, &current_offset,
1530 &buf_type);
1531 }
1532 if (rc || (bytes_read == 0)) {
1533 if (total_read) {
1534 break;
1535 } else {
1536 FreeXid(xid);
1537 return rc;
1538 }
1539 } else {
1540 cifs_stats_bytes_read(pTcon, total_read);
1541 *poffset += bytes_read;
1542 }
1543 }
1544 FreeXid(xid);
1545 return total_read;
1546}
1547
1548int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1549{
1550 struct dentry *dentry = file->f_dentry;
1551 int rc, xid;
1552
1553 xid = GetXid();
1554 rc = cifs_revalidate(dentry);
1555 if (rc) {
1556 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1557 FreeXid(xid);
1558 return rc;
1559 }
1560 rc = generic_file_mmap(file, vma);
1561 FreeXid(xid);
1562 return rc;
1563}
1564
1565
1566static void cifs_copy_cache_pages(struct address_space *mapping,
1567 struct list_head *pages, int bytes_read, char *data,
1568 struct pagevec *plru_pvec)
1569{
1570 struct page *page;
1571 char *target;
1572
1573 while (bytes_read > 0) {
1574 if (list_empty(pages))
1575 break;
1576
1577 page = list_entry(pages->prev, struct page, lru);
1578 list_del(&page->lru);
1579
1580 if (add_to_page_cache(page, mapping, page->index,
1581 GFP_KERNEL)) {
1582 page_cache_release(page);
1583 cFYI(1, ("Add page cache failed"));
1584 data += PAGE_CACHE_SIZE;
1585 bytes_read -= PAGE_CACHE_SIZE;
1586 continue;
1587 }
1588
1589 target = kmap_atomic(page,KM_USER0);
1590
1591 if (PAGE_CACHE_SIZE > bytes_read) {
1592 memcpy(target, data, bytes_read);
1593 /* zero the tail end of this partial page */
1594 memset(target + bytes_read, 0,
1595 PAGE_CACHE_SIZE - bytes_read);
1596 bytes_read = 0;
1597 } else {
1598 memcpy(target, data, PAGE_CACHE_SIZE);
1599 bytes_read -= PAGE_CACHE_SIZE;
1600 }
1601 kunmap_atomic(target, KM_USER0);
1602
1603 flush_dcache_page(page);
1604 SetPageUptodate(page);
1605 unlock_page(page);
1606 if (!pagevec_add(plru_pvec, page))
1607 __pagevec_lru_add(plru_pvec);
1608 data += PAGE_CACHE_SIZE;
1609 }
1610 return;
1611}
1612
1613static int cifs_readpages(struct file *file, struct address_space *mapping,
1614 struct list_head *page_list, unsigned num_pages)
1615{
1616 int rc = -EACCES;
1617 int xid;
1618 loff_t offset;
1619 struct page *page;
1620 struct cifs_sb_info *cifs_sb;
1621 struct cifsTconInfo *pTcon;
1622 int bytes_read = 0;
1623 unsigned int read_size,i;
1624 char *smb_read_data = NULL;
1625 struct smb_com_read_rsp *pSMBr;
1626 struct pagevec lru_pvec;
1627 struct cifsFileInfo *open_file;
1628 int buf_type = CIFS_NO_BUFFER;
1629
1630 xid = GetXid();
1631 if (file->private_data == NULL) {
1632 FreeXid(xid);
1633 return -EBADF;
1634 }
1635 open_file = (struct cifsFileInfo *)file->private_data;
1636 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1637 pTcon = cifs_sb->tcon;
1638
1639 pagevec_init(&lru_pvec, 0);
1640
1641 for (i = 0; i < num_pages; ) {
1642 unsigned contig_pages;
1643 struct page *tmp_page;
1644 unsigned long expected_index;
1645
1646 if (list_empty(page_list))
1647 break;
1648
1649 page = list_entry(page_list->prev, struct page, lru);
1650 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1651
1652 /* count adjacent pages that we will read into */
1653 contig_pages = 0;
1654 expected_index =
1655 list_entry(page_list->prev, struct page, lru)->index;
1656 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1657 if (tmp_page->index == expected_index) {
1658 contig_pages++;
1659 expected_index++;
1660 } else
1661 break;
1662 }
1663 if (contig_pages + i > num_pages)
1664 contig_pages = num_pages - i;
1665
1666 /* for reads over a certain size could initiate async
1667 read ahead */
1668
1669 read_size = contig_pages * PAGE_CACHE_SIZE;
1670 /* Read size needs to be in multiples of one page */
1671 read_size = min_t(const unsigned int, read_size,
1672 cifs_sb->rsize & PAGE_CACHE_MASK);
1673
1674 rc = -EAGAIN;
1675 while (rc == -EAGAIN) {
1676 if ((open_file->invalidHandle) &&
1677 (!open_file->closePend)) {
1678 rc = cifs_reopen_file(file->f_dentry->d_inode,
1679 file, TRUE);
1680 if (rc != 0)
1681 break;
1682 }
1683
1684 rc = CIFSSMBRead(xid, pTcon,
1685 open_file->netfid,
1686 read_size, offset,
1687 &bytes_read, &smb_read_data,
1688 &buf_type);
1689 /* BB more RC checks ? */
1690 if (rc== -EAGAIN) {
1691 if (smb_read_data) {
1692 if(buf_type == CIFS_SMALL_BUFFER)
1693 cifs_small_buf_release(smb_read_data);
1694 else if(buf_type == CIFS_LARGE_BUFFER)
1695 cifs_buf_release(smb_read_data);
1696 smb_read_data = NULL;
1697 }
1698 }
1699 }
1700 if ((rc < 0) || (smb_read_data == NULL)) {
1701 cFYI(1, ("Read error in readpages: %d", rc));
1702 /* clean up remaining pages off list */
1703 while (!list_empty(page_list) && (i < num_pages)) {
1704 page = list_entry(page_list->prev, struct page,
1705 lru);
1706 list_del(&page->lru);
1707 page_cache_release(page);
1708 }
1709 break;
1710 } else if (bytes_read > 0) {
1711 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1712 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1713 smb_read_data + 4 /* RFC1001 hdr */ +
1714 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1715
1716 i += bytes_read >> PAGE_CACHE_SHIFT;
1717 cifs_stats_bytes_read(pTcon, bytes_read);
1718 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1719 i++; /* account for partial page */
1720
1721 /* server copy of file can have smaller size
1722 than client */
1723 /* BB do we need to verify this common case ?
1724 this case is ok - if we are at server EOF
1725 we will hit it on next read */
1726
1727 /* while (!list_empty(page_list) && (i < num_pages)) {
1728 page = list_entry(page_list->prev,
1729 struct page, list);
1730 list_del(&page->list);
1731 page_cache_release(page);
1732 }
1733 break; */
1734 }
1735 } else {
1736 cFYI(1, ("No bytes read (%d) at offset %lld . "
1737 "Cleaning remaining pages from readahead list",
1738 bytes_read, offset));
1739 /* BB turn off caching and do new lookup on
1740 file size at server? */
1741 while (!list_empty(page_list) && (i < num_pages)) {
1742 page = list_entry(page_list->prev, struct page,
1743 lru);
1744 list_del(&page->lru);
1745
1746 /* BB removeme - replace with zero of page? */
1747 page_cache_release(page);
1748 }
1749 break;
1750 }
1751 if (smb_read_data) {
1752 if(buf_type == CIFS_SMALL_BUFFER)
1753 cifs_small_buf_release(smb_read_data);
1754 else if(buf_type == CIFS_LARGE_BUFFER)
1755 cifs_buf_release(smb_read_data);
1756 smb_read_data = NULL;
1757 }
1758 bytes_read = 0;
1759 }
1760
1761 pagevec_lru_add(&lru_pvec);
1762
1763/* need to free smb_read_data buf before exit */
1764 if (smb_read_data) {
1765 if(buf_type == CIFS_SMALL_BUFFER)
1766 cifs_small_buf_release(smb_read_data);
1767 else if(buf_type == CIFS_LARGE_BUFFER)
1768 cifs_buf_release(smb_read_data);
1769 smb_read_data = NULL;
1770 }
1771
1772 FreeXid(xid);
1773 return rc;
1774}
1775
1776static int cifs_readpage_worker(struct file *file, struct page *page,
1777 loff_t *poffset)
1778{
1779 char *read_data;
1780 int rc;
1781
1782 page_cache_get(page);
1783 read_data = kmap(page);
1784 /* for reads over a certain size could initiate async read ahead */
1785
1786 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1787
1788 if (rc < 0)
1789 goto io_error;
1790 else
1791 cFYI(1, ("Bytes read %d ",rc));
1792
1793 file->f_dentry->d_inode->i_atime =
1794 current_fs_time(file->f_dentry->d_inode->i_sb);
1795
1796 if (PAGE_CACHE_SIZE > rc)
1797 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1798
1799 flush_dcache_page(page);
1800 SetPageUptodate(page);
1801 rc = 0;
1802
1803io_error:
1804 kunmap(page);
1805 page_cache_release(page);
1806 return rc;
1807}
1808
1809static int cifs_readpage(struct file *file, struct page *page)
1810{
1811 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1812 int rc = -EACCES;
1813 int xid;
1814
1815 xid = GetXid();
1816
1817 if (file->private_data == NULL) {
1818 FreeXid(xid);
1819 return -EBADF;
1820 }
1821
1822 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1823 page, (int)offset, (int)offset));
1824
1825 rc = cifs_readpage_worker(file, page, &offset);
1826
1827 unlock_page(page);
1828
1829 FreeXid(xid);
1830 return rc;
1831}
1832
1833/* We do not want to update the file size from server for inodes
1834 open for write - to avoid races with writepage extending
1835 the file - in the future we could consider allowing
1836 refreshing the inode only on increases in the file size
1837 but this is tricky to do without racing with writebehind
1838 page caching in the current Linux kernel design */
1839int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1840{
1841 struct cifsFileInfo *open_file = NULL;
1842
1843 if (cifsInode)
1844 open_file = find_writable_file(cifsInode);
1845
1846 if(open_file) {
1847 struct cifs_sb_info *cifs_sb;
1848
1849 /* there is not actually a write pending so let
1850 this handle go free and allow it to
1851 be closable if needed */
1852 atomic_dec(&open_file->wrtPending);
1853
1854 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1855 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1856 /* since no page cache to corrupt on directio
1857 we can change size safely */
1858 return 1;
1859 }
1860
1861 return 0;
1862 } else
1863 return 1;
1864}
1865
1866static int cifs_prepare_write(struct file *file, struct page *page,
1867 unsigned from, unsigned to)
1868{
1869 int rc = 0;
1870 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1871 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1872 if (!PageUptodate(page)) {
1873 /* if (to - from != PAGE_CACHE_SIZE) {
1874 void *kaddr = kmap_atomic(page, KM_USER0);
1875 memset(kaddr, 0, from);
1876 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1877 flush_dcache_page(page);
1878 kunmap_atomic(kaddr, KM_USER0);
1879 } */
1880 /* If we are writing a full page it will be up to date,
1881 no need to read from the server */
1882 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1883 SetPageUptodate(page);
1884
1885 /* might as well read a page, it is fast enough */
1886 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1887 rc = cifs_readpage_worker(file, page, &offset);
1888 } else {
1889 /* should we try using another file handle if there is one -
1890 how would we lock it to prevent close of that handle
1891 racing with this read?
1892 In any case this will be written out by commit_write */
1893 }
1894 }
1895
1896 /* BB should we pass any errors back?
1897 e.g. if we do not have read access to the file */
1898 return 0;
1899}
1900
1901struct address_space_operations cifs_addr_ops = {
1902 .readpage = cifs_readpage,
1903 .readpages = cifs_readpages,
1904 .writepage = cifs_writepage,
1905 .writepages = cifs_writepages,
1906 .prepare_write = cifs_prepare_write,
1907 .commit_write = cifs_commit_write,
1908 .set_page_dirty = __set_page_dirty_nobuffers,
1909 /* .sync_page = cifs_sync_page, */
1910 /* .direct_IO = */
1911};