cifs: fix handling of signing with writepages (try #6)
fs/cifs/file.c

/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
{
	fmode_t posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = FMODE_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = FMODE_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		posix_flags = FMODE_READ | FMODE_WRITE;
	}
	/* can not map O_CREAT or O_EXCL or O_TRUNC flags when
	   reopening a file.  They had their effect on the original open */
	if (flags & O_APPEND)
		posix_flags |= (fmode_t)O_APPEND;
	if (flags & O_DSYNC)
		posix_flags |= (fmode_t)O_DSYNC;
	if (flags & __O_SYNC)
		posix_flags |= (fmode_t)__O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= (fmode_t)O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= (fmode_t)O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= (fmode_t)O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

/* all arguments to this function must be checked for validity in caller */
static inline int
cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
			     struct cifsInodeInfo *pCifsInode, __u32 oplock,
			     u16 netfid)
{

	write_lock(&GlobalSMBSeslock);

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
	if (pCifsInode == NULL) {
		write_unlock(&GlobalSMBSeslock);
		return -EINVAL;
	}

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto psx_client_can_cache;
	}

	/* BB FIXME need to fix this check to move it earlier into posix_open
	   BB fIX following section BB FIXME */

	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
/*	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
	if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
			   (file->f_path.dentry->d_inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, "inode unchanged on server");
	} else {
		if (file->f_path.dentry->d_inode->i_mapping) {
			rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
			if (rc != 0)
				CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
		}
		cFYI(1, "invalidating remote inode since open detected it "
			 "changed");
		invalidate_remote_inode(file->f_path.dentry->d_inode);
	} */

psx_client_can_cache:
	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p",
			 file->f_path.dentry->d_inode);
	} else if ((oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	/* will have to change the unlock if we reenable the
	   filemap_fdatawrite (which does not seem necessary */
	write_unlock(&GlobalSMBSeslock);
	return 0;
}

/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode,
	struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct timespec temp;
	int rc;

	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(buf->LastWriteTime);
	if (timespec_equal(&inode->i_mtime, &temp) &&
			   (inode->i_size ==
			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, "inode unchanged on server");
	} else {
		if (inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			rc = filemap_write_and_wait(inode->i_mapping);
			if (rc != 0)
				pCifsInode->write_behind_rc = rc;
		}
		cFYI(1, "invalidating remote inode since open detected it "
			 "changed");
		invalidate_remote_inode(inode);
	}

client_can_cache:
	if (pTcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, NULL);

	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = true;
		pCifsInode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
	} else if ((oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = true;

	return rc;
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile = NULL;
	struct cifsInodeInfo *pCifsInode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = cifs_sb_tcon(cifs_sb);

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		oflags |= SMB_O_CREAT;
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			/* no need for special case handling of setting mode
			   on read only files needed here */

			rc = cifs_posix_open_inode_helper(inode, file,
						pCifsInode, oplock, netfid);
			if (rc != 0) {
				CIFSSMBClose(xid, tcon, netfid);
				goto out;
			}

			pCifsFile = cifs_new_fileinfo(inode, netfid, file,
							file->f_path.mnt,
							tcon, oflags, oplock);
			if (pCifsFile == NULL) {
				CIFSSMBClose(xid, tcon, netfid);
				rc = -ENOMEM;
			}

			cifs_fscache_set_inode_cookie(inode, file);

			goto out;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(file->f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO; /* no NT SMB support fall into legacy open below */

	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, "cifs_open returned 0x%x", rc);
		goto out;
	}

	rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
	if (rc != 0)
		goto out;

	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
					tcon, file->f_flags, oplock);
	if (pCifsFile == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (tcon->unix_ext) {
			struct cifs_unix_set_info_args args = {
				.mode	= inode->i_mode,
				.uid	= NO_CHANGE_64,
				.gid	= NO_CHANGE_64,
				.ctime	= NO_CHANGE_64,
				.atime	= NO_CHANGE_64,
				.mtime	= NO_CHANGE_64,
				.device	= 0,
			};
			CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
					       cifs_sb->local_nls,
					       cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		}
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

static int cifs_reopen_file(struct file *file, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (file->private_data)
		pCifsFile = file->private_data;
	else
		return -EBADF;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	if (file->f_path.dentry == NULL) {
		cERROR(1, "no valid name if dentry freed");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	inode = file->f_path.dentry->d_inode;
	if (inode == NULL) {
		cERROR(1, "inode not valid");
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = pCifsFile->tcon;

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
reopen_error_exit:
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(file->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
	} else {
reopen_success:
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = false;
		mutex_unlock(&pCifsFile->fh_mutex);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				rc = filemap_write_and_wait(inode->i_mapping);
				if (rc != 0)
					CIFS_I(inode)->write_behind_rc = rc;
				/* temporarily disable caching while we
				   go to server to get inode info */
				pCifsInode->clientCanCacheAll = false;
				pCifsInode->clientCanCacheRead = false;
				if (tcon->unix_ext)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid, NULL);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inod
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = true;
				pCifsInode->clientCanCacheRead = true;
				cFYI(1, "Exclusive Oplock granted on inode %p",
					 file->f_path.dentry->d_inode);
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = true;
				pCifsInode->clientCanCacheAll = false;
			} else {
				pCifsInode->clientCanCacheRead = false;
				pCifsInode->clientCanCacheAll = false;
			}
			cifs_relock_file(pCifsFile);
		}
	}
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid, timeout;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile = file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = pSMBFile->tcon;
	if (pSMBFile) {
		struct cifsLockInfo *li, *tmp;
		write_lock(&GlobalSMBSeslock);
		pSMBFile->closePend = true;
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (!pTcon->need_reconnect) {
				write_unlock(&GlobalSMBSeslock);
				timeout = 2;
				while ((atomic_read(&pSMBFile->count) != 1)
					&& (timeout <= 2048)) {
					/* Give write a better chance to get to
					server ahead of the close.  We do not
					want to add a wait_q here as it would
					increase the memory utilization as
					the struct would be in each open file,
					but this should give enough time to
					clear the socket */
					cFYI(DBG2, "close delay, write pending");
					msleep(timeout);
					timeout *= 4;
				}
				if (!pTcon->need_reconnect &&
				    !pSMBFile->invalidHandle)
					rc = CIFSSMBClose(xid, pTcon,
						  pSMBFile->netfid);
			} else
				write_unlock(&GlobalSMBSeslock);
		} else
			write_unlock(&GlobalSMBSeslock);

		/* Delete any outstanding lock records.
		   We'll lose them when the file is closed anyway. */
		mutex_lock(&pSMBFile->lock_mutex);
		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
			list_del(&li->llist);
			kfree(li);
		}
		mutex_unlock(&pSMBFile->lock_mutex);

		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	read_lock(&GlobalSMBSeslock);
	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, "closing last open instance for inode %p", inode);
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = false;
		CIFS_I(inode)->clientCanCacheAll  = false;
	}
	read_unlock(&GlobalSMBSeslock);
	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon = pCFileStruct->tcon;

		cFYI(1, "Freeing private data in close dir");
		write_lock(&GlobalSMBSeslock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			write_unlock(&GlobalSMBSeslock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			write_unlock(&GlobalSMBSeslock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

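/*
 * Track a byte-range lock locally. Summarizing the callers in this file:
 * cifs_lock() records each Windows-style lock here once the server has
 * granted it, so later unlocks can be matched against it and any leftover
 * entries can be freed in cifs_close(). The per-handle llist is protected
 * by lock_mutex.
 */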
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
			   __u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = ((struct cifsFileInfo *)file->private_data)->tcon;

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */ );
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */ );
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start,
					0, numLock, lockType, wait_flag);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid,
							li->length, li->offset,
							1, 0, li->type, false);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

/*
 * Set the timeout on write requests past EOF. For some servers (Windows)
 * these calls can be very long.
 *
 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
 * The 10M cutoff is totally arbitrary. A better scheme for this would be
 * welcome if someone wants to suggest one.
 *
 * We may be able to do a better job with this if there were some way to
 * declare that a file should be sparse.
 */
static int
cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
{
	if (offset <= cifsi->server_eof)
		return CIFS_STD_OP;
	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
		return CIFS_VLONG_OP;
	else
		return CIFS_LONG_OP;
}

/* update the file size (if needed) after a write */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name); */

	if (file->private_data == NULL)
		return -EBADF;

	open_file = file->private_data;
	pTcon = open_file->tcon;

	rc = generic_write_checks(file, poffset, &write_size, 0);
	if (rc)
		return rc;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us while
			   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				   open_file->netfid,
				  min_t(const int, cifs_sb->wsize,
					write_size - total_written),
				   *poffset, &bytes_written,
				   NULL, write_data + total_written, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		struct inode *inode = file->f_path.dentry->d_inode;
/* Do not update local mtime - server will set its actual value on write
 *		inode->i_ctime = inode->i_mtime =
 *			current_fs_time(inode->i_sb);*/
		if (total_written > 0) {
			spin_lock(&inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					*poffset);
			spin_unlock(&inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

static ssize_t cifs_write(struct file *file, const char *write_data,
			  size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name);

	if (file->private_data == NULL)
		return -EBADF;
	open_file = file->private_data;
	pTcon = open_file->tcon;

	xid = GetXid();

	long_op = cifs_write_timeout(cifsi, *poffset);
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us
			   while we blocked so return what we managed to
			   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file, false);
				if (rc != 0)
					break;
			}
			if (experimEnabled || (pTcon->ses->server &&
				((pTcon->ses->server->secMode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				== 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						open_file->netfid, len,
						*poffset, &bytes_written,
						iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
		long_op = CIFS_STD_OP; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
/*BB We could make this contingent on superblock ATIME flag too */
/*		file->f_path.dentry->d_inode->i_ctime =
		file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
		if (total_written > 0) {
			spin_lock(&file->f_path.dentry->d_inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&file->f_path.dentry->d_inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file = NULL;

	read_lock(&GlobalSMBSeslock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
		    (open_file->pfile->f_flags & O_RDONLY))) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
#endif

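/*
 * Find a file handle on this inode that was opened for writing, preferring
 * handles owned by the calling process (tgid) and falling back to any
 * available handle on a second pass. Note, summarizing the behaviour below:
 * the returned cifsFileInfo has had its reference count raised with
 * cifsFileInfo_get(), so callers (cifs_partialpagewrite, cifs_writepages)
 * must drop it with cifsFileInfo_put() when they are done writing.
 */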
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	read_lock(&GlobalSMBSeslock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend ||
		    (!any_available && open_file->pid != current->tgid))
			continue;

		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			}

			read_unlock(&GlobalSMBSeslock);
			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file->pfile, false);
			if (!rc) {
				if (!open_file->closePend)
					return open_file;
				else { /* start over in case this was deleted */
				       /* since the list could be modified */
					read_lock(&GlobalSMBSeslock);
					cifsFileInfo_put(open_file);
					goto refind_writable;
				}
			}

			/* if it fails, try another handle if possible -
			(we can not do this if closePending since
			loop could be modified - in which case we
			have to start at the beginning of the list
			again. Note that it would be bad
			to hold up writepages here (rather than
			in caller) with continuous retries */
			cFYI(1, "wp failed on reopen file");
			read_lock(&GlobalSMBSeslock);
			/* can not use this handle, no write
			   pending on this one after all */
			cifsFileInfo_put(open_file);

			if (open_file->closePend) /* list could have changed */
				goto refind_writable;
			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host));
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to-from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

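/*
 * Write back a range of dirty pages with CIFSSMBWrite2. Summarizing the
 * fallback behaviour coded below: we fall back to generic_writepages()
 * (one page at a time through cifs_writepage) when wsize is smaller than
 * PAGE_CACHE_SIZE, when the iov array cannot be allocated, when no writable
 * file handle can be found, or when the server requires/enables signing and
 * experimEnabled is not set.
 */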
1da177e4 1330static int cifs_writepages(struct address_space *mapping,
37c0eb46 1331 struct writeback_control *wbc)
1da177e4 1332{
37c0eb46
SF
1333 struct backing_dev_info *bdi = mapping->backing_dev_info;
1334 unsigned int bytes_to_write;
1335 unsigned int bytes_written;
1336 struct cifs_sb_info *cifs_sb;
1337 int done = 0;
111ebb6e 1338 pgoff_t end;
37c0eb46 1339 pgoff_t index;
fb8c4b14
SF
1340 int range_whole = 0;
1341 struct kvec *iov;
84d2f07e 1342 int len;
37c0eb46
SF
1343 int n_iov = 0;
1344 pgoff_t next;
1345 int nr_pages;
1346 __u64 offset = 0;
23e7dd7d 1347 struct cifsFileInfo *open_file;
ba00ba64 1348 struct cifsTconInfo *tcon;
fbec9ab9 1349 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1350 struct page *page;
1351 struct pagevec pvec;
1352 int rc = 0;
1353 int scanned = 0;
fbec9ab9 1354 int xid, long_op;
1da177e4 1355
f3983c21
JL
1356 /*
1357 * BB: Is this meaningful for a non-block-device file system?
1358 * If it is, we should test it again after we do I/O
1359 */
1360 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1361 wbc->encountered_congestion = 1;
1362 return 0;
1363 }
1364
37c0eb46 1365 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1366
37c0eb46
SF
1367 /*
1368 * If wsize is smaller that the page cache size, default to writing
1369 * one page at a time via cifs_writepage
1370 */
1371 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1372 return generic_writepages(mapping, wbc);
1373
9a0c8230 1374 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1375 if (iov == NULL)
9a0c8230
SF
1376 return generic_writepages(mapping, wbc);
1377
37c0eb46 1378 /*
f3983c21
JL
1379 * if there's no open file, then this is likely to fail too,
1380 * but it'll at least handle the return. Maybe it should be
1381 * a BUG() instead?
37c0eb46 1382 */
f3983c21
JL
1383 open_file = find_writable_file(CIFS_I(mapping->host));
1384 if (!open_file) {
9a0c8230 1385 kfree(iov);
f3983c21
JL
1386 return generic_writepages(mapping, wbc);
1387 }
1388
1389 tcon = open_file->tcon;
1390 if (!experimEnabled && tcon->ses->server->secMode &
1391 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1392 cifsFileInfo_put(open_file);
1393 return generic_writepages(mapping, wbc);
37c0eb46 1394 }
f3983c21 1395 cifsFileInfo_put(open_file);
37c0eb46 1396
1da177e4
LT
1397 xid = GetXid();
1398
37c0eb46 1399 pagevec_init(&pvec, 0);
111ebb6e 1400 if (wbc->range_cyclic) {
37c0eb46 1401 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1402 end = -1;
1403 } else {
1404 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1405 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1406 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1407 range_whole = 1;
37c0eb46
SF
1408 scanned = 1;
1409 }
1410retry:
1411 while (!done && (index <= end) &&
1412 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1413 PAGECACHE_TAG_DIRTY,
1414 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1415 int first;
1416 unsigned int i;
1417
37c0eb46
SF
1418 first = -1;
1419 next = 0;
1420 n_iov = 0;
1421 bytes_to_write = 0;
1422
1423 for (i = 0; i < nr_pages; i++) {
1424 page = pvec.pages[i];
1425 /*
1426 * At this point we hold neither mapping->tree_lock nor
1427 * lock on the page itself: the page may be truncated or
1428 * invalidated (changing page->mapping to NULL), or even
1429 * swizzled back from swapper_space to tmpfs file
1430 * mapping
1431 */
1432
1433 if (first < 0)
1434 lock_page(page);
529ae9aa 1435 else if (!trylock_page(page))
37c0eb46
SF
1436 break;
1437
1438 if (unlikely(page->mapping != mapping)) {
1439 unlock_page(page);
1440 break;
1441 }
1442
111ebb6e 1443 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1444 done = 1;
1445 unlock_page(page);
1446 break;
1447 }
1448
1449 if (next && (page->index != next)) {
1450 /* Not next consecutive page */
1451 unlock_page(page);
1452 break;
1453 }
1454
1455 if (wbc->sync_mode != WB_SYNC_NONE)
1456 wait_on_page_writeback(page);
1457
1458 if (PageWriteback(page) ||
cb876f45 1459 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1460 unlock_page(page);
1461 break;
1462 }
84d2f07e 1463
cb876f45
LT
1464 /*
1465 * This actually clears the dirty bit in the radix tree.
1466 * See cifs_writepage() for more commentary.
1467 */
1468 set_page_writeback(page);
1469
84d2f07e
SF
1470 if (page_offset(page) >= mapping->host->i_size) {
1471 done = 1;
1472 unlock_page(page);
cb876f45 1473 end_page_writeback(page);
84d2f07e
SF
1474 break;
1475 }
1476
37c0eb46
SF
1477 /*
1478 * BB can we get rid of this? pages are held by pvec
1479 */
1480 page_cache_get(page);
1481
84d2f07e
SF
1482 len = min(mapping->host->i_size - page_offset(page),
1483 (loff_t)PAGE_CACHE_SIZE);
1484
37c0eb46
SF
1485 /* reserve iov[0] for the smb header */
1486 n_iov++;
1487 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1488 iov[n_iov].iov_len = len;
1489 bytes_to_write += len;
37c0eb46
SF
1490
1491 if (first < 0) {
1492 first = i;
1493 offset = page_offset(page);
1494 }
1495 next = page->index + 1;
1496 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1497 break;
1498 }
1499 if (n_iov) {
23e7dd7d
SF
1500 open_file = find_writable_file(CIFS_I(mapping->host));
1501 if (!open_file) {
b6b38f70 1502 cERROR(1, "No writable handles for inode");
23e7dd7d 1503 rc = -EBADF;
1047abc1 1504 } else {
fbec9ab9 1505 long_op = cifs_write_timeout(cifsi, offset);
f3983c21 1506 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
23e7dd7d
SF
1507 bytes_to_write, offset,
1508 &bytes_written, iov, n_iov,
fbec9ab9 1509 long_op);
6ab409b5 1510 cifsFileInfo_put(open_file);
fbec9ab9 1511 cifs_update_eof(cifsi, offset, bytes_written);
f3983c21 1512 }
fbec9ab9 1513
f3983c21
JL
1514 if (rc || bytes_written < bytes_to_write) {
1515 cERROR(1, "Write2 ret %d, wrote %d",
1516 rc, bytes_written);
1517 /* BB what if continued retry is
1518 requested via mount flags? */
1519 if (rc == -ENOSPC)
1520 set_bit(AS_ENOSPC, &mapping->flags);
1521 else
1522 set_bit(AS_EIO, &mapping->flags);
1523 } else {
1524 cifs_stats_bytes_written(tcon, bytes_written);
37c0eb46 1525 }
f3983c21 1526
37c0eb46
SF
1527 for (i = 0; i < n_iov; i++) {
1528 page = pvec.pages[first + i];
eb9bdaa3
SF
1529 /* Should we also set page error on
1530 success rc but too little data written? */
1531 /* BB investigate retry logic on temporary
1532 server crash cases and how recovery works
fb8c4b14
SF
1533 when page marked as error */
1534 if (rc)
eb9bdaa3 1535 SetPageError(page);
37c0eb46
SF
1536 kunmap(page);
1537 unlock_page(page);
cb876f45 1538 end_page_writeback(page);
37c0eb46
SF
1539 page_cache_release(page);
1540 }
1541 if ((wbc->nr_to_write -= n_iov) <= 0)
1542 done = 1;
1543 index = next;
b066a48c
DK
1544 } else
1545 /* Need to re-find the pages we skipped */
1546 index = pvec.pages[0]->index + 1;
1547
37c0eb46
SF
1548 pagevec_release(&pvec);
1549 }
1550 if (!scanned && !done) {
1551 /*
1552 * We hit the last page and there is more work to be done: wrap
1553 * back to the start of the file
1554 */
1555 scanned = 1;
1556 index = 0;
1557 goto retry;
1558 }
111ebb6e 1559 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1560 mapping->writeback_index = index;
1561
1da177e4 1562 FreeXid(xid);
9a0c8230 1563 kfree(iov);
1da177e4
LT
1564 return rc;
1565}
1da177e4 1566
fb8c4b14 1567static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1568{
1569 int rc = -EFAULT;
1570 int xid;
1571
1572 xid = GetXid();
1573/* BB add check for wbc flags */
1574 page_cache_get(page);
ad7a2926 1575 if (!PageUptodate(page))
b6b38f70 1576 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1577
1578 /*
1579 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1580 *
1581 * A writepage() implementation always needs to do either this,
1582 * or re-dirty the page with "redirty_page_for_writepage()" in
1583 * the case of a failure.
1584 *
1585 * Just unlocking the page will cause the radix tree tag-bits
1586 * to fail to update with the state of the page correctly.
1587 */
fb8c4b14 1588 set_page_writeback(page);
1da177e4
LT
1589 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1590 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1591 unlock_page(page);
cb876f45
LT
1592 end_page_writeback(page);
1593 page_cache_release(page);
1da177e4
LT
1594 FreeXid(xid);
1595 return rc;
1596}
1597
d9414774
NP
1598static int cifs_write_end(struct file *file, struct address_space *mapping,
1599 loff_t pos, unsigned len, unsigned copied,
1600 struct page *page, void *fsdata)
1da177e4 1601{
d9414774
NP
1602 int rc;
1603 struct inode *inode = mapping->host;
1da177e4 1604
b6b38f70
JP
1605 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1606 page, pos, copied);
d9414774 1607
a98ee8c1
JL
1608 if (PageChecked(page)) {
1609 if (copied == len)
1610 SetPageUptodate(page);
1611 ClearPageChecked(page);
1612 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1613 SetPageUptodate(page);
ad7a2926 1614
1da177e4 1615 if (!PageUptodate(page)) {
d9414774
NP
1616 char *page_data;
1617 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1618 int xid;
1619
1620 xid = GetXid();
1da177e4
LT
1621 /* this is probably better than directly calling
1622 partialpage_write since in this function the file handle is
1623 known which we might as well leverage */
1624 /* BB check if anything else missing out of ppw
1625 such as updating last write time */
1626 page_data = kmap(page);
d9414774
NP
1627 rc = cifs_write(file, page_data + offset, copied, &pos);
1628 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1629 kunmap(page);
d9414774
NP
1630
1631 FreeXid(xid);
fb8c4b14 1632 } else {
d9414774
NP
1633 rc = copied;
1634 pos += copied;
1da177e4
LT
1635 set_page_dirty(page);
1636 }
1637
d9414774
NP
1638 if (rc > 0) {
1639 spin_lock(&inode->i_lock);
1640 if (pos > inode->i_size)
1641 i_size_write(inode, pos);
1642 spin_unlock(&inode->i_lock);
1643 }
1644
1645 unlock_page(page);
1646 page_cache_release(page);
1647
1da177e4
LT
1648 return rc;
1649}
1650
7ea80859 1651int cifs_fsync(struct file *file, int datasync)
1da177e4
LT
1652{
1653 int xid;
1654 int rc = 0;
b298f223 1655 struct cifsTconInfo *tcon;
c21dfb69 1656 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1657 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1658
1659 xid = GetXid();
1660
b6b38f70 1661 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1662 file->f_path.dentry->d_name.name, datasync);
50c2f753 1663
cea21805
JL
1664 rc = filemap_write_and_wait(inode->i_mapping);
1665 if (rc == 0) {
1666 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1667 CIFS_I(inode)->write_behind_rc = 0;
ba00ba64 1668 tcon = smbfile->tcon;
be652445 1669 if (!rc && tcon && smbfile &&
4717bed6 1670 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
b298f223 1671 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
cea21805 1672 }
b298f223 1673
1da177e4
LT
1674 FreeXid(xid);
1675 return rc;
1676}
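
From user space, the datasync argument simply reflects whether the application called fdatasync(2) or fsync(2); either way the path above flushes dirty pages and, unless the CIFS_MOUNT_NOSSYNC flag is set, issues an SMB Flush. A minimal user-space illustration, assuming a file on a CIFS mount at /mnt/cifs:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cifs/example.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "hello\n", 6) != 6)
		perror("write");
	if (fsync(fd) != 0)		/* reaches cifs_fsync() with datasync == 0 */
		perror("fsync");
	if (fdatasync(fd) != 0)		/* reaches cifs_fsync() with datasync == 1 */
		perror("fdatasync");
	close(fd);
	return 0;
}
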
1677
3978d717 1678/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1679{
1680 struct address_space *mapping;
1681 struct inode *inode;
1682 unsigned long index = page->index;
1683 unsigned int rpages = 0;
1684 int rc = 0;
1685
f19159dc 1686 cFYI(1, "sync page %p", page);
1da177e4
LT
1687 mapping = page->mapping;
1688 if (!mapping)
1689 return 0;
1690 inode = mapping->host;
1691 if (!inode)
3978d717 1692 return; */
1da177e4 1693
fb8c4b14 1694/* fill in rpages then
1da177e4
LT
1695 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1696
b6b38f70 1697/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1698
3978d717 1699#if 0
1da177e4
LT
1700 if (rc < 0)
1701 return rc;
1702 return 0;
3978d717 1703#endif
1da177e4
LT
1704} */
1705
1706/*
1707 * As file closes, flush all cached write data for this inode checking
1708 * for write behind errors.
1709 */
75e1fcc0 1710int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1711{
fb8c4b14 1712 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1713 int rc = 0;
1714
1715 /* Rather than do the steps manually:
1716 lock the inode for writing
1717 loop through pages looking for write behind data (dirty pages)
1718 coalesce into contiguous 16K (or smaller) chunks to write to server
1719 send to server (prefer in parallel)
1720 deal with writebehind errors
1721 unlock inode for writing
 1722 filemap_fdatawrite appears easier for the time being */
1723
1724 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1725 /* reset wb rc if we were able to write out dirty pages */
1726 if (!rc) {
1727 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1728 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1729 }
50c2f753 1730
b6b38f70 1731 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1732
1733 return rc;
1734}
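
Because ->flush feeds its return value back to close(2), a deferred write-behind error on CIFS may only surface when the descriptor is closed. A small user-space sketch (the /mnt/cifs path is an assumption) showing why the return value of close() matters here:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cifs/example.txt", O_WRONLY | O_CREAT, 0644);
	if (fd < 0)
		return 1;
	if (write(fd, "data\n", 5) != 5)
		perror("write");
	if (close(fd) != 0)	/* errors from cifs_flush() are reported here */
		fprintf(stderr, "close: %s\n", strerror(errno));
	return 0;
}
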
1735
1736ssize_t cifs_user_read(struct file *file, char __user *read_data,
1737 size_t read_size, loff_t *poffset)
1738{
1739 int rc = -EACCES;
1740 unsigned int bytes_read = 0;
1741 unsigned int total_read = 0;
1742 unsigned int current_read_size;
1743 struct cifs_sb_info *cifs_sb;
1744 struct cifsTconInfo *pTcon;
1745 int xid;
1746 struct cifsFileInfo *open_file;
1747 char *smb_read_data;
1748 char __user *current_offset;
1749 struct smb_com_read_rsp *pSMBr;
1750
1751 xid = GetXid();
e6a00296 1752 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1753
1754 if (file->private_data == NULL) {
0f3bc09e 1755 rc = -EBADF;
1da177e4 1756 FreeXid(xid);
0f3bc09e 1757 return rc;
1da177e4 1758 }
c21dfb69 1759 open_file = file->private_data;
ba00ba64 1760 pTcon = open_file->tcon;
1da177e4 1761
ad7a2926 1762 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1763 cFYI(1, "attempting read on write only file instance");
ad7a2926 1764
1da177e4
LT
1765 for (total_read = 0, current_offset = read_data;
1766 read_size > total_read;
1767 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1768 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1769 cifs_sb->rsize);
1770 rc = -EAGAIN;
1771 smb_read_data = NULL;
1772 while (rc == -EAGAIN) {
ec637e3f 1773 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1774 if ((open_file->invalidHandle) &&
1da177e4 1775 (!open_file->closePend)) {
4b18f2a9 1776 rc = cifs_reopen_file(file, true);
1da177e4
LT
1777 if (rc != 0)
1778 break;
1779 }
bfa0d75a 1780 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1781 open_file->netfid,
1782 current_read_size, *poffset,
1783 &bytes_read, &smb_read_data,
1784 &buf_type);
1da177e4 1785 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1786 if (smb_read_data) {
93544cc6
SF
1787 if (copy_to_user(current_offset,
1788 smb_read_data +
1789 4 /* RFC1001 length field */ +
1790 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1791 bytes_read))
93544cc6 1792 rc = -EFAULT;
93544cc6 1793
fb8c4b14 1794 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1795 cifs_small_buf_release(smb_read_data);
fb8c4b14 1796 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1797 cifs_buf_release(smb_read_data);
1da177e4
LT
1798 smb_read_data = NULL;
1799 }
1800 }
1801 if (rc || (bytes_read == 0)) {
1802 if (total_read) {
1803 break;
1804 } else {
1805 FreeXid(xid);
1806 return rc;
1807 }
1808 } else {
a4544347 1809 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1810 *poffset += bytes_read;
1811 }
1812 }
1813 FreeXid(xid);
1814 return total_read;
1815}
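
The loop above is easier to see in isolation: a large request is cut into rsize-sized pieces, each piece is retried after reopening a stale handle, and the offset advances only by what the server actually returned. A distilled sketch with a hypothetical server_read() helper (standing in for CIFSSMBRead(), not a real CIFS function):

#include <linux/types.h>

/* Sketch only: server_read() is hypothetical. */
static ssize_t example_chunked_read(char *buf, size_t len, loff_t *pos,
				    size_t rsize)
{
	size_t total = 0;

	while (total < len) {
		size_t chunk = (len - total < rsize) ? len - total : rsize;
		ssize_t got = server_read(buf + total, chunk, *pos);

		if (got <= 0)			/* error or EOF */
			return total ? (ssize_t)total : got;
		total += got;
		*pos += got;
	}
	return total;
}
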
1816
1817
1818static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1819 loff_t *poffset)
1820{
1821 int rc = -EACCES;
1822 unsigned int bytes_read = 0;
1823 unsigned int total_read;
1824 unsigned int current_read_size;
1825 struct cifs_sb_info *cifs_sb;
1826 struct cifsTconInfo *pTcon;
1827 int xid;
1828 char *current_offset;
1829 struct cifsFileInfo *open_file;
ec637e3f 1830 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1831
1832 xid = GetXid();
e6a00296 1833 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1834
1835 if (file->private_data == NULL) {
0f3bc09e 1836 rc = -EBADF;
1da177e4 1837 FreeXid(xid);
0f3bc09e 1838 return rc;
1da177e4 1839 }
c21dfb69 1840 open_file = file->private_data;
ba00ba64 1841 pTcon = open_file->tcon;
1da177e4
LT
1842
1843 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1844 cFYI(1, "attempting read on write only file instance");
1da177e4 1845
fb8c4b14 1846 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1847 read_size > total_read;
1848 total_read += bytes_read, current_offset += bytes_read) {
1849 current_read_size = min_t(const int, read_size - total_read,
1850 cifs_sb->rsize);
f9f5c817
SF
 1851 /* For Windows ME and 9x we do not want to request more than was
 1852 negotiated, since those servers will refuse the read otherwise */
fb8c4b14 1853 if ((pTcon->ses) &&
f9f5c817
SF
1854 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1855 current_read_size = min_t(const int, current_read_size,
1856 pTcon->ses->server->maxBuf - 128);
1857 }
1da177e4
LT
1858 rc = -EAGAIN;
1859 while (rc == -EAGAIN) {
fb8c4b14 1860 if ((open_file->invalidHandle) &&
1da177e4 1861 (!open_file->closePend)) {
4b18f2a9 1862 rc = cifs_reopen_file(file, true);
1da177e4
LT
1863 if (rc != 0)
1864 break;
1865 }
bfa0d75a 1866 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1867 open_file->netfid,
1868 current_read_size, *poffset,
1869 &bytes_read, &current_offset,
1870 &buf_type);
1da177e4
LT
1871 }
1872 if (rc || (bytes_read == 0)) {
1873 if (total_read) {
1874 break;
1875 } else {
1876 FreeXid(xid);
1877 return rc;
1878 }
1879 } else {
a4544347 1880 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1881 *poffset += bytes_read;
1882 }
1883 }
1884 FreeXid(xid);
1885 return total_read;
1886}
1887
1888int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1889{
1da177e4
LT
1890 int rc, xid;
1891
1892 xid = GetXid();
abab095d 1893 rc = cifs_revalidate_file(file);
1da177e4 1894 if (rc) {
b6b38f70 1895 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1896 FreeXid(xid);
1897 return rc;
1898 }
1899 rc = generic_file_mmap(file, vma);
1900 FreeXid(xid);
1901 return rc;
1902}
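
cifs_file_mmap() only revalidates the cached inode before handing off to generic_file_mmap(); the page faults on the mapping are then filled through cifs_readpage() further below. A user-space illustration (again assuming a file on a CIFS mount):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("/mnt/cifs/example.txt", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) != 0 || st.st_size == 0)
		return 1;
	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	fwrite(p, 1, st.st_size, stdout);	/* faults filled via cifs_readpage() */
	munmap(p, st.st_size);
	close(fd);
	return 0;
}
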
1903
1904
fb8c4b14 1905static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1906 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1907{
1908 struct page *page;
1909 char *target;
1910
1911 while (bytes_read > 0) {
1912 if (list_empty(pages))
1913 break;
1914
1915 page = list_entry(pages->prev, struct page, lru);
1916 list_del(&page->lru);
1917
315e995c 1918 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1919 GFP_KERNEL)) {
1920 page_cache_release(page);
b6b38f70 1921 cFYI(1, "Add page cache failed");
3079ca62
SF
1922 data += PAGE_CACHE_SIZE;
1923 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1924 continue;
1925 }
06b43672 1926 page_cache_release(page);
1da177e4 1927
fb8c4b14 1928 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1929
1930 if (PAGE_CACHE_SIZE > bytes_read) {
1931 memcpy(target, data, bytes_read);
1932 /* zero the tail end of this partial page */
fb8c4b14 1933 memset(target + bytes_read, 0,
1da177e4
LT
1934 PAGE_CACHE_SIZE - bytes_read);
1935 bytes_read = 0;
1936 } else {
1937 memcpy(target, data, PAGE_CACHE_SIZE);
1938 bytes_read -= PAGE_CACHE_SIZE;
1939 }
1940 kunmap_atomic(target, KM_USER0);
1941
1942 flush_dcache_page(page);
1943 SetPageUptodate(page);
1944 unlock_page(page);
1da177e4 1945 data += PAGE_CACHE_SIZE;
9dc06558
SJ
1946
1947 /* add page to FS-Cache */
1948 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
1949 }
1950 return;
1951}
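
The tail handling above is the standard short-read pattern: the last page of a short wire read is copied only partially and the remainder is zero-filled so the page can still be marked uptodate. In isolation:

#include <linux/pagemap.h>
#include <linux/string.h>

/* Sketch of the partial-page copy in cifs_copy_cache_pages() above. */
static void example_fill_page(char *dst, const char *src, size_t valid_bytes)
{
	size_t copy = valid_bytes < PAGE_CACHE_SIZE ? valid_bytes : PAGE_CACHE_SIZE;

	memcpy(dst, src, copy);
	if (copy < PAGE_CACHE_SIZE)
		memset(dst + copy, 0, PAGE_CACHE_SIZE - copy);
}
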
1952
1953static int cifs_readpages(struct file *file, struct address_space *mapping,
1954 struct list_head *page_list, unsigned num_pages)
1955{
1956 int rc = -EACCES;
1957 int xid;
1958 loff_t offset;
1959 struct page *page;
1960 struct cifs_sb_info *cifs_sb;
1961 struct cifsTconInfo *pTcon;
2c2130e1 1962 unsigned int bytes_read = 0;
fb8c4b14 1963 unsigned int read_size, i;
1da177e4
LT
1964 char *smb_read_data = NULL;
1965 struct smb_com_read_rsp *pSMBr;
1da177e4 1966 struct cifsFileInfo *open_file;
ec637e3f 1967 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1968
1969 xid = GetXid();
1970 if (file->private_data == NULL) {
0f3bc09e 1971 rc = -EBADF;
1da177e4 1972 FreeXid(xid);
0f3bc09e 1973 return rc;
1da177e4 1974 }
c21dfb69 1975 open_file = file->private_data;
e6a00296 1976 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
ba00ba64 1977 pTcon = open_file->tcon;
bfa0d75a 1978
56698236
SJ
1979 /*
1980 * Reads as many pages as possible from fscache. Returns -ENOBUFS
1981 * immediately if the cookie is negative
1982 */
1983 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
1984 &num_pages);
1985 if (rc == 0)
1986 goto read_complete;
1987
f19159dc 1988 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
1989 for (i = 0; i < num_pages; ) {
1990 unsigned contig_pages;
1991 struct page *tmp_page;
1992 unsigned long expected_index;
1993
1994 if (list_empty(page_list))
1995 break;
1996
1997 page = list_entry(page_list->prev, struct page, lru);
1998 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1999
2000 /* count adjacent pages that we will read into */
2001 contig_pages = 0;
fb8c4b14 2002 expected_index =
1da177e4 2003 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 2004 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
2005 if (tmp_page->index == expected_index) {
2006 contig_pages++;
2007 expected_index++;
2008 } else
fb8c4b14 2009 break;
1da177e4
LT
2010 }
2011 if (contig_pages + i > num_pages)
2012 contig_pages = num_pages - i;
2013
2014 /* for reads over a certain size could initiate async
2015 read ahead */
2016
2017 read_size = contig_pages * PAGE_CACHE_SIZE;
2018 /* Read size needs to be in multiples of one page */
2019 read_size = min_t(const unsigned int, read_size,
2020 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2021 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2022 read_size, contig_pages);
1da177e4
LT
2023 rc = -EAGAIN;
2024 while (rc == -EAGAIN) {
fb8c4b14 2025 if ((open_file->invalidHandle) &&
1da177e4 2026 (!open_file->closePend)) {
4b18f2a9 2027 rc = cifs_reopen_file(file, true);
1da177e4
LT
2028 if (rc != 0)
2029 break;
2030 }
2031
bfa0d75a 2032 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2033 open_file->netfid,
2034 read_size, offset,
2035 &bytes_read, &smb_read_data,
2036 &buf_type);
a9d02ad4 2037 /* BB more RC checks ? */
fb8c4b14 2038 if (rc == -EAGAIN) {
1da177e4 2039 if (smb_read_data) {
fb8c4b14 2040 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2041 cifs_small_buf_release(smb_read_data);
fb8c4b14 2042 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2043 cifs_buf_release(smb_read_data);
1da177e4
LT
2044 smb_read_data = NULL;
2045 }
2046 }
2047 }
2048 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2049 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2050 break;
2051 } else if (bytes_read > 0) {
6f88cc2e 2052 task_io_account_read(bytes_read);
1da177e4
LT
2053 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2054 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2055 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2056 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2057
2058 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2059 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2060 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2061 i++; /* account for partial page */
2062
fb8c4b14 2063 /* server copy of file can have smaller size
1da177e4 2064 than client */
fb8c4b14
SF
2065 /* BB do we need to verify this common case ?
2066 this case is ok - if we are at server EOF
1da177e4
LT
2067 we will hit it on next read */
2068
05ac9d4b 2069 /* break; */
1da177e4
LT
2070 }
2071 } else {
b6b38f70 2072 cFYI(1, "No bytes read (%d) at offset %lld . "
f19159dc 2073 "Cleaning remaining pages from readahead list",
b6b38f70 2074 bytes_read, offset);
fb8c4b14 2075 /* BB turn off caching and do new lookup on
1da177e4 2076 file size at server? */
1da177e4
LT
2077 break;
2078 }
2079 if (smb_read_data) {
fb8c4b14 2080 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2081 cifs_small_buf_release(smb_read_data);
fb8c4b14 2082 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2083 cifs_buf_release(smb_read_data);
1da177e4
LT
2084 smb_read_data = NULL;
2085 }
2086 bytes_read = 0;
2087 }
2088
1da177e4
LT
2089/* need to free smb_read_data buf before exit */
2090 if (smb_read_data) {
fb8c4b14 2091 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2092 cifs_small_buf_release(smb_read_data);
fb8c4b14 2093 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2094 cifs_buf_release(smb_read_data);
1da177e4 2095 smb_read_data = NULL;
fb8c4b14 2096 }
1da177e4 2097
56698236 2098read_complete:
1da177e4
LT
2099 FreeXid(xid);
2100 return rc;
2101}
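
The contiguity scan in the middle of cifs_readpages() decides how many of the readahead pages can be satisfied by a single wire read. Reduced to its essentials (operating on a plain array of page indexes rather than the page list):

/* Sketch: count the leading run of consecutive page indexes. */
static unsigned int example_count_contiguous(const unsigned long *index,
					     unsigned int nr)
{
	unsigned int i;

	if (nr == 0)
		return 0;
	for (i = 1; i < nr; i++)
		if (index[i] != index[i - 1] + 1)
			break;
	return i;
}
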
2102
2103static int cifs_readpage_worker(struct file *file, struct page *page,
2104 loff_t *poffset)
2105{
2106 char *read_data;
2107 int rc;
2108
56698236
SJ
2109 /* Is the page cached? */
2110 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2111 if (rc == 0)
2112 goto read_complete;
2113
1da177e4
LT
2114 page_cache_get(page);
2115 read_data = kmap(page);
2116 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2117
1da177e4 2118 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2119
1da177e4
LT
2120 if (rc < 0)
2121 goto io_error;
2122 else
b6b38f70 2123 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2124
e6a00296
JJS
2125 file->f_path.dentry->d_inode->i_atime =
2126 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2127
1da177e4
LT
2128 if (PAGE_CACHE_SIZE > rc)
2129 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2130
2131 flush_dcache_page(page);
2132 SetPageUptodate(page);
9dc06558
SJ
2133
2134 /* send this page to the cache */
2135 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2136
1da177e4 2137 rc = 0;
fb8c4b14 2138
1da177e4 2139io_error:
fb8c4b14 2140 kunmap(page);
1da177e4 2141 page_cache_release(page);
56698236
SJ
2142
2143read_complete:
1da177e4
LT
2144 return rc;
2145}
2146
2147static int cifs_readpage(struct file *file, struct page *page)
2148{
2149 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2150 int rc = -EACCES;
2151 int xid;
2152
2153 xid = GetXid();
2154
2155 if (file->private_data == NULL) {
0f3bc09e 2156 rc = -EBADF;
1da177e4 2157 FreeXid(xid);
0f3bc09e 2158 return rc;
1da177e4
LT
2159 }
2160
b6b38f70
JP
 2161 cFYI(1, "readpage %p at offset %d 0x%x",
2162 page, (int)offset, (int)offset);
1da177e4
LT
2163
2164 rc = cifs_readpage_worker(file, page, &offset);
2165
2166 unlock_page(page);
2167
2168 FreeXid(xid);
2169 return rc;
2170}
2171
a403a0a3
SF
2172static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2173{
2174 struct cifsFileInfo *open_file;
2175
2176 read_lock(&GlobalSMBSeslock);
2177 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2178 if (open_file->closePend)
2179 continue;
2180 if (open_file->pfile &&
2181 ((open_file->pfile->f_flags & O_RDWR) ||
2182 (open_file->pfile->f_flags & O_WRONLY))) {
2183 read_unlock(&GlobalSMBSeslock);
2184 return 1;
2185 }
2186 }
2187 read_unlock(&GlobalSMBSeslock);
2188 return 0;
2189}
2190
1da177e4
LT
2191/* We do not want to update the file size from server for inodes
2192 open for write - to avoid races with writepage extending
2193 the file - in the future we could consider allowing
fb8c4b14 2194 refreshing the inode only on increases in the file size
1da177e4
LT
2195 but this is tricky to do without racing with writebehind
2196 page caching in the current Linux kernel design */
4b18f2a9 2197bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2198{
a403a0a3 2199 if (!cifsInode)
4b18f2a9 2200 return true;
50c2f753 2201
a403a0a3
SF
2202 if (is_inode_writable(cifsInode)) {
2203 /* This inode is open for write at least once */
c32a0b68
SF
2204 struct cifs_sb_info *cifs_sb;
2205
c32a0b68 2206 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2207 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2208 /* since no page cache to corrupt on directio
c32a0b68 2209 we can change size safely */
4b18f2a9 2210 return true;
c32a0b68
SF
2211 }
2212
fb8c4b14 2213 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2214 return true;
7ba52631 2215
4b18f2a9 2216 return false;
23e7dd7d 2217 } else
4b18f2a9 2218 return true;
1da177e4
LT
2219}
2220
d9414774
NP
2221static int cifs_write_begin(struct file *file, struct address_space *mapping,
2222 loff_t pos, unsigned len, unsigned flags,
2223 struct page **pagep, void **fsdata)
1da177e4 2224{
d9414774
NP
2225 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2226 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2227 loff_t page_start = pos & PAGE_MASK;
2228 loff_t i_size;
2229 struct page *page;
2230 int rc = 0;
d9414774 2231
b6b38f70 2232 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2233
54566b2c 2234 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2235 if (!page) {
2236 rc = -ENOMEM;
2237 goto out;
2238 }
8a236264 2239
a98ee8c1
JL
2240 if (PageUptodate(page))
2241 goto out;
8a236264 2242
a98ee8c1
JL
2243 /*
2244 * If we write a full page it will be up to date, no need to read from
2245 * the server. If the write is short, we'll end up doing a sync write
2246 * instead.
2247 */
2248 if (len == PAGE_CACHE_SIZE)
2249 goto out;
8a236264 2250
a98ee8c1
JL
2251 /*
2252 * optimize away the read when we have an oplock, and we're not
2253 * expecting to use any of the data we'd be reading in. That
2254 * is, when the page lies beyond the EOF, or straddles the EOF
2255 * and the write will cover all of the existing data.
2256 */
2257 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2258 i_size = i_size_read(mapping->host);
2259 if (page_start >= i_size ||
2260 (offset == 0 && (pos + len) >= i_size)) {
2261 zero_user_segments(page, 0, offset,
2262 offset + len,
2263 PAGE_CACHE_SIZE);
2264 /*
2265 * PageChecked means that the parts of the page
2266 * to which we're not writing are considered up
2267 * to date. Once the data is copied to the
2268 * page, it can be set uptodate.
2269 */
2270 SetPageChecked(page);
2271 goto out;
2272 }
2273 }
d9414774 2274
a98ee8c1
JL
2275 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2276 /*
2277 * might as well read a page, it is fast enough. If we get
2278 * an error, we don't need to return it. cifs_write_end will
2279 * do a sync write instead since PG_uptodate isn't set.
2280 */
2281 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2282 } else {
2283 /* we could try using another file handle if there is one -
2284 but how would we lock it to prevent close of that handle
2285 racing with this read? In any case
d9414774 2286 this will be written out by write_end so is fine */
1da177e4 2287 }
a98ee8c1
JL
2288out:
2289 *pagep = page;
2290 return rc;
1da177e4
LT
2291}
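
The comments in cifs_write_begin() describe three cases in which the page does not need to be read from the server first. Pulled out into a single predicate (a sketch for illustration, not code that exists in the driver):

#include <linux/pagemap.h>
#include <linux/types.h>

/* Sketch of the "can we skip the read?" decision in cifs_write_begin(). */
static bool write_begin_can_skip_read(loff_t page_start, loff_t pos,
				      unsigned int len, unsigned int offset,
				      loff_t i_size, bool have_read_oplock)
{
	if (len == PAGE_CACHE_SIZE)	/* full-page overwrite */
		return true;
	if (!have_read_oplock)
		return false;
	/* page lies beyond EOF, or the write starts the page and covers out to EOF */
	return page_start >= i_size || (offset == 0 && pos + len >= i_size);
}
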
2292
85f2d6b4
SJ
2293static int cifs_release_page(struct page *page, gfp_t gfp)
2294{
2295 if (PagePrivate(page))
2296 return 0;
2297
2298 return cifs_fscache_release_page(page, gfp);
2299}
2300
2301static void cifs_invalidate_page(struct page *page, unsigned long offset)
2302{
2303 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2304
2305 if (offset == 0)
2306 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2307}
2308
9b646972 2309void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
2310{
2311 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2312 oplock_break);
2313 struct inode *inode = cfile->pInode;
2314 struct cifsInodeInfo *cinode = CIFS_I(inode);
3bc303c2
JL
2315 int rc, waitrc = 0;
2316
2317 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2318 if (cinode->clientCanCacheRead)
8737c930 2319 break_lease(inode, O_RDONLY);
d54ff732 2320 else
8737c930 2321 break_lease(inode, O_WRONLY);
3bc303c2
JL
2322 rc = filemap_fdatawrite(inode->i_mapping);
2323 if (cinode->clientCanCacheRead == 0) {
2324 waitrc = filemap_fdatawait(inode->i_mapping);
2325 invalidate_remote_inode(inode);
2326 }
2327 if (!rc)
2328 rc = waitrc;
2329 if (rc)
2330 cinode->write_behind_rc = rc;
b6b38f70 2331 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2332 }
2333
2334 /*
 2335 * Releasing a stale oplock after a recent reconnect of the SMB session
 2336 * (using a now-incorrect file handle) is not a data integrity issue, but
 2337 * do not bother sending an oplock release if the session to the server
 2338 * is still disconnected, since the server has already released the oplock.
2339 */
2340 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
ba00ba64 2341 rc = CIFSSMBLock(0, cfile->tcon, cfile->netfid, 0, 0, 0, 0,
3bc303c2 2342 LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2343 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 2344 }
9b646972
TH
2345
2346 /*
2347 * We might have kicked in before is_valid_oplock_break()
2348 * finished grabbing reference for us. Make sure it's done by
2349 * waiting for GlobalSMSSeslock.
2350 */
2351 write_lock(&GlobalSMBSeslock);
2352 write_unlock(&GlobalSMBSeslock);
2353
2354 cifs_oplock_break_put(cfile);
3bc303c2
JL
2355}
2356
9b646972 2357void cifs_oplock_break_get(struct cifsFileInfo *cfile)
3bc303c2 2358{
3bc303c2
JL
2359 mntget(cfile->mnt);
2360 cifsFileInfo_get(cfile);
3bc303c2
JL
2361}
2362
9b646972 2363void cifs_oplock_break_put(struct cifsFileInfo *cfile)
3bc303c2 2364{
3bc303c2
JL
2365 mntput(cfile->mnt);
2366 cifsFileInfo_put(cfile);
2367}
2368
f5e54d6e 2369const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2370 .readpage = cifs_readpage,
2371 .readpages = cifs_readpages,
2372 .writepage = cifs_writepage,
37c0eb46 2373 .writepages = cifs_writepages,
d9414774
NP
2374 .write_begin = cifs_write_begin,
2375 .write_end = cifs_write_end,
1da177e4 2376 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2377 .releasepage = cifs_release_page,
2378 .invalidatepage = cifs_invalidate_page,
1da177e4
LT
2379 /* .sync_page = cifs_sync_page, */
2380 /* .direct_IO = */
2381};
273d81d6
DK
2382
2383/*
2384 * cifs_readpages requires the server to support a buffer large enough to
2385 * contain the header plus one complete page of data. Otherwise, we need
2386 * to leave cifs_readpages out of the address space operations.
2387 */
f5e54d6e 2388const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2389 .readpage = cifs_readpage,
2390 .writepage = cifs_writepage,
2391 .writepages = cifs_writepages,
d9414774
NP
2392 .write_begin = cifs_write_begin,
2393 .write_end = cifs_write_end,
273d81d6 2394 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2395 .releasepage = cifs_release_page,
2396 .invalidatepage = cifs_invalidate_page,
273d81d6
DK
2397 /* .sync_page = cifs_sync_page, */
2398 /* .direct_IO = */
2399};
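
For completeness, a sketch of how an inode ends up with one table or the other. The real selection lives in cifs_set_ops() in fs/cifs/inode.c and may differ in detail; the idea is that a server whose negotiated buffer cannot hold an SMB header plus a full page gets the readpages-less table:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Sketch only; see cifs_set_ops() in fs/cifs/inode.c for the real logic. */
static void example_pick_aops(struct inode *inode, unsigned int server_max_buf)
{
	if (server_max_buf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
	else
		inode->i_data.a_ops = &cifs_addr_ops;
}
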