Linux 2.6.39-rc3
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
1da177e4
LT
35#include <asm/div64.h>
36#include "cifsfs.h"
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_unicode.h"
41#include "cifs_debug.h"
42#include "cifs_fs_sb.h"
9451a9a5 43#include "fscache.h"
1da177e4 44
1da177e4
LT
45static inline int cifs_convert_flags(unsigned int flags)
46{
47 if ((flags & O_ACCMODE) == O_RDONLY)
48 return GENERIC_READ;
49 else if ((flags & O_ACCMODE) == O_WRONLY)
50 return GENERIC_WRITE;
51 else if ((flags & O_ACCMODE) == O_RDWR) {
 52 /* GENERIC_ALL is too much permission to request; it
53 can cause unnecessary access denied on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ | GENERIC_WRITE);
56 }
57
e10f7b55
JL
58 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60 FILE_READ_DATA);
7fc8f4e9 61}
e10f7b55 62
608712fe 63static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 64{
608712fe 65 u32 posix_flags = 0;
e10f7b55 66
7fc8f4e9 67 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 68 posix_flags = SMB_O_RDONLY;
7fc8f4e9 69 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
70 posix_flags = SMB_O_WRONLY;
71 else if ((flags & O_ACCMODE) == O_RDWR)
72 posix_flags = SMB_O_RDWR;
73
74 if (flags & O_CREAT)
75 posix_flags |= SMB_O_CREAT;
76 if (flags & O_EXCL)
77 posix_flags |= SMB_O_EXCL;
78 if (flags & O_TRUNC)
79 posix_flags |= SMB_O_TRUNC;
80 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 81 if (flags & O_DSYNC)
608712fe 82 posix_flags |= SMB_O_SYNC;
7fc8f4e9 83 if (flags & O_DIRECTORY)
608712fe 84 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 85 if (flags & O_NOFOLLOW)
608712fe 86 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 87 if (flags & O_DIRECT)
608712fe 88 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
89
90 return posix_flags;
1da177e4
LT
91}
92
93static inline int cifs_get_disposition(unsigned int flags)
94{
95 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96 return FILE_CREATE;
97 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98 return FILE_OVERWRITE_IF;
99 else if ((flags & O_CREAT) == O_CREAT)
100 return FILE_OPEN_IF;
55aa2e09
SF
101 else if ((flags & O_TRUNC) == O_TRUNC)
102 return FILE_OVERWRITE;
1da177e4
LT
103 else
104 return FILE_OPEN;
105}
106
608712fe
JL
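/*
 * Issue an SMB POSIX create/open for full_path (CIFSPOSIXCreate) using
 * flags converted by cifs_posix_convert_flags(). On success, if pinode
 * is supplied, a new inode is instantiated (or the existing one updated)
 * from the returned FILE_UNIX_BASIC_INFO.
 */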
107int cifs_posix_open(char *full_path, struct inode **pinode,
108 struct super_block *sb, int mode, unsigned int f_flags,
109 __u32 *poplock, __u16 *pnetfid, int xid)
110{
111 int rc;
112 FILE_UNIX_BASIC_INFO *presp_data;
113 __u32 posix_flags = 0;
114 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
115 struct cifs_fattr fattr;
116 struct tcon_link *tlink;
117 struct cifsTconInfo *tcon;
118
119 cFYI(1, "posix open %s", full_path);
120
121 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
122 if (presp_data == NULL)
123 return -ENOMEM;
124
125 tlink = cifs_sb_tlink(cifs_sb);
126 if (IS_ERR(tlink)) {
127 rc = PTR_ERR(tlink);
128 goto posix_open_ret;
129 }
130
131 tcon = tlink_tcon(tlink);
132 mode &= ~current_umask();
133
134 posix_flags = cifs_posix_convert_flags(f_flags);
135 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
136 poplock, full_path, cifs_sb->local_nls,
137 cifs_sb->mnt_cifs_flags &
138 CIFS_MOUNT_MAP_SPECIAL_CHR);
139 cifs_put_tlink(tlink);
140
141 if (rc)
142 goto posix_open_ret;
143
144 if (presp_data->Type == cpu_to_le32(-1))
145 goto posix_open_ret; /* open ok, caller does qpathinfo */
146
147 if (!pinode)
148 goto posix_open_ret; /* caller does not need info */
149
150 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
151
152 /* get new inode and set it up */
153 if (*pinode == NULL) {
154 cifs_fill_uniqueid(sb, &fattr);
155 *pinode = cifs_iget(sb, &fattr);
156 if (!*pinode) {
157 rc = -ENOMEM;
158 goto posix_open_ret;
159 }
160 } else {
161 cifs_fattr_to_inode(*pinode, &fattr);
162 }
163
164posix_open_ret:
165 kfree(presp_data);
166 return rc;
167}
168
eeb910a6
PS
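/*
 * Open full_path with an NT-style create request: CIFSSMBOpen when the
 * server advertises CAP_NT_SMBS, SMBLegacyOpen otherwise, then refresh
 * the inode from the server (unix or non-unix path as appropriate).
 */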
169static int
170cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
171 struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
172 __u16 *pnetfid, int xid)
173{
174 int rc;
175 int desiredAccess;
176 int disposition;
177 FILE_ALL_INFO *buf;
178
179 desiredAccess = cifs_convert_flags(f_flags);
180
181/*********************************************************************
182 * open flag mapping table:
183 *
184 * POSIX Flag CIFS Disposition
185 * ---------- ----------------
186 * O_CREAT FILE_OPEN_IF
187 * O_CREAT | O_EXCL FILE_CREATE
188 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
189 * O_TRUNC FILE_OVERWRITE
190 * none of the above FILE_OPEN
191 *
192 * Note that there is not a direct match between disposition
 193 * FILE_SUPERSEDE (ie create whether or not file exists) although
194 * O_CREAT | O_TRUNC is similar but truncates the existing
195 * file rather than creating a new file as FILE_SUPERSEDE does
196 * (which uses the attributes / metadata passed in on open call)
197 *?
198 *? O_SYNC is a reasonable match to CIFS writethrough flag
199 *? and the read write flags match reasonably. O_LARGEFILE
200 *? is irrelevant because largefile support is always used
201 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
202 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
203 *********************************************************************/
204
205 disposition = cifs_get_disposition(f_flags);
206
207 /* BB pass O_SYNC flag through on file attributes .. BB */
208
209 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
210 if (!buf)
211 return -ENOMEM;
212
213 if (tcon->ses->capabilities & CAP_NT_SMBS)
214 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
215 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
216 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
217 & CIFS_MOUNT_MAP_SPECIAL_CHR);
218 else
219 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
220 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
221 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
222 & CIFS_MOUNT_MAP_SPECIAL_CHR);
223
224 if (rc)
225 goto out;
226
227 if (tcon->unix_ext)
228 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
229 xid);
230 else
231 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
232 xid, pnetfid);
233
234out:
235 kfree(buf);
236 return rc;
237}
238
15ecb436
JL
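/*
 * Allocate a cifsFileInfo for the freshly opened handle, take references
 * on the dentry and tlink, and link it into the tcon and inode open-file
 * lists (readable instances first), recording the granted oplock level.
 */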
239struct cifsFileInfo *
240cifs_new_fileinfo(__u16 fileHandle, struct file *file,
241 struct tcon_link *tlink, __u32 oplock)
242{
243 struct dentry *dentry = file->f_path.dentry;
244 struct inode *inode = dentry->d_inode;
245 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
246 struct cifsFileInfo *pCifsFile;
247
248 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
249 if (pCifsFile == NULL)
250 return pCifsFile;
251
5f6dbc9e 252 pCifsFile->count = 1;
15ecb436
JL
253 pCifsFile->netfid = fileHandle;
254 pCifsFile->pid = current->tgid;
255 pCifsFile->uid = current_fsuid();
256 pCifsFile->dentry = dget(dentry);
257 pCifsFile->f_flags = file->f_flags;
258 pCifsFile->invalidHandle = false;
15ecb436
JL
259 pCifsFile->tlink = cifs_get_tlink(tlink);
260 mutex_init(&pCifsFile->fh_mutex);
261 mutex_init(&pCifsFile->lock_mutex);
262 INIT_LIST_HEAD(&pCifsFile->llist);
15ecb436
JL
263 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
264
4477288a 265 spin_lock(&cifs_file_list_lock);
15ecb436
JL
266 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
267 /* if readable file instance put first in list*/
268 if (file->f_mode & FMODE_READ)
269 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
270 else
271 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
4477288a 272 spin_unlock(&cifs_file_list_lock);
15ecb436 273
c6723628 274 cifs_set_oplock_level(pCifsInode, oplock);
15ecb436
JL
275
276 file->private_data = pCifsFile;
277 return pCifsFile;
278}
279
cdff08e7
SF
280/*
281 * Release a reference on the file private data. This may involve closing
5f6dbc9e
JL
282 * the filehandle out on the server. Must be called without holding
283 * cifs_file_list_lock.
cdff08e7 284 */
b33879aa
JL
285void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
286{
e66673e3 287 struct inode *inode = cifs_file->dentry->d_inode;
cdff08e7 288 struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
e66673e3 289 struct cifsInodeInfo *cifsi = CIFS_I(inode);
4f8ba8a0 290 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
cdff08e7
SF
291 struct cifsLockInfo *li, *tmp;
292
293 spin_lock(&cifs_file_list_lock);
5f6dbc9e 294 if (--cifs_file->count > 0) {
cdff08e7
SF
295 spin_unlock(&cifs_file_list_lock);
296 return;
297 }
298
299 /* remove it from the lists */
300 list_del(&cifs_file->flist);
301 list_del(&cifs_file->tlist);
302
303 if (list_empty(&cifsi->openFileList)) {
304 cFYI(1, "closing last open instance for inode %p",
305 cifs_file->dentry->d_inode);
4f8ba8a0
PS
306
 307 /* in strict cache mode we need to invalidate mapping on the last
 308 close because it may cause an error when we open this file
309 again and get at least level II oplock */
310 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
311 CIFS_I(inode)->invalid_mapping = true;
312
c6723628 313 cifs_set_oplock_level(cifsi, 0);
cdff08e7
SF
314 }
315 spin_unlock(&cifs_file_list_lock);
316
317 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
318 int xid, rc;
319
320 xid = GetXid();
321 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
322 FreeXid(xid);
323 }
324
325 /* Delete any outstanding lock records. We'll lose them when the file
326 * is closed anyway.
327 */
328 mutex_lock(&cifs_file->lock_mutex);
329 list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
330 list_del(&li->llist);
331 kfree(li);
b33879aa 332 }
cdff08e7
SF
333 mutex_unlock(&cifs_file->lock_mutex);
334
335 cifs_put_tlink(cifs_file->tlink);
336 dput(cifs_file->dentry);
337 kfree(cifs_file);
b33879aa
JL
338}
339
1da177e4
LT
340int cifs_open(struct inode *inode, struct file *file)
341{
342 int rc = -EACCES;
590a3fe0
JL
343 int xid;
344 __u32 oplock;
1da177e4 345 struct cifs_sb_info *cifs_sb;
276a74a4 346 struct cifsTconInfo *tcon;
7ffec372 347 struct tcon_link *tlink;
6ca9f3ba 348 struct cifsFileInfo *pCifsFile = NULL;
1da177e4 349 char *full_path = NULL;
7e12eddb 350 bool posix_open_ok = false;
1da177e4 351 __u16 netfid;
1da177e4
LT
352
353 xid = GetXid();
354
355 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
356 tlink = cifs_sb_tlink(cifs_sb);
357 if (IS_ERR(tlink)) {
358 FreeXid(xid);
359 return PTR_ERR(tlink);
360 }
361 tcon = tlink_tcon(tlink);
1da177e4 362
e6a00296 363 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 364 if (full_path == NULL) {
0f3bc09e 365 rc = -ENOMEM;
232341ba 366 goto out;
1da177e4
LT
367 }
368
b6b38f70
JP
369 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
370 inode, file->f_flags, full_path);
276a74a4
SF
371
372 if (oplockEnabled)
373 oplock = REQ_OPLOCK;
374 else
375 oplock = 0;
376
64cc2c63
SF
377 if (!tcon->broken_posix_open && tcon->unix_ext &&
378 (tcon->ses->capabilities & CAP_UNIX) &&
276a74a4
SF
379 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
380 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 381 /* can not refresh inode info since size could be stale */
2422f676 382 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 383 cifs_sb->mnt_file_mode /* ignored */,
608712fe 384 file->f_flags, &oplock, &netfid, xid);
276a74a4 385 if (rc == 0) {
b6b38f70 386 cFYI(1, "posix open succeeded");
7e12eddb 387 posix_open_ok = true;
64cc2c63
SF
388 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
389 if (tcon->ses->serverNOS)
b6b38f70 390 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
391 " unexpected error on SMB posix open"
392 ", disabling posix open support."
393 " Check if server update available.",
394 tcon->ses->serverName,
b6b38f70 395 tcon->ses->serverNOS);
64cc2c63 396 tcon->broken_posix_open = true;
276a74a4
SF
397 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
398 (rc != -EOPNOTSUPP)) /* path not found or net err */
399 goto out;
64cc2c63
SF
400 /* else fallthrough to retry open the old way on network i/o
401 or DFS errors */
276a74a4
SF
402 }
403
7e12eddb
PS
404 if (!posix_open_ok) {
405 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
406 file->f_flags, &oplock, &netfid, xid);
407 if (rc)
408 goto out;
409 }
47c78b7f 410
abfe1eed 411 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
6ca9f3ba 412 if (pCifsFile == NULL) {
7e12eddb 413 CIFSSMBClose(xid, tcon, netfid);
1da177e4
LT
414 rc = -ENOMEM;
415 goto out;
416 }
1da177e4 417
9451a9a5
SJ
418 cifs_fscache_set_inode_cookie(inode, file);
419
7e12eddb 420 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1da177e4
LT
421 /* time to set mode which we can not set earlier due to
422 problems creating new read-only files */
7e12eddb
PS
423 struct cifs_unix_set_info_args args = {
424 .mode = inode->i_mode,
425 .uid = NO_CHANGE_64,
426 .gid = NO_CHANGE_64,
427 .ctime = NO_CHANGE_64,
428 .atime = NO_CHANGE_64,
429 .mtime = NO_CHANGE_64,
430 .device = 0,
431 };
d44a9fe2
JL
432 CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
433 pCifsFile->pid);
1da177e4
LT
434 }
435
436out:
1da177e4
LT
437 kfree(full_path);
438 FreeXid(xid);
7ffec372 439 cifs_put_tlink(tlink);
1da177e4
LT
440 return rc;
441}
442
0418726b 443/* Try to reacquire byte range locks that were released when session */
1da177e4
LT
444/* to server was lost */
445static int cifs_relock_file(struct cifsFileInfo *cifsFile)
446{
447 int rc = 0;
448
449/* BB list all locks open on this file and relock */
450
451 return rc;
452}
453
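/*
 * Reopen a handle that was marked invalid (typically after a reconnect).
 * The POSIX open path is tried first when the server supports it; when
 * can_flush is set, dirty pages are written back and the inode metadata
 * refreshed. Byte-range locks are then reacquired.
 */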
15886177 454static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
1da177e4
LT
455{
456 int rc = -EACCES;
590a3fe0
JL
457 int xid;
458 __u32 oplock;
1da177e4 459 struct cifs_sb_info *cifs_sb;
7fc8f4e9 460 struct cifsTconInfo *tcon;
1da177e4 461 struct cifsInodeInfo *pCifsInode;
fb8c4b14 462 struct inode *inode;
1da177e4
LT
463 char *full_path = NULL;
464 int desiredAccess;
465 int disposition = FILE_OPEN;
466 __u16 netfid;
467
1da177e4 468 xid = GetXid();
f0a71eb8 469 mutex_lock(&pCifsFile->fh_mutex);
4b18f2a9 470 if (!pCifsFile->invalidHandle) {
f0a71eb8 471 mutex_unlock(&pCifsFile->fh_mutex);
0f3bc09e 472 rc = 0;
1da177e4 473 FreeXid(xid);
0f3bc09e 474 return rc;
1da177e4
LT
475 }
476
15886177 477 inode = pCifsFile->dentry->d_inode;
1da177e4 478 cifs_sb = CIFS_SB(inode->i_sb);
13cfb733 479 tcon = tlink_tcon(pCifsFile->tlink);
3a9f462f 480
1da177e4
LT
481/* can not grab rename sem here because various ops, including
482 those that already have the rename sem can end up causing writepage
483 to get called and if the server was down that means we end up here,
484 and we can never tell if the caller already has the rename_sem */
15886177 485 full_path = build_path_from_dentry(pCifsFile->dentry);
1da177e4 486 if (full_path == NULL) {
3a9f462f 487 rc = -ENOMEM;
f0a71eb8 488 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4 489 FreeXid(xid);
3a9f462f 490 return rc;
1da177e4
LT
491 }
492
b6b38f70 493 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
15886177 494 inode, pCifsFile->f_flags, full_path);
1da177e4
LT
495
496 if (oplockEnabled)
497 oplock = REQ_OPLOCK;
498 else
4b18f2a9 499 oplock = 0;
1da177e4 500
7fc8f4e9
SF
501 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
502 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
503 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
504
505 /*
506 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
507 * original open. Must mask them off for a reopen.
508 */
15886177
JL
509 unsigned int oflags = pCifsFile->f_flags &
510 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 511
2422f676 512 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
fa588e0c
SF
513 cifs_sb->mnt_file_mode /* ignored */,
514 oflags, &oplock, &netfid, xid);
7fc8f4e9 515 if (rc == 0) {
b6b38f70 516 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
517 goto reopen_success;
518 }
519 /* fallthrough to retry open the old way on errors, especially
520 in the reconnect path it is important to retry hard */
521 }
522
15886177 523 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
7fc8f4e9 524
1da177e4 525 /* Can not refresh inode by passing in file_info buf to be returned
fb8c4b14
SF
526 by SMBOpen and then calling get_inode_info with returned buf
527 since file might have write behind data that needs to be flushed
1da177e4
LT
528 and server version of file size can be stale. If we knew for sure
529 that inode was not dirty locally we could do this */
530
7fc8f4e9 531 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
1da177e4 532 CREATE_NOT_DIR, &netfid, &oplock, NULL,
fb8c4b14 533 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
737b758c 534 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4 535 if (rc) {
f0a71eb8 536 mutex_unlock(&pCifsFile->fh_mutex);
b6b38f70
JP
537 cFYI(1, "cifs_open returned 0x%x", rc);
538 cFYI(1, "oplock: %d", oplock);
15886177
JL
539 goto reopen_error_exit;
540 }
541
7fc8f4e9 542reopen_success:
15886177
JL
543 pCifsFile->netfid = netfid;
544 pCifsFile->invalidHandle = false;
545 mutex_unlock(&pCifsFile->fh_mutex);
546 pCifsInode = CIFS_I(inode);
547
548 if (can_flush) {
549 rc = filemap_write_and_wait(inode->i_mapping);
eb4b756b 550 mapping_set_error(inode->i_mapping, rc);
15886177 551
15886177
JL
552 if (tcon->unix_ext)
553 rc = cifs_get_inode_info_unix(&inode,
554 full_path, inode->i_sb, xid);
555 else
556 rc = cifs_get_inode_info(&inode,
557 full_path, NULL, inode->i_sb,
558 xid, NULL);
559 } /* else we are writing out data to server already
560 and could deadlock if we tried to flush data, and
561 since we do not know if we have data that would
562 invalidate the current end of file on the server
 563 we can not go to the server to get the new inode
564 info */
e66673e3 565
c6723628 566 cifs_set_oplock_level(pCifsInode, oplock);
e66673e3 567
15886177
JL
568 cifs_relock_file(pCifsFile);
569
570reopen_error_exit:
1da177e4
LT
571 kfree(full_path);
572 FreeXid(xid);
573 return rc;
574}
575
576int cifs_close(struct inode *inode, struct file *file)
577{
cdff08e7
SF
578 cifsFileInfo_put(file->private_data);
579 file->private_data = NULL;
7ee1af76 580
cdff08e7
SF
581 /* return code from the ->release op is always ignored */
582 return 0;
1da177e4
LT
583}
584
585int cifs_closedir(struct inode *inode, struct file *file)
586{
587 int rc = 0;
588 int xid;
c21dfb69 589 struct cifsFileInfo *pCFileStruct = file->private_data;
1da177e4
LT
590 char *ptmp;
591
b6b38f70 592 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4
LT
593
594 xid = GetXid();
595
596 if (pCFileStruct) {
13cfb733 597 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
1da177e4 598
b6b38f70 599 cFYI(1, "Freeing private data in close dir");
4477288a 600 spin_lock(&cifs_file_list_lock);
4b18f2a9
SF
601 if (!pCFileStruct->srch_inf.endOfSearch &&
602 !pCFileStruct->invalidHandle) {
603 pCFileStruct->invalidHandle = true;
4477288a 604 spin_unlock(&cifs_file_list_lock);
1da177e4 605 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
b6b38f70
JP
606 cFYI(1, "Closing uncompleted readdir with rc %d",
607 rc);
1da177e4
LT
608 /* not much we can do if it fails anyway, ignore rc */
609 rc = 0;
ddb4cbfc 610 } else
4477288a 611 spin_unlock(&cifs_file_list_lock);
1da177e4
LT
612 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
613 if (ptmp) {
b6b38f70 614 cFYI(1, "closedir free smb buf in srch struct");
1da177e4 615 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
fb8c4b14 616 if (pCFileStruct->srch_inf.smallBuf)
d47d7c1a
SF
617 cifs_small_buf_release(ptmp);
618 else
619 cifs_buf_release(ptmp);
1da177e4 620 }
13cfb733 621 cifs_put_tlink(pCFileStruct->tlink);
1da177e4
LT
622 kfree(file->private_data);
623 file->private_data = NULL;
624 }
625 /* BB can we lock the filestruct while this is going on? */
626 FreeXid(xid);
627 return rc;
628}
629
7ee1af76
JA
630static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
631 __u64 offset, __u8 lockType)
632{
fb8c4b14
SF
633 struct cifsLockInfo *li =
634 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
635 if (li == NULL)
636 return -ENOMEM;
637 li->offset = offset;
638 li->length = len;
639 li->type = lockType;
796e5661 640 mutex_lock(&fid->lock_mutex);
7ee1af76 641 list_add(&li->llist, &fid->llist);
796e5661 642 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
643 return 0;
644}
645
1da177e4
LT
646int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
647{
648 int rc, xid;
1da177e4
LT
649 __u32 numLock = 0;
650 __u32 numUnlock = 0;
651 __u64 length;
4b18f2a9 652 bool wait_flag = false;
1da177e4 653 struct cifs_sb_info *cifs_sb;
13a6e42a 654 struct cifsTconInfo *tcon;
08547b03
SF
655 __u16 netfid;
656 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
13a6e42a 657 bool posix_locking = 0;
1da177e4
LT
658
659 length = 1 + pfLock->fl_end - pfLock->fl_start;
660 rc = -EACCES;
661 xid = GetXid();
662
b6b38f70 663 cFYI(1, "Lock parm: 0x%x flockflags: "
1da177e4 664 "0x%x flocktype: 0x%x start: %lld end: %lld",
fb8c4b14 665 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
b6b38f70 666 pfLock->fl_end);
1da177e4
LT
667
668 if (pfLock->fl_flags & FL_POSIX)
b6b38f70 669 cFYI(1, "Posix");
1da177e4 670 if (pfLock->fl_flags & FL_FLOCK)
b6b38f70 671 cFYI(1, "Flock");
1da177e4 672 if (pfLock->fl_flags & FL_SLEEP) {
b6b38f70 673 cFYI(1, "Blocking lock");
4b18f2a9 674 wait_flag = true;
1da177e4
LT
675 }
676 if (pfLock->fl_flags & FL_ACCESS)
b6b38f70
JP
677 cFYI(1, "Process suspended by mandatory locking - "
678 "not implemented yet");
1da177e4 679 if (pfLock->fl_flags & FL_LEASE)
b6b38f70 680 cFYI(1, "Lease on file - not implemented yet");
fb8c4b14 681 if (pfLock->fl_flags &
1da177e4 682 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
b6b38f70 683 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
1da177e4
LT
684
685 if (pfLock->fl_type == F_WRLCK) {
b6b38f70 686 cFYI(1, "F_WRLCK ");
1da177e4
LT
687 numLock = 1;
688 } else if (pfLock->fl_type == F_UNLCK) {
b6b38f70 689 cFYI(1, "F_UNLCK");
1da177e4 690 numUnlock = 1;
d47d7c1a
SF
691 /* Check if unlock includes more than
692 one lock range */
1da177e4 693 } else if (pfLock->fl_type == F_RDLCK) {
b6b38f70 694 cFYI(1, "F_RDLCK");
1da177e4
LT
695 lockType |= LOCKING_ANDX_SHARED_LOCK;
696 numLock = 1;
697 } else if (pfLock->fl_type == F_EXLCK) {
b6b38f70 698 cFYI(1, "F_EXLCK");
1da177e4
LT
699 numLock = 1;
700 } else if (pfLock->fl_type == F_SHLCK) {
b6b38f70 701 cFYI(1, "F_SHLCK");
1da177e4
LT
702 lockType |= LOCKING_ANDX_SHARED_LOCK;
703 numLock = 1;
704 } else
b6b38f70 705 cFYI(1, "Unknown type of lock");
1da177e4 706
e6a00296 707 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 708 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
08547b03
SF
709 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
710
13a6e42a
SF
711 if ((tcon->ses->capabilities & CAP_UNIX) &&
712 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
acc18aa1 713 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
13a6e42a 714 posix_locking = 1;
08547b03
SF
715 /* BB add code here to normalize offset and length to
716 account for negative length which we can not accept over the
717 wire */
1da177e4 718 if (IS_GETLK(cmd)) {
fb8c4b14 719 if (posix_locking) {
08547b03 720 int posix_lock_type;
fb8c4b14 721 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
722 posix_lock_type = CIFS_RDLCK;
723 else
724 posix_lock_type = CIFS_WRLCK;
13a6e42a 725 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
fc94cdb9 726 length, pfLock,
08547b03
SF
727 posix_lock_type, wait_flag);
728 FreeXid(xid);
729 return rc;
730 }
731
732 /* BB we could chain these into one lock request BB */
13a6e42a 733 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
12fed00d 734 0, 1, lockType, 0 /* wait flag */, 0);
1da177e4 735 if (rc == 0) {
13a6e42a 736 rc = CIFSSMBLock(xid, tcon, netfid, length,
1da177e4
LT
737 pfLock->fl_start, 1 /* numUnlock */ ,
738 0 /* numLock */ , lockType,
12fed00d 739 0 /* wait flag */, 0);
1da177e4
LT
740 pfLock->fl_type = F_UNLCK;
741 if (rc != 0)
b6b38f70
JP
742 cERROR(1, "Error unlocking previously locked "
743 "range %d during test of lock", rc);
1da177e4
LT
744 rc = 0;
745
746 } else {
747 /* if rc == ERR_SHARING_VIOLATION ? */
f05337c6
PS
748 rc = 0;
749
750 if (lockType & LOCKING_ANDX_SHARED_LOCK) {
751 pfLock->fl_type = F_WRLCK;
752 } else {
753 rc = CIFSSMBLock(xid, tcon, netfid, length,
754 pfLock->fl_start, 0, 1,
755 lockType | LOCKING_ANDX_SHARED_LOCK,
12fed00d 756 0 /* wait flag */, 0);
f05337c6
PS
757 if (rc == 0) {
758 rc = CIFSSMBLock(xid, tcon, netfid,
759 length, pfLock->fl_start, 1, 0,
760 lockType |
761 LOCKING_ANDX_SHARED_LOCK,
12fed00d 762 0 /* wait flag */, 0);
f05337c6
PS
763 pfLock->fl_type = F_RDLCK;
764 if (rc != 0)
f19159dc 765 cERROR(1, "Error unlocking "
f05337c6 766 "previously locked range %d "
f19159dc 767 "during test of lock", rc);
f05337c6
PS
768 rc = 0;
769 } else {
770 pfLock->fl_type = F_WRLCK;
771 rc = 0;
772 }
773 }
1da177e4
LT
774 }
775
776 FreeXid(xid);
777 return rc;
778 }
7ee1af76
JA
779
780 if (!numLock && !numUnlock) {
781 /* if no lock or unlock then nothing
782 to do since we do not know what it is */
783 FreeXid(xid);
784 return -EOPNOTSUPP;
785 }
786
787 if (posix_locking) {
08547b03 788 int posix_lock_type;
fb8c4b14 789 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
790 posix_lock_type = CIFS_RDLCK;
791 else
792 posix_lock_type = CIFS_WRLCK;
50c2f753 793
fb8c4b14 794 if (numUnlock == 1)
beb84dc8 795 posix_lock_type = CIFS_UNLCK;
7ee1af76 796
13a6e42a 797 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
fc94cdb9 798 length, pfLock,
08547b03 799 posix_lock_type, wait_flag);
7ee1af76 800 } else {
c21dfb69 801 struct cifsFileInfo *fid = file->private_data;
7ee1af76
JA
802
803 if (numLock) {
13a6e42a 804 rc = CIFSSMBLock(xid, tcon, netfid, length,
12fed00d
PS
805 pfLock->fl_start, 0, numLock, lockType,
806 wait_flag, 0);
7ee1af76
JA
807
808 if (rc == 0) {
809 /* For Windows locks we must store them. */
810 rc = store_file_lock(fid, length,
811 pfLock->fl_start, lockType);
812 }
813 } else if (numUnlock) {
814 /* For each stored lock that this unlock overlaps
815 completely, unlock it. */
816 int stored_rc = 0;
817 struct cifsLockInfo *li, *tmp;
818
6b70c955 819 rc = 0;
796e5661 820 mutex_lock(&fid->lock_mutex);
7ee1af76
JA
821 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
822 if (pfLock->fl_start <= li->offset &&
c19eb710 823 (pfLock->fl_start + length) >=
39db810c 824 (li->offset + li->length)) {
13a6e42a 825 stored_rc = CIFSSMBLock(xid, tcon,
12fed00d
PS
826 netfid, li->length,
827 li->offset, 1, 0,
828 li->type, false, 0);
7ee1af76
JA
829 if (stored_rc)
830 rc = stored_rc;
2c964d1f
PS
831 else {
832 list_del(&li->llist);
833 kfree(li);
834 }
7ee1af76
JA
835 }
836 }
796e5661 837 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
838 }
839 }
840
d634cc15 841 if (pfLock->fl_flags & FL_POSIX)
1da177e4
LT
842 posix_lock_file_wait(file, pfLock);
843 FreeXid(xid);
844 return rc;
845}
846
fbec9ab9 847/* update the file size (if needed) after a write */
72432ffc 848void
fbec9ab9
JL
849cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
850 unsigned int bytes_written)
851{
852 loff_t end_of_write = offset + bytes_written;
853
854 if (end_of_write > cifsi->server_eof)
855 cifsi->server_eof = end_of_write;
856}
857
1da177e4
LT
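/*
 * Write from a user-space buffer: send at most wsize bytes per
 * CIFSSMBWrite call, reopening the handle and retrying on -EAGAIN, and
 * advance the cached server EOF and i_size as data is accepted.
 */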
858ssize_t cifs_user_write(struct file *file, const char __user *write_data,
859 size_t write_size, loff_t *poffset)
860{
50ae28f0 861 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
862 int rc = 0;
863 unsigned int bytes_written = 0;
864 unsigned int total_written;
865 struct cifs_sb_info *cifs_sb;
866 struct cifsTconInfo *pTcon;
7749981e 867 int xid;
1da177e4 868 struct cifsFileInfo *open_file;
50ae28f0 869 struct cifsInodeInfo *cifsi = CIFS_I(inode);
1da177e4 870
e6a00296 871 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 872
b6b38f70
JP
873 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
874 *poffset, file->f_path.dentry->d_name.name); */
1da177e4
LT
875
876 if (file->private_data == NULL)
877 return -EBADF;
ba00ba64 878
c21dfb69 879 open_file = file->private_data;
13cfb733 880 pTcon = tlink_tcon(open_file->tlink);
50c2f753 881
838726c4
JL
882 rc = generic_write_checks(file, poffset, &write_size, 0);
883 if (rc)
884 return rc;
885
1da177e4 886 xid = GetXid();
1da177e4 887
1da177e4
LT
888 for (total_written = 0; write_size > total_written;
889 total_written += bytes_written) {
890 rc = -EAGAIN;
891 while (rc == -EAGAIN) {
892 if (file->private_data == NULL) {
893 /* file has been closed on us */
894 FreeXid(xid);
895 /* if we have gotten here we have written some data
896 and blocked, and the file has been freed on us while
897 we blocked so return what we managed to write */
898 return total_written;
fb8c4b14 899 }
1da177e4 900 if (open_file->invalidHandle) {
1da177e4
LT
901 /* we could deadlock if we called
902 filemap_fdatawait from here so tell
903 reopen_file not to flush data to server
904 now */
15886177 905 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
906 if (rc != 0)
907 break;
908 }
909
910 rc = CIFSSMBWrite(xid, pTcon,
911 open_file->netfid,
912 min_t(const int, cifs_sb->wsize,
913 write_size - total_written),
914 *poffset, &bytes_written,
7749981e 915 NULL, write_data + total_written, 0);
1da177e4
LT
916 }
917 if (rc || (bytes_written == 0)) {
918 if (total_written)
919 break;
920 else {
921 FreeXid(xid);
922 return rc;
923 }
fbec9ab9
JL
924 } else {
925 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 926 *poffset += bytes_written;
fbec9ab9 927 }
1da177e4
LT
928 }
929
a4544347 930 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 931
fb8c4b14 932/* Do not update local mtime - server will set its actual value on write
50ae28f0
JS
933 * inode->i_ctime = inode->i_mtime =
934 * current_fs_time(inode->i_sb);*/
935 if (total_written > 0) {
936 spin_lock(&inode->i_lock);
937 if (*poffset > inode->i_size)
938 i_size_write(inode, *poffset);
939 spin_unlock(&inode->i_lock);
1da177e4 940 }
50ae28f0
JS
941 mark_inode_dirty_sync(inode);
942
1da177e4
LT
943 FreeXid(xid);
944 return total_written;
945}
946
7da4b49a
JL
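/*
 * Like cifs_user_write() but takes a kernel buffer and an explicit open
 * handle; used by cifs_partialpagewrite() and cifs_write_end().
 * CIFSSMBWrite2 (kvec based) is preferred when signing is not in use.
 */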
947static ssize_t cifs_write(struct cifsFileInfo *open_file,
948 const char *write_data, size_t write_size,
949 loff_t *poffset)
1da177e4
LT
950{
951 int rc = 0;
952 unsigned int bytes_written = 0;
953 unsigned int total_written;
954 struct cifs_sb_info *cifs_sb;
955 struct cifsTconInfo *pTcon;
7749981e 956 int xid;
7da4b49a
JL
957 struct dentry *dentry = open_file->dentry;
958 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1da177e4 959
7da4b49a 960 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 961
b6b38f70 962 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
7da4b49a 963 *poffset, dentry->d_name.name);
1da177e4 964
13cfb733 965 pTcon = tlink_tcon(open_file->tlink);
50c2f753 966
1da177e4 967 xid = GetXid();
1da177e4 968
1da177e4
LT
969 for (total_written = 0; write_size > total_written;
970 total_written += bytes_written) {
971 rc = -EAGAIN;
972 while (rc == -EAGAIN) {
1da177e4 973 if (open_file->invalidHandle) {
1da177e4
LT
974 /* we could deadlock if we called
975 filemap_fdatawait from here so tell
fb8c4b14 976 reopen_file not to flush data to
1da177e4 977 server now */
15886177 978 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
979 if (rc != 0)
980 break;
981 }
fb8c4b14
SF
982 if (experimEnabled || (pTcon->ses->server &&
983 ((pTcon->ses->server->secMode &
08775834 984 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
c01f36a8 985 == 0))) {
3e84469d
SF
986 struct kvec iov[2];
987 unsigned int len;
988
0ae0efad 989 len = min((size_t)cifs_sb->wsize,
3e84469d
SF
990 write_size - total_written);
991 /* iov[0] is reserved for smb header */
992 iov[1].iov_base = (char *)write_data +
993 total_written;
994 iov[1].iov_len = len;
d6e04ae6 995 rc = CIFSSMBWrite2(xid, pTcon,
3e84469d 996 open_file->netfid, len,
d6e04ae6 997 *poffset, &bytes_written,
7749981e 998 iov, 1, 0);
d6e04ae6 999 } else
60808233
SF
1000 rc = CIFSSMBWrite(xid, pTcon,
1001 open_file->netfid,
1002 min_t(const int, cifs_sb->wsize,
1003 write_size - total_written),
1004 *poffset, &bytes_written,
1005 write_data + total_written,
7749981e 1006 NULL, 0);
1da177e4
LT
1007 }
1008 if (rc || (bytes_written == 0)) {
1009 if (total_written)
1010 break;
1011 else {
1012 FreeXid(xid);
1013 return rc;
1014 }
fbec9ab9
JL
1015 } else {
1016 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1017 *poffset += bytes_written;
fbec9ab9 1018 }
1da177e4
LT
1019 }
1020
a4544347 1021 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 1022
7da4b49a
JL
1023 if (total_written > 0) {
1024 spin_lock(&dentry->d_inode->i_lock);
1025 if (*poffset > dentry->d_inode->i_size)
1026 i_size_write(dentry->d_inode, *poffset);
1027 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 1028 }
7da4b49a 1029 mark_inode_dirty_sync(dentry->d_inode);
1da177e4
LT
1030 FreeXid(xid);
1031 return total_written;
1032}
1033
6508d904
JL
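/*
 * Return a referenced open handle on this inode that is valid and was
 * opened for reading, or NULL. On multiuser mounts only handles owned
 * by the current fsuid are considered.
 */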
1034struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1035 bool fsuid_only)
630f3f0c
SF
1036{
1037 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1038 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1039
1040 /* only filter by fsuid on multiuser mounts */
1041 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1042 fsuid_only = false;
630f3f0c 1043
4477288a 1044 spin_lock(&cifs_file_list_lock);
630f3f0c
SF
1045 /* we could simply get the first_list_entry since write-only entries
1046 are always at the end of the list but since the first entry might
1047 have a close pending, we go through the whole list */
1048 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1049 if (fsuid_only && open_file->uid != current_fsuid())
1050 continue;
2e396b83 1051 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1052 if (!open_file->invalidHandle) {
1053 /* found a good file */
1054 /* lock it so it will not be closed on us */
6ab409b5 1055 cifsFileInfo_get(open_file);
4477288a 1056 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1057 return open_file;
1058 } /* else might as well continue, and look for
1059 another, or simply have the caller reopen it
1060 again rather than trying to fix this handle */
1061 } else /* write only file */
1062 break; /* write only files are last so must be done */
1063 }
4477288a 1064 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1065 return NULL;
1066}
630f3f0c 1067
6508d904
JL
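/*
 * Like find_readable_file() but for a handle opened for writing. Handles
 * owned by the current task are preferred; invalidated handles are
 * reopened if possible before falling back to any available handle.
 */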
1068struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1069 bool fsuid_only)
6148a742
SF
1070{
1071 struct cifsFileInfo *open_file;
d3892294 1072 struct cifs_sb_info *cifs_sb;
2846d386 1073 bool any_available = false;
dd99cd80 1074 int rc;
6148a742 1075
60808233
SF
1076 /* Having a null inode here (because mapping->host was set to zero by
 1077 the VFS or MM) should not happen but we had reports of an oops (due to
1078 it being zero) during stress testcases so we need to check for it */
1079
fb8c4b14 1080 if (cifs_inode == NULL) {
b6b38f70 1081 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1082 dump_stack();
1083 return NULL;
1084 }
1085
d3892294
JL
1086 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1087
6508d904
JL
1088 /* only filter by fsuid on multiuser mounts */
1089 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1090 fsuid_only = false;
1091
4477288a 1092 spin_lock(&cifs_file_list_lock);
9b22b0b7 1093refind_writable:
6148a742 1094 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1095 if (!any_available && open_file->pid != current->tgid)
1096 continue;
1097 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1098 continue;
2e396b83 1099 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 1100 cifsFileInfo_get(open_file);
9b22b0b7
SF
1101
1102 if (!open_file->invalidHandle) {
1103 /* found a good writable file */
4477288a 1104 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1105 return open_file;
1106 }
8840dee9 1107
4477288a 1108 spin_unlock(&cifs_file_list_lock);
cdff08e7 1109
9b22b0b7 1110 /* Had to unlock since following call can block */
15886177 1111 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1112 if (!rc)
1113 return open_file;
9b22b0b7 1114
cdff08e7 1115 /* if it fails, try another handle if possible */
b6b38f70 1116 cFYI(1, "wp failed on reopen file");
6ab409b5 1117 cifsFileInfo_put(open_file);
8840dee9 1118
cdff08e7
SF
1119 spin_lock(&cifs_file_list_lock);
1120
9b22b0b7
SF
1121 /* else we simply continue to the next entry. Thus
1122 we do not loop on reopen errors. If we
1123 can not reopen the file, for example if we
1124 reconnected to a server with another client
1125 racing to delete or lock the file we would not
1126 make progress if we restarted before the beginning
1127 of the loop here. */
6148a742
SF
1128 }
1129 }
2846d386
JL
1130 /* couldn't find useable FH with same pid, try any available */
1131 if (!any_available) {
1132 any_available = true;
1133 goto refind_writable;
1134 }
4477288a 1135 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1136 return NULL;
1137}
1138
1da177e4
LT
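/*
 * Write the byte range [from, to) of a page back to the server using any
 * writable handle for the inode; the range is clamped so the write never
 * extends the file.
 */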
1139static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1140{
1141 struct address_space *mapping = page->mapping;
1142 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1143 char *write_data;
1144 int rc = -EFAULT;
1145 int bytes_written = 0;
1da177e4 1146 struct inode *inode;
6148a742 1147 struct cifsFileInfo *open_file;
1da177e4
LT
1148
1149 if (!mapping || !mapping->host)
1150 return -EFAULT;
1151
1152 inode = page->mapping->host;
1da177e4
LT
1153
1154 offset += (loff_t)from;
1155 write_data = kmap(page);
1156 write_data += from;
1157
1158 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1159 kunmap(page);
1160 return -EIO;
1161 }
1162
1163 /* racing with truncate? */
1164 if (offset > mapping->host->i_size) {
1165 kunmap(page);
1166 return 0; /* don't care */
1167 }
1168
1169 /* check to make sure that we are not extending the file */
1170 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1171 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1172
6508d904 1173 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1174 if (open_file) {
7da4b49a
JL
1175 bytes_written = cifs_write(open_file, write_data,
1176 to - from, &offset);
6ab409b5 1177 cifsFileInfo_put(open_file);
1da177e4 1178 /* Does mm or vfs already set times? */
6148a742 1179 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1180 if ((bytes_written > 0) && (offset))
6148a742 1181 rc = 0;
bb5a9a04
SF
1182 else if (bytes_written < 0)
1183 rc = bytes_written;
6148a742 1184 } else {
b6b38f70 1185 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1186 rc = -EIO;
1187 }
1188
1189 kunmap(page);
1190 return rc;
1191}
1192
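/*
 * Gather runs of contiguous dirty pages (up to wsize bytes) and write
 * each run with a single CIFSSMBWrite2 call. Falls back to
 * generic_writepages() when wsize is smaller than the page size, when no
 * writable handle exists, or when signing is in use (unless experimental
 * support is enabled).
 */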
1da177e4 1193static int cifs_writepages(struct address_space *mapping,
37c0eb46 1194 struct writeback_control *wbc)
1da177e4 1195{
37c0eb46
SF
1196 unsigned int bytes_to_write;
1197 unsigned int bytes_written;
1198 struct cifs_sb_info *cifs_sb;
1199 int done = 0;
111ebb6e 1200 pgoff_t end;
37c0eb46 1201 pgoff_t index;
fb8c4b14
SF
1202 int range_whole = 0;
1203 struct kvec *iov;
84d2f07e 1204 int len;
37c0eb46
SF
1205 int n_iov = 0;
1206 pgoff_t next;
1207 int nr_pages;
1208 __u64 offset = 0;
23e7dd7d 1209 struct cifsFileInfo *open_file;
ba00ba64 1210 struct cifsTconInfo *tcon;
fbec9ab9 1211 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1212 struct page *page;
1213 struct pagevec pvec;
1214 int rc = 0;
1215 int scanned = 0;
7749981e 1216 int xid;
1da177e4 1217
37c0eb46 1218 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1219
37c0eb46
SF
1220 /*
1221 * If wsize is smaller that the page cache size, default to writing
1222 * one page at a time via cifs_writepage
1223 */
1224 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1225 return generic_writepages(mapping, wbc);
1226
9a0c8230 1227 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1228 if (iov == NULL)
9a0c8230
SF
1229 return generic_writepages(mapping, wbc);
1230
37c0eb46 1231 /*
f3983c21
JL
1232 * if there's no open file, then this is likely to fail too,
1233 * but it'll at least handle the return. Maybe it should be
1234 * a BUG() instead?
37c0eb46 1235 */
6508d904 1236 open_file = find_writable_file(CIFS_I(mapping->host), false);
f3983c21 1237 if (!open_file) {
9a0c8230 1238 kfree(iov);
f3983c21
JL
1239 return generic_writepages(mapping, wbc);
1240 }
1241
13cfb733 1242 tcon = tlink_tcon(open_file->tlink);
f3983c21
JL
1243 if (!experimEnabled && tcon->ses->server->secMode &
1244 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1245 cifsFileInfo_put(open_file);
6b035904 1246 kfree(iov);
f3983c21 1247 return generic_writepages(mapping, wbc);
37c0eb46 1248 }
f3983c21 1249 cifsFileInfo_put(open_file);
37c0eb46 1250
1da177e4
LT
1251 xid = GetXid();
1252
37c0eb46 1253 pagevec_init(&pvec, 0);
111ebb6e 1254 if (wbc->range_cyclic) {
37c0eb46 1255 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1256 end = -1;
1257 } else {
1258 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1259 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1260 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1261 range_whole = 1;
37c0eb46
SF
1262 scanned = 1;
1263 }
1264retry:
1265 while (!done && (index <= end) &&
1266 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1267 PAGECACHE_TAG_DIRTY,
1268 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1269 int first;
1270 unsigned int i;
1271
37c0eb46
SF
1272 first = -1;
1273 next = 0;
1274 n_iov = 0;
1275 bytes_to_write = 0;
1276
1277 for (i = 0; i < nr_pages; i++) {
1278 page = pvec.pages[i];
1279 /*
1280 * At this point we hold neither mapping->tree_lock nor
1281 * lock on the page itself: the page may be truncated or
1282 * invalidated (changing page->mapping to NULL), or even
1283 * swizzled back from swapper_space to tmpfs file
1284 * mapping
1285 */
1286
1287 if (first < 0)
1288 lock_page(page);
529ae9aa 1289 else if (!trylock_page(page))
37c0eb46
SF
1290 break;
1291
1292 if (unlikely(page->mapping != mapping)) {
1293 unlock_page(page);
1294 break;
1295 }
1296
111ebb6e 1297 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1298 done = 1;
1299 unlock_page(page);
1300 break;
1301 }
1302
1303 if (next && (page->index != next)) {
1304 /* Not next consecutive page */
1305 unlock_page(page);
1306 break;
1307 }
1308
1309 if (wbc->sync_mode != WB_SYNC_NONE)
1310 wait_on_page_writeback(page);
1311
1312 if (PageWriteback(page) ||
cb876f45 1313 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1314 unlock_page(page);
1315 break;
1316 }
84d2f07e 1317
cb876f45
LT
1318 /*
1319 * This actually clears the dirty bit in the radix tree.
1320 * See cifs_writepage() for more commentary.
1321 */
1322 set_page_writeback(page);
1323
84d2f07e
SF
1324 if (page_offset(page) >= mapping->host->i_size) {
1325 done = 1;
1326 unlock_page(page);
cb876f45 1327 end_page_writeback(page);
84d2f07e
SF
1328 break;
1329 }
1330
37c0eb46
SF
1331 /*
1332 * BB can we get rid of this? pages are held by pvec
1333 */
1334 page_cache_get(page);
1335
84d2f07e
SF
1336 len = min(mapping->host->i_size - page_offset(page),
1337 (loff_t)PAGE_CACHE_SIZE);
1338
37c0eb46
SF
1339 /* reserve iov[0] for the smb header */
1340 n_iov++;
1341 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1342 iov[n_iov].iov_len = len;
1343 bytes_to_write += len;
37c0eb46
SF
1344
1345 if (first < 0) {
1346 first = i;
1347 offset = page_offset(page);
1348 }
1349 next = page->index + 1;
1350 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1351 break;
1352 }
1353 if (n_iov) {
941b853d 1354retry_write:
6508d904
JL
1355 open_file = find_writable_file(CIFS_I(mapping->host),
1356 false);
23e7dd7d 1357 if (!open_file) {
b6b38f70 1358 cERROR(1, "No writable handles for inode");
23e7dd7d 1359 rc = -EBADF;
1047abc1 1360 } else {
f3983c21 1361 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
23e7dd7d
SF
1362 bytes_to_write, offset,
1363 &bytes_written, iov, n_iov,
7749981e 1364 0);
6ab409b5 1365 cifsFileInfo_put(open_file);
f3983c21 1366 }
fbec9ab9 1367
941b853d
JL
1368 cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
1369
1370 /*
1371 * For now, treat a short write as if nothing got
1372 * written. A zero length write however indicates
1373 * ENOSPC or EFBIG. We have no way to know which
1374 * though, so call it ENOSPC for now. EFBIG would
1375 * get translated to AS_EIO anyway.
1376 *
1377 * FIXME: make it take into account the data that did
1378 * get written
1379 */
1380 if (rc == 0) {
1381 if (bytes_written == 0)
1382 rc = -ENOSPC;
1383 else if (bytes_written < bytes_to_write)
1384 rc = -EAGAIN;
1385 }
1386
1387 /* retry on data-integrity flush */
1388 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
1389 goto retry_write;
1390
1391 /* fix the stats and EOF */
1392 if (bytes_written > 0) {
f3983c21 1393 cifs_stats_bytes_written(tcon, bytes_written);
941b853d 1394 cifs_update_eof(cifsi, offset, bytes_written);
37c0eb46 1395 }
f3983c21 1396
37c0eb46
SF
1397 for (i = 0; i < n_iov; i++) {
1398 page = pvec.pages[first + i];
941b853d
JL
1399 /* on retryable write error, redirty page */
1400 if (rc == -EAGAIN)
1401 redirty_page_for_writepage(wbc, page);
1402 else if (rc != 0)
eb9bdaa3 1403 SetPageError(page);
37c0eb46
SF
1404 kunmap(page);
1405 unlock_page(page);
cb876f45 1406 end_page_writeback(page);
37c0eb46
SF
1407 page_cache_release(page);
1408 }
941b853d
JL
1409
1410 if (rc != -EAGAIN)
1411 mapping_set_error(mapping, rc);
1412 else
1413 rc = 0;
1414
37c0eb46
SF
1415 if ((wbc->nr_to_write -= n_iov) <= 0)
1416 done = 1;
1417 index = next;
b066a48c
DK
1418 } else
1419 /* Need to re-find the pages we skipped */
1420 index = pvec.pages[0]->index + 1;
1421
37c0eb46
SF
1422 pagevec_release(&pvec);
1423 }
1424 if (!scanned && !done) {
1425 /*
1426 * We hit the last page and there is more work to be done: wrap
1427 * back to the start of the file
1428 */
1429 scanned = 1;
1430 index = 0;
1431 goto retry;
1432 }
111ebb6e 1433 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1434 mapping->writeback_index = index;
1435
1da177e4 1436 FreeXid(xid);
9a0c8230 1437 kfree(iov);
1da177e4
LT
1438 return rc;
1439}
1da177e4 1440
fb8c4b14 1441static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1442{
1443 int rc = -EFAULT;
1444 int xid;
1445
1446 xid = GetXid();
1447/* BB add check for wbc flags */
1448 page_cache_get(page);
ad7a2926 1449 if (!PageUptodate(page))
b6b38f70 1450 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1451
1452 /*
1453 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1454 *
1455 * A writepage() implementation always needs to do either this,
1456 * or re-dirty the page with "redirty_page_for_writepage()" in
1457 * the case of a failure.
1458 *
1459 * Just unlocking the page will cause the radix tree tag-bits
1460 * to fail to update with the state of the page correctly.
1461 */
fb8c4b14 1462 set_page_writeback(page);
1da177e4
LT
1463 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1464 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1465 unlock_page(page);
cb876f45
LT
1466 end_page_writeback(page);
1467 page_cache_release(page);
1da177e4
LT
1468 FreeXid(xid);
1469 return rc;
1470}
1471
d9414774
NP
1472static int cifs_write_end(struct file *file, struct address_space *mapping,
1473 loff_t pos, unsigned len, unsigned copied,
1474 struct page *page, void *fsdata)
1da177e4 1475{
d9414774
NP
1476 int rc;
1477 struct inode *inode = mapping->host;
1da177e4 1478
b6b38f70
JP
1479 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1480 page, pos, copied);
d9414774 1481
a98ee8c1
JL
1482 if (PageChecked(page)) {
1483 if (copied == len)
1484 SetPageUptodate(page);
1485 ClearPageChecked(page);
1486 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1487 SetPageUptodate(page);
ad7a2926 1488
1da177e4 1489 if (!PageUptodate(page)) {
d9414774
NP
1490 char *page_data;
1491 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1492 int xid;
1493
1494 xid = GetXid();
1da177e4
LT
1495 /* this is probably better than directly calling
1496 partialpage_write since in this function the file handle is
1497 known which we might as well leverage */
1498 /* BB check if anything else missing out of ppw
1499 such as updating last write time */
1500 page_data = kmap(page);
7da4b49a
JL
1501 rc = cifs_write(file->private_data, page_data + offset,
1502 copied, &pos);
d9414774 1503 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1504 kunmap(page);
d9414774
NP
1505
1506 FreeXid(xid);
fb8c4b14 1507 } else {
d9414774
NP
1508 rc = copied;
1509 pos += copied;
1da177e4
LT
1510 set_page_dirty(page);
1511 }
1512
d9414774
NP
1513 if (rc > 0) {
1514 spin_lock(&inode->i_lock);
1515 if (pos > inode->i_size)
1516 i_size_write(inode, pos);
1517 spin_unlock(&inode->i_lock);
1518 }
1519
1520 unlock_page(page);
1521 page_cache_release(page);
1522
1da177e4
LT
1523 return rc;
1524}
1525
8be7e6ba 1526int cifs_strict_fsync(struct file *file, int datasync)
1da177e4
LT
1527{
1528 int xid;
1529 int rc = 0;
b298f223 1530 struct cifsTconInfo *tcon;
c21dfb69 1531 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1532 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 1533 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4
LT
1534
1535 xid = GetXid();
1536
b6b38f70 1537 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1538 file->f_path.dentry->d_name.name, datasync);
50c2f753 1539
8be7e6ba
PS
1540 if (!CIFS_I(inode)->clientCanCacheRead)
1541 cifs_invalidate_mapping(inode);
eb4b756b 1542
8be7e6ba
PS
1543 tcon = tlink_tcon(smbfile->tlink);
1544 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1545 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1546
1547 FreeXid(xid);
1548 return rc;
1549}
1550
1551int cifs_fsync(struct file *file, int datasync)
1552{
1553 int xid;
1554 int rc = 0;
1555 struct cifsTconInfo *tcon;
1556 struct cifsFileInfo *smbfile = file->private_data;
1557 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1558
1559 xid = GetXid();
1560
1561 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1562 file->f_path.dentry->d_name.name, datasync);
1563
1564 tcon = tlink_tcon(smbfile->tlink);
1565 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1566 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 1567
1da177e4
LT
1568 FreeXid(xid);
1569 return rc;
1570}
1571
1da177e4
LT
1572/*
1573 * As file closes, flush all cached write data for this inode checking
1574 * for write behind errors.
1575 */
75e1fcc0 1576int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1577{
fb8c4b14 1578 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1579 int rc = 0;
1580
eb4b756b 1581 if (file->f_mode & FMODE_WRITE)
d3f1322a 1582 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1583
b6b38f70 1584 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1585
1586 return rc;
1587}
1588
72432ffc
PS
1589static int
1590cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1591{
1592 int rc = 0;
1593 unsigned long i;
1594
1595 for (i = 0; i < num_pages; i++) {
1596 pages[i] = alloc_page(__GFP_HIGHMEM);
1597 if (!pages[i]) {
1598 /*
1599 * save number of pages we have already allocated and
1600 * return with ENOMEM error
1601 */
1602 num_pages = i;
1603 rc = -ENOMEM;
1604 goto error;
1605 }
1606 }
1607
1608 return rc;
1609
1610error:
1611 for (i = 0; i < num_pages; i++)
1612 put_page(pages[i]);
1613 return rc;
1614}
1615
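/*
 * Return the number of pages needed to hold min(len, wsize) bytes; the
 * clamped length is also passed back through *cur_len when non-NULL.
 */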
1616static inline
1617size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1618{
1619 size_t num_pages;
1620 size_t clen;
1621
1622 clen = min_t(const size_t, len, wsize);
1623 num_pages = clen / PAGE_CACHE_SIZE;
1624 if (clen % PAGE_CACHE_SIZE)
1625 num_pages++;
1626
1627 if (cur_len)
1628 *cur_len = clen;
1629
1630 return num_pages;
1631}
1632
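/*
 * Copy the user iovec into temporary pages and send them to the server
 * with CIFSSMBWrite2 in chunks of at most wsize bytes, updating the
 * cached EOF and i_size as data is written.
 */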
1633static ssize_t
1634cifs_iovec_write(struct file *file, const struct iovec *iov,
1635 unsigned long nr_segs, loff_t *poffset)
1636{
76429c14
PS
1637 unsigned int written;
1638 unsigned long num_pages, npages, i;
1639 size_t copied, len, cur_len;
1640 ssize_t total_written = 0;
72432ffc
PS
1641 struct kvec *to_send;
1642 struct page **pages;
1643 struct iov_iter it;
1644 struct inode *inode;
1645 struct cifsFileInfo *open_file;
1646 struct cifsTconInfo *pTcon;
1647 struct cifs_sb_info *cifs_sb;
1648 int xid, rc;
1649
1650 len = iov_length(iov, nr_segs);
1651 if (!len)
1652 return 0;
1653
1654 rc = generic_write_checks(file, poffset, &len, 0);
1655 if (rc)
1656 return rc;
1657
1658 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1659 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1660
 1661 pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
1662 if (!pages)
1663 return -ENOMEM;
1664
1665 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1666 if (!to_send) {
1667 kfree(pages);
1668 return -ENOMEM;
1669 }
1670
1671 rc = cifs_write_allocate_pages(pages, num_pages);
1672 if (rc) {
1673 kfree(pages);
1674 kfree(to_send);
1675 return rc;
1676 }
1677
1678 xid = GetXid();
1679 open_file = file->private_data;
1680 pTcon = tlink_tcon(open_file->tlink);
1681 inode = file->f_path.dentry->d_inode;
1682
1683 iov_iter_init(&it, iov, nr_segs, len, 0);
1684 npages = num_pages;
1685
1686 do {
1687 size_t save_len = cur_len;
1688 for (i = 0; i < npages; i++) {
1689 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1690 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1691 copied);
1692 cur_len -= copied;
1693 iov_iter_advance(&it, copied);
1694 to_send[i+1].iov_base = kmap(pages[i]);
1695 to_send[i+1].iov_len = copied;
1696 }
1697
1698 cur_len = save_len - cur_len;
1699
1700 do {
1701 if (open_file->invalidHandle) {
1702 rc = cifs_reopen_file(open_file, false);
1703 if (rc != 0)
1704 break;
1705 }
1706 rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
1707 cur_len, *poffset, &written,
1708 to_send, npages, 0);
1709 } while (rc == -EAGAIN);
1710
1711 for (i = 0; i < npages; i++)
1712 kunmap(pages[i]);
1713
1714 if (written) {
1715 len -= written;
1716 total_written += written;
1717 cifs_update_eof(CIFS_I(inode), *poffset, written);
1718 *poffset += written;
1719 } else if (rc < 0) {
1720 if (!total_written)
1721 total_written = rc;
1722 break;
1723 }
1724
1725 /* get length and number of kvecs of the next write */
1726 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1727 } while (len > 0);
1728
1729 if (total_written > 0) {
1730 spin_lock(&inode->i_lock);
1731 if (*poffset > inode->i_size)
1732 i_size_write(inode, *poffset);
1733 spin_unlock(&inode->i_lock);
1734 }
1735
1736 cifs_stats_bytes_written(pTcon, total_written);
1737 mark_inode_dirty_sync(inode);
1738
1739 for (i = 0; i < num_pages; i++)
1740 put_page(pages[i]);
1741 kfree(to_send);
1742 kfree(pages);
1743 FreeXid(xid);
1744 return total_written;
1745}
1746
1747static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
1748 unsigned long nr_segs, loff_t pos)
1749{
1750 ssize_t written;
1751 struct inode *inode;
1752
1753 inode = iocb->ki_filp->f_path.dentry->d_inode;
1754
1755 /*
1757 * BB - optimize this path for the case when signing is disabled. We
1758 * could drop the extra memory-to-memory copy and use the iovec buffers
1759 * directly to construct the write request.
1759 */
1760
1761 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1762 if (written > 0) {
1763 CIFS_I(inode)->invalid_mapping = true;
1764 iocb->ki_pos = pos;
1765 }
1766
1767 return written;
1768}
1769
1770ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1771 unsigned long nr_segs, loff_t pos)
1772{
1773 struct inode *inode;
1774
1775 inode = iocb->ki_filp->f_path.dentry->d_inode;
1776
1777 if (CIFS_I(inode)->clientCanCacheAll)
1778 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1779
1780 /*
1781 * In strict cache mode we need to write the data to the server exactly
1782 * from pos to pos+len-1 rather than flush all affected pages, because
1783 * flushing may cause an error with mandatory locks on those pages that
1784 * does not apply to the region from pos to pos+len-1.
1785 */
1786
1787 return cifs_user_writev(iocb, iov, nr_segs, pos);
1788}
1789
a70307ee
PS
1790static ssize_t
1791cifs_iovec_read(struct file *file, const struct iovec *iov,
1792 unsigned long nr_segs, loff_t *poffset)
1da177e4 1793{
a70307ee
PS
1794 int rc;
1795 int xid;
76429c14
PS
1796 ssize_t total_read;
1797 unsigned int bytes_read = 0;
a70307ee
PS
1798 size_t len, cur_len;
1799 int iov_offset = 0;
1da177e4
LT
1800 struct cifs_sb_info *cifs_sb;
1801 struct cifsTconInfo *pTcon;
1da177e4 1802 struct cifsFileInfo *open_file;
1da177e4 1803 struct smb_com_read_rsp *pSMBr;
a70307ee
PS
1804 char *read_data;
1805
1806 if (!nr_segs)
1807 return 0;
1808
1809 len = iov_length(iov, nr_segs);
1810 if (!len)
1811 return 0;
1da177e4
LT
1812
1813 xid = GetXid();
e6a00296 1814 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1815
c21dfb69 1816 open_file = file->private_data;
13cfb733 1817 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1818
ad7a2926 1819 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1820 cFYI(1, "attempting read on write only file instance");
ad7a2926 1821
a70307ee
PS
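	/* issue reads of at most rsize bytes and copy each response's data
	   area into the user iovec until len bytes have been transferred or
	   an error or short read ends the loop */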
1822 for (total_read = 0; total_read < len; total_read += bytes_read) {
1823 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
1da177e4 1824 rc = -EAGAIN;
a70307ee
PS
1825 read_data = NULL;
1826
1da177e4 1827 while (rc == -EAGAIN) {
ec637e3f 1828 int buf_type = CIFS_NO_BUFFER;
cdff08e7 1829 if (open_file->invalidHandle) {
15886177 1830 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1831 if (rc != 0)
1832 break;
1833 }
a70307ee
PS
1834 rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
1835 cur_len, *poffset, &bytes_read,
1836 &read_data, &buf_type);
1837 pSMBr = (struct smb_com_read_rsp *)read_data;
1838 if (read_data) {
1839 char *data_offset = read_data + 4 +
1840 le16_to_cpu(pSMBr->DataOffset);
1841 if (memcpy_toiovecend(iov, data_offset,
1842 iov_offset, bytes_read))
93544cc6 1843 rc = -EFAULT;
fb8c4b14 1844 if (buf_type == CIFS_SMALL_BUFFER)
a70307ee 1845 cifs_small_buf_release(read_data);
fb8c4b14 1846 else if (buf_type == CIFS_LARGE_BUFFER)
a70307ee
PS
1847 cifs_buf_release(read_data);
1848 read_data = NULL;
1849 iov_offset += bytes_read;
1da177e4
LT
1850 }
1851 }
a70307ee 1852
1da177e4
LT
1853 if (rc || (bytes_read == 0)) {
1854 if (total_read) {
1855 break;
1856 } else {
1857 FreeXid(xid);
1858 return rc;
1859 }
1860 } else {
a4544347 1861 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1862 *poffset += bytes_read;
1863 }
1864 }
a70307ee 1865
1da177e4
LT
1866 FreeXid(xid);
1867 return total_read;
1868}
1869
a70307ee
PS
1870ssize_t cifs_user_read(struct file *file, char __user *read_data,
1871 size_t read_size, loff_t *poffset)
1872{
1873 struct iovec iov;
1874 iov.iov_base = read_data;
1875 iov.iov_len = read_size;
1876
1877 return cifs_iovec_read(file, &iov, 1, poffset);
1878}
1879
1880static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
1881 unsigned long nr_segs, loff_t pos)
1882{
1883 ssize_t read;
1884
1885 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1886 if (read > 0)
1887 iocb->ki_pos = pos;
1888
1889 return read;
1890}
1891
1892ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1893 unsigned long nr_segs, loff_t pos)
1894{
1895 struct inode *inode;
1896
1897 inode = iocb->ki_filp->f_path.dentry->d_inode;
1898
1899 if (CIFS_I(inode)->clientCanCacheRead)
1900 return generic_file_aio_read(iocb, iov, nr_segs, pos);
1901
1902 /*
1903 * In strict cache mode we need to read from the server every time if
1904 * we don't have a level II oplock, because the server can delay the
1905 * mtime change and so we cannot decide whether to invalidate the inode.
1906 * Reading the cached pages can also fail if there are mandatory locks
1907 * on pages affected by this read but not on the region from pos to
1908 * pos+len-1.
1909 */
1910
1911 return cifs_user_readv(iocb, iov, nr_segs, pos);
1912}
1da177e4
LT
1913
1914static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 1915 loff_t *poffset)
1da177e4
LT
1916{
1917 int rc = -EACCES;
1918 unsigned int bytes_read = 0;
1919 unsigned int total_read;
1920 unsigned int current_read_size;
1921 struct cifs_sb_info *cifs_sb;
1922 struct cifsTconInfo *pTcon;
1923 int xid;
1924 char *current_offset;
1925 struct cifsFileInfo *open_file;
ec637e3f 1926 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1927
1928 xid = GetXid();
e6a00296 1929 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1930
1931 if (file->private_data == NULL) {
0f3bc09e 1932 rc = -EBADF;
1da177e4 1933 FreeXid(xid);
0f3bc09e 1934 return rc;
1da177e4 1935 }
c21dfb69 1936 open_file = file->private_data;
13cfb733 1937 pTcon = tlink_tcon(open_file->tlink);
1da177e4
LT
1938
1939 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1940 cFYI(1, "attempting read on write only file instance");
1da177e4 1941
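	/* read into the caller's buffer in chunks of at most rsize bytes,
	   reopening an invalidated handle and retrying on -EAGAIN */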
fb8c4b14 1942 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1943 read_size > total_read;
1944 total_read += bytes_read, current_offset += bytes_read) {
1945 current_read_size = min_t(const int, read_size - total_read,
1946 cifs_sb->rsize);
f9f5c817
SF
1947 /* For Windows ME and 9x we do not want to request more than
1948 was negotiated since the server will refuse the read then */
fb8c4b14 1949 if ((pTcon->ses) &&
f9f5c817
SF
1950 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1951 current_read_size = min_t(const int, current_read_size,
1952 pTcon->ses->server->maxBuf - 128);
1953 }
1da177e4
LT
1954 rc = -EAGAIN;
1955 while (rc == -EAGAIN) {
cdff08e7 1956 if (open_file->invalidHandle) {
15886177 1957 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1958 if (rc != 0)
1959 break;
1960 }
bfa0d75a 1961 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1962 open_file->netfid,
1963 current_read_size, *poffset,
1964 &bytes_read, &current_offset,
1965 &buf_type);
1da177e4
LT
1966 }
1967 if (rc || (bytes_read == 0)) {
1968 if (total_read) {
1969 break;
1970 } else {
1971 FreeXid(xid);
1972 return rc;
1973 }
1974 } else {
a4544347 1975 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1976 *poffset += bytes_read;
1977 }
1978 }
1979 FreeXid(xid);
1980 return total_read;
1981}
1982
7a6a19b1
PS
1983int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1984{
1985 int rc, xid;
1986 struct inode *inode = file->f_path.dentry->d_inode;
1987
1988 xid = GetXid();
1989
1990 if (!CIFS_I(inode)->clientCanCacheRead)
1991 cifs_invalidate_mapping(inode);
1992
1993 rc = generic_file_mmap(file, vma);
1994 FreeXid(xid);
1995 return rc;
1996}
1997
1da177e4
LT
1998int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1999{
1da177e4
LT
2000 int rc, xid;
2001
2002 xid = GetXid();
abab095d 2003 rc = cifs_revalidate_file(file);
1da177e4 2004 if (rc) {
b6b38f70 2005 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
2006 FreeXid(xid);
2007 return rc;
2008 }
2009 rc = generic_file_mmap(file, vma);
2010 FreeXid(xid);
2011 return rc;
2012}
2013
2014
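/*
 * Copy the returned read data into the pages taken from the readahead list,
 * adding each page to the page cache (and FS-Cache) and zero-filling the
 * tail of a final partial page.
 */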
fb8c4b14 2015static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 2016 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
2017{
2018 struct page *page;
2019 char *target;
2020
2021 while (bytes_read > 0) {
2022 if (list_empty(pages))
2023 break;
2024
2025 page = list_entry(pages->prev, struct page, lru);
2026 list_del(&page->lru);
2027
315e995c 2028 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
2029 GFP_KERNEL)) {
2030 page_cache_release(page);
b6b38f70 2031 cFYI(1, "Add page cache failed");
3079ca62
SF
2032 data += PAGE_CACHE_SIZE;
2033 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
2034 continue;
2035 }
06b43672 2036 page_cache_release(page);
1da177e4 2037
fb8c4b14 2038 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
2039
2040 if (PAGE_CACHE_SIZE > bytes_read) {
2041 memcpy(target, data, bytes_read);
2042 /* zero the tail end of this partial page */
fb8c4b14 2043 memset(target + bytes_read, 0,
1da177e4
LT
2044 PAGE_CACHE_SIZE - bytes_read);
2045 bytes_read = 0;
2046 } else {
2047 memcpy(target, data, PAGE_CACHE_SIZE);
2048 bytes_read -= PAGE_CACHE_SIZE;
2049 }
2050 kunmap_atomic(target, KM_USER0);
2051
2052 flush_dcache_page(page);
2053 SetPageUptodate(page);
2054 unlock_page(page);
1da177e4 2055 data += PAGE_CACHE_SIZE;
9dc06558
SJ
2056
2057 /* add page to FS-Cache */
2058 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
2059 }
2060 return;
2061}
2062
2063static int cifs_readpages(struct file *file, struct address_space *mapping,
2064 struct list_head *page_list, unsigned num_pages)
2065{
2066 int rc = -EACCES;
2067 int xid;
2068 loff_t offset;
2069 struct page *page;
2070 struct cifs_sb_info *cifs_sb;
2071 struct cifsTconInfo *pTcon;
2c2130e1 2072 unsigned int bytes_read = 0;
fb8c4b14 2073 unsigned int read_size, i;
1da177e4
LT
2074 char *smb_read_data = NULL;
2075 struct smb_com_read_rsp *pSMBr;
1da177e4 2076 struct cifsFileInfo *open_file;
ec637e3f 2077 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
2078
2079 xid = GetXid();
2080 if (file->private_data == NULL) {
0f3bc09e 2081 rc = -EBADF;
1da177e4 2082 FreeXid(xid);
0f3bc09e 2083 return rc;
1da177e4 2084 }
c21dfb69 2085 open_file = file->private_data;
e6a00296 2086 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 2087 pTcon = tlink_tcon(open_file->tlink);
bfa0d75a 2088
56698236
SJ
2089 /*
2090 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2091 * immediately if the cookie is negative
2092 */
2093 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2094 &num_pages);
2095 if (rc == 0)
2096 goto read_complete;
2097
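	/*
	 * Coalesce runs of contiguous pages from the readahead list into
	 * reads of up to rsize bytes, then copy the returned data into the
	 * page cache with cifs_copy_cache_pages.
	 */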
f19159dc 2098 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
2099 for (i = 0; i < num_pages; ) {
2100 unsigned contig_pages;
2101 struct page *tmp_page;
2102 unsigned long expected_index;
2103
2104 if (list_empty(page_list))
2105 break;
2106
2107 page = list_entry(page_list->prev, struct page, lru);
2108 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2109
2110 /* count adjacent pages that we will read into */
2111 contig_pages = 0;
fb8c4b14 2112 expected_index =
1da177e4 2113 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 2114 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
2115 if (tmp_page->index == expected_index) {
2116 contig_pages++;
2117 expected_index++;
2118 } else
fb8c4b14 2119 break;
1da177e4
LT
2120 }
2121 if (contig_pages + i > num_pages)
2122 contig_pages = num_pages - i;
2123
2124 /* for reads over a certain size we could initiate async
2125 read ahead */
2126
2127 read_size = contig_pages * PAGE_CACHE_SIZE;
2128 /* Read size needs to be in multiples of one page */
2129 read_size = min_t(const unsigned int, read_size,
2130 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2131 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2132 read_size, contig_pages);
1da177e4
LT
2133 rc = -EAGAIN;
2134 while (rc == -EAGAIN) {
cdff08e7 2135 if (open_file->invalidHandle) {
15886177 2136 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
2137 if (rc != 0)
2138 break;
2139 }
2140
bfa0d75a 2141 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2142 open_file->netfid,
2143 read_size, offset,
2144 &bytes_read, &smb_read_data,
2145 &buf_type);
a9d02ad4 2146 /* BB more RC checks ? */
fb8c4b14 2147 if (rc == -EAGAIN) {
1da177e4 2148 if (smb_read_data) {
fb8c4b14 2149 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2150 cifs_small_buf_release(smb_read_data);
fb8c4b14 2151 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2152 cifs_buf_release(smb_read_data);
1da177e4
LT
2153 smb_read_data = NULL;
2154 }
2155 }
2156 }
2157 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2158 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2159 break;
2160 } else if (bytes_read > 0) {
6f88cc2e 2161 task_io_account_read(bytes_read);
1da177e4
LT
2162 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2163 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2164 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2165 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2166
2167 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2168 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2169 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2170 i++; /* account for partial page */
2171
fb8c4b14 2172 /* the server's copy of the file can be
1da177e4 2173 smaller than the client's */
fb8c4b14
SF
2174 /* BB do we need to verify this common case?
2175 This case is ok: if we are at server EOF
1da177e4
LT
2176 we will hit it on the next read */
2177
05ac9d4b 2178 /* break; */
1da177e4
LT
2179 }
2180 } else {
b6b38f70 2181 cFYI(1, "No bytes read (%d) at offset %lld. "
f19159dc 2182 "Cleaning remaining pages from readahead list",
b6b38f70 2183 bytes_read, offset);
fb8c4b14 2184 /* BB turn off caching and do new lookup on
1da177e4 2185 file size at server? */
1da177e4
LT
2186 break;
2187 }
2188 if (smb_read_data) {
fb8c4b14 2189 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2190 cifs_small_buf_release(smb_read_data);
fb8c4b14 2191 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2192 cifs_buf_release(smb_read_data);
1da177e4
LT
2193 smb_read_data = NULL;
2194 }
2195 bytes_read = 0;
2196 }
2197
1da177e4
LT
2198/* need to free smb_read_data buf before exit */
2199 if (smb_read_data) {
fb8c4b14 2200 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2201 cifs_small_buf_release(smb_read_data);
fb8c4b14 2202 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2203 cifs_buf_release(smb_read_data);
1da177e4 2204 smb_read_data = NULL;
fb8c4b14 2205 }
1da177e4 2206
56698236 2207read_complete:
1da177e4
LT
2208 FreeXid(xid);
2209 return rc;
2210}
2211
2212static int cifs_readpage_worker(struct file *file, struct page *page,
2213 loff_t *poffset)
2214{
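	/* fill a single page, either from FS-Cache or with a sync read from
	   the server, zeroing whatever lies beyond the bytes returned */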
2215 char *read_data;
2216 int rc;
2217
56698236
SJ
2218 /* Is the page cached? */
2219 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2220 if (rc == 0)
2221 goto read_complete;
2222
1da177e4
LT
2223 page_cache_get(page);
2224 read_data = kmap(page);
2225 /* for reads over a certain size we could initiate async read ahead */
fb8c4b14 2226
1da177e4 2227 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2228
1da177e4
LT
2229 if (rc < 0)
2230 goto io_error;
2231 else
b6b38f70 2232 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2233
e6a00296
JJS
2234 file->f_path.dentry->d_inode->i_atime =
2235 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2236
1da177e4
LT
2237 if (PAGE_CACHE_SIZE > rc)
2238 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2239
2240 flush_dcache_page(page);
2241 SetPageUptodate(page);
9dc06558
SJ
2242
2243 /* send this page to the cache */
2244 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2245
1da177e4 2246 rc = 0;
fb8c4b14 2247
1da177e4 2248io_error:
fb8c4b14 2249 kunmap(page);
1da177e4 2250 page_cache_release(page);
56698236
SJ
2251
2252read_complete:
1da177e4
LT
2253 return rc;
2254}
2255
2256static int cifs_readpage(struct file *file, struct page *page)
2257{
2258 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2259 int rc = -EACCES;
2260 int xid;
2261
2262 xid = GetXid();
2263
2264 if (file->private_data == NULL) {
0f3bc09e 2265 rc = -EBADF;
1da177e4 2266 FreeXid(xid);
0f3bc09e 2267 return rc;
1da177e4
LT
2268 }
2269
b6b38f70
JP
2270 cFYI(1, "readpage %p at offset %d 0x%x\n",
2271 page, (int)offset, (int)offset);
1da177e4
LT
2272
2273 rc = cifs_readpage_worker(file, page, &offset);
2274
2275 unlock_page(page);
2276
2277 FreeXid(xid);
2278 return rc;
2279}
2280
a403a0a3
SF
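/* returns 1 if any open instance of this inode was opened with write access */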
2281static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2282{
2283 struct cifsFileInfo *open_file;
2284
4477288a 2285 spin_lock(&cifs_file_list_lock);
a403a0a3 2286 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2287 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2288 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2289 return 1;
2290 }
2291 }
4477288a 2292 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2293 return 0;
2294}
2295
1da177e4
LT
2296/* We do not want to update the file size from the server for inodes
2297 open for write, to avoid races with writepage extending the file.
2298 In the future we could consider allowing the inode to be refreshed
fb8c4b14 2299 only on increases in the file size,
1da177e4
LT
2300 but this is tricky to do without racing with writebehind
2301 page caching in the current Linux kernel design */
4b18f2a9 2302bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2303{
a403a0a3 2304 if (!cifsInode)
4b18f2a9 2305 return true;
50c2f753 2306
a403a0a3
SF
2307 if (is_inode_writable(cifsInode)) {
2308 /* This inode is open for write at least once */
c32a0b68
SF
2309 struct cifs_sb_info *cifs_sb;
2310
c32a0b68 2311 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2312 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2313 /* since no page cache to corrupt on directio
c32a0b68 2314 we can change size safely */
4b18f2a9 2315 return true;
c32a0b68
SF
2316 }
2317
fb8c4b14 2318 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2319 return true;
7ba52631 2320
4b18f2a9 2321 return false;
23e7dd7d 2322 } else
4b18f2a9 2323 return true;
1da177e4
LT
2324}
2325
d9414774
NP
2326static int cifs_write_begin(struct file *file, struct address_space *mapping,
2327 loff_t pos, unsigned len, unsigned flags,
2328 struct page **pagep, void **fsdata)
1da177e4 2329{
d9414774
NP
2330 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2331 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2332 loff_t page_start = pos & PAGE_MASK;
2333 loff_t i_size;
2334 struct page *page;
2335 int rc = 0;
d9414774 2336
b6b38f70 2337 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2338
54566b2c 2339 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2340 if (!page) {
2341 rc = -ENOMEM;
2342 goto out;
2343 }
8a236264 2344
a98ee8c1
JL
2345 if (PageUptodate(page))
2346 goto out;
8a236264 2347
a98ee8c1
JL
2348 /*
2349 * If we write a full page it will be up to date, no need to read from
2350 * the server. If the write is short, we'll end up doing a sync write
2351 * instead.
2352 */
2353 if (len == PAGE_CACHE_SIZE)
2354 goto out;
8a236264 2355
a98ee8c1
JL
2356 /*
2357 * optimize away the read when we have an oplock, and we're not
2358 * expecting to use any of the data we'd be reading in. That
2359 * is, when the page lies beyond the EOF, or straddles the EOF
2360 * and the write will cover all of the existing data.
2361 */
2362 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2363 i_size = i_size_read(mapping->host);
2364 if (page_start >= i_size ||
2365 (offset == 0 && (pos + len) >= i_size)) {
2366 zero_user_segments(page, 0, offset,
2367 offset + len,
2368 PAGE_CACHE_SIZE);
2369 /*
2370 * PageChecked means that the parts of the page
2371 * to which we're not writing are considered up
2372 * to date. Once the data is copied to the
2373 * page, it can be set uptodate.
2374 */
2375 SetPageChecked(page);
2376 goto out;
2377 }
2378 }
d9414774 2379
a98ee8c1
JL
2380 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2381 /*
2382 * might as well read a page, it is fast enough. If we get
2383 * an error, we don't need to return it. cifs_write_end will
2384 * do a sync write instead since PG_uptodate isn't set.
2385 */
2386 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2387 } else {
2388 /* we could try using another file handle if there is one -
2389 but how would we lock it to prevent close of that handle
2390 racing with this read? In any case
d9414774 2391 this will be written out by write_end so is fine */
1da177e4 2392 }
a98ee8c1
JL
2393out:
2394 *pagep = page;
2395 return rc;
1da177e4
LT
2396}
2397
85f2d6b4
SJ
2398static int cifs_release_page(struct page *page, gfp_t gfp)
2399{
2400 if (PagePrivate(page))
2401 return 0;
2402
2403 return cifs_fscache_release_page(page, gfp);
2404}
2405
2406static void cifs_invalidate_page(struct page *page, unsigned long offset)
2407{
2408 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2409
2410 if (offset == 0)
2411 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2412}
2413
9b646972 2414void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
2415{
2416 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2417 oplock_break);
a5e18bc3 2418 struct inode *inode = cfile->dentry->d_inode;
3bc303c2 2419 struct cifsInodeInfo *cinode = CIFS_I(inode);
eb4b756b 2420 int rc = 0;
3bc303c2
JL
2421
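	/*
	 * Break the lease, flush dirty pages and, if read caching was lost,
	 * wait for writeback and invalidate the cached data before the oplock
	 * release is sent back to the server.
	 */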
2422 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2423 if (cinode->clientCanCacheRead)
8737c930 2424 break_lease(inode, O_RDONLY);
d54ff732 2425 else
8737c930 2426 break_lease(inode, O_WRONLY);
3bc303c2
JL
2427 rc = filemap_fdatawrite(inode->i_mapping);
2428 if (cinode->clientCanCacheRead == 0) {
eb4b756b
JL
2429 rc = filemap_fdatawait(inode->i_mapping);
2430 mapping_set_error(inode->i_mapping, rc);
3bc303c2
JL
2431 invalidate_remote_inode(inode);
2432 }
b6b38f70 2433 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2434 }
2435
2436 /*
2437 * Releasing a stale oplock after a recent reconnect of the SMB session,
2438 * using a now-incorrect file handle, is not a data integrity issue, but
2439 * do not bother sending an oplock release if the session to the server
2440 * is still disconnected, since the server has already released the oplock.
2441 */
cdff08e7 2442 if (!cfile->oplock_break_cancelled) {
13cfb733 2443 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
12fed00d
PS
2444 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
2445 cinode->clientCanCacheRead ? 1 : 0);
b6b38f70 2446 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 2447 }
9b646972
TH
2448
2449 /*
2450 * We might have kicked in before is_valid_oplock_break()
2451 * finished grabbing reference for us. Make sure it's done by
6573e9b7 2452 * waiting for cifs_file_list_lock.
9b646972 2453 */
4477288a
JL
2454 spin_lock(&cifs_file_list_lock);
2455 spin_unlock(&cifs_file_list_lock);
9b646972
TH
2456
2457 cifs_oplock_break_put(cfile);
3bc303c2
JL
2458}
2459
5f6dbc9e 2460/* must be called while holding cifs_file_list_lock */
9b646972 2461void cifs_oplock_break_get(struct cifsFileInfo *cfile)
3bc303c2 2462{
d7c86ff8 2463 cifs_sb_active(cfile->dentry->d_sb);
3bc303c2 2464 cifsFileInfo_get(cfile);
3bc303c2
JL
2465}
2466
9b646972 2467void cifs_oplock_break_put(struct cifsFileInfo *cfile)
3bc303c2 2468{
ebe2e91e
JL
2469 struct super_block *sb = cfile->dentry->d_sb;
2470
3bc303c2 2471 cifsFileInfo_put(cfile);
ebe2e91e 2472 cifs_sb_deactive(sb);
3bc303c2
JL
2473}
2474
f5e54d6e 2475const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2476 .readpage = cifs_readpage,
2477 .readpages = cifs_readpages,
2478 .writepage = cifs_writepage,
37c0eb46 2479 .writepages = cifs_writepages,
d9414774
NP
2480 .write_begin = cifs_write_begin,
2481 .write_end = cifs_write_end,
1da177e4 2482 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2483 .releasepage = cifs_release_page,
2484 .invalidatepage = cifs_invalidate_page,
1da177e4
LT
2485 /* .direct_IO = */
2486};
273d81d6
DK
2487
2488/*
2489 * cifs_readpages requires the server to support a buffer large enough to
2490 * contain the header plus one complete page of data. Otherwise, we need
2491 * to leave cifs_readpages out of the address space operations.
2492 */
f5e54d6e 2493const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2494 .readpage = cifs_readpage,
2495 .writepage = cifs_writepage,
2496 .writepages = cifs_writepages,
d9414774
NP
2497 .write_begin = cifs_write_begin,
2498 .write_end = cifs_write_end,
273d81d6 2499 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2500 .releasepage = cifs_release_page,
2501 .invalidatepage = cifs_invalidate_page,
273d81d6
DK
2502 /* .direct_IO = */
2503};