/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifsTconInfo *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}

struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
				       pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	cifsFileInfo_put(file->private_data);
	file->private_data = NULL;

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

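/*
 * Remember a Windows-style byte-range lock taken on this open file so that a
 * later unlock request (see cifs_lock) can match and release it, and so that
 * any leftovers can be freed when the handle is torn down in cifsFileInfo_put.
 */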
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
			   __u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}

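/*
 * Set or test a byte-range lock. POSIX byte-range locks are sent when the
 * server advertises the CIFS Unix Extensions FCNTL capability and posix
 * brlocks have not been disabled at mount time; otherwise Windows-style
 * LOCKING_ANDX locks are used and tracked locally via store_file_lock so
 * that later unlock requests can find them.
 */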
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock,
					posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

fbec9ab9 847/* update the file size (if needed) after a write */
72432ffc 848void
fbec9ab9
JL
849cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
850 unsigned int bytes_written)
851{
852 loff_t end_of_write = offset + bytes_written;
853
854 if (end_of_write > cifsi->server_eof)
855 cifsi->server_eof = end_of_write;
856}
857
1da177e4
LT
858ssize_t cifs_user_write(struct file *file, const char __user *write_data,
859 size_t write_size, loff_t *poffset)
860{
50ae28f0 861 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
862 int rc = 0;
863 unsigned int bytes_written = 0;
864 unsigned int total_written;
865 struct cifs_sb_info *cifs_sb;
866 struct cifsTconInfo *pTcon;
7749981e 867 int xid;
1da177e4 868 struct cifsFileInfo *open_file;
50ae28f0 869 struct cifsInodeInfo *cifsi = CIFS_I(inode);
1da177e4 870
e6a00296 871 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 872
b6b38f70
JP
873 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
874 *poffset, file->f_path.dentry->d_name.name); */
1da177e4
LT
875
876 if (file->private_data == NULL)
877 return -EBADF;
ba00ba64 878
c21dfb69 879 open_file = file->private_data;
13cfb733 880 pTcon = tlink_tcon(open_file->tlink);
50c2f753 881
838726c4
JL
882 rc = generic_write_checks(file, poffset, &write_size, 0);
883 if (rc)
884 return rc;
885
1da177e4 886 xid = GetXid();
1da177e4 887
1da177e4
LT
888 for (total_written = 0; write_size > total_written;
889 total_written += bytes_written) {
890 rc = -EAGAIN;
891 while (rc == -EAGAIN) {
892 if (file->private_data == NULL) {
893 /* file has been closed on us */
894 FreeXid(xid);
895 /* if we have gotten here we have written some data
896 and blocked, and the file has been freed on us while
897 we blocked so return what we managed to write */
898 return total_written;
fb8c4b14 899 }
1da177e4 900 if (open_file->invalidHandle) {
1da177e4
LT
901 /* we could deadlock if we called
902 filemap_fdatawait from here so tell
903 reopen_file not to flush data to server
904 now */
15886177 905 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
906 if (rc != 0)
907 break;
908 }
909
910 rc = CIFSSMBWrite(xid, pTcon,
911 open_file->netfid,
912 min_t(const int, cifs_sb->wsize,
913 write_size - total_written),
914 *poffset, &bytes_written,
7749981e 915 NULL, write_data + total_written, 0);
1da177e4
LT
916 }
917 if (rc || (bytes_written == 0)) {
918 if (total_written)
919 break;
920 else {
921 FreeXid(xid);
922 return rc;
923 }
fbec9ab9
JL
924 } else {
925 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 926 *poffset += bytes_written;
fbec9ab9 927 }
1da177e4
LT
928 }
929
a4544347 930 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 931
fb8c4b14 932/* Do not update local mtime - server will set its actual value on write
50ae28f0
JS
933 * inode->i_ctime = inode->i_mtime =
934 * current_fs_time(inode->i_sb);*/
935 if (total_written > 0) {
936 spin_lock(&inode->i_lock);
937 if (*poffset > inode->i_size)
938 i_size_write(inode, *poffset);
939 spin_unlock(&inode->i_lock);
1da177e4 940 }
50ae28f0
JS
941 mark_inode_dirty_sync(inode);
942
1da177e4
LT
943 FreeXid(xid);
944 return total_written;
945}
946
7da4b49a
JL
947static ssize_t cifs_write(struct cifsFileInfo *open_file,
948 const char *write_data, size_t write_size,
949 loff_t *poffset)
1da177e4
LT
950{
951 int rc = 0;
952 unsigned int bytes_written = 0;
953 unsigned int total_written;
954 struct cifs_sb_info *cifs_sb;
955 struct cifsTconInfo *pTcon;
7749981e 956 int xid;
7da4b49a
JL
957 struct dentry *dentry = open_file->dentry;
958 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1da177e4 959
7da4b49a 960 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 961
b6b38f70 962 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
7da4b49a 963 *poffset, dentry->d_name.name);
1da177e4 964
13cfb733 965 pTcon = tlink_tcon(open_file->tlink);
50c2f753 966
1da177e4 967 xid = GetXid();
1da177e4 968
1da177e4
LT
969 for (total_written = 0; write_size > total_written;
970 total_written += bytes_written) {
971 rc = -EAGAIN;
972 while (rc == -EAGAIN) {
1da177e4 973 if (open_file->invalidHandle) {
1da177e4
LT
974 /* we could deadlock if we called
975 filemap_fdatawait from here so tell
fb8c4b14 976 reopen_file not to flush data to
1da177e4 977 server now */
15886177 978 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
979 if (rc != 0)
980 break;
981 }
fb8c4b14
SF
982 if (experimEnabled || (pTcon->ses->server &&
983 ((pTcon->ses->server->secMode &
08775834 984 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
c01f36a8 985 == 0))) {
3e84469d
SF
986 struct kvec iov[2];
987 unsigned int len;
988
0ae0efad 989 len = min((size_t)cifs_sb->wsize,
3e84469d
SF
990 write_size - total_written);
991 /* iov[0] is reserved for smb header */
992 iov[1].iov_base = (char *)write_data +
993 total_written;
994 iov[1].iov_len = len;
d6e04ae6 995 rc = CIFSSMBWrite2(xid, pTcon,
3e84469d 996 open_file->netfid, len,
d6e04ae6 997 *poffset, &bytes_written,
7749981e 998 iov, 1, 0);
d6e04ae6 999 } else
60808233
SF
1000 rc = CIFSSMBWrite(xid, pTcon,
1001 open_file->netfid,
1002 min_t(const int, cifs_sb->wsize,
1003 write_size - total_written),
1004 *poffset, &bytes_written,
1005 write_data + total_written,
7749981e 1006 NULL, 0);
1da177e4
LT
1007 }
1008 if (rc || (bytes_written == 0)) {
1009 if (total_written)
1010 break;
1011 else {
1012 FreeXid(xid);
1013 return rc;
1014 }
fbec9ab9
JL
1015 } else {
1016 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1017 *poffset += bytes_written;
fbec9ab9 1018 }
1da177e4
LT
1019 }
1020
a4544347 1021 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 1022
7da4b49a
JL
1023 if (total_written > 0) {
1024 spin_lock(&dentry->d_inode->i_lock);
1025 if (*poffset > dentry->d_inode->i_size)
1026 i_size_write(dentry->d_inode, *poffset);
1027 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 1028 }
7da4b49a 1029 mark_inode_dirty_sync(dentry->d_inode);
1da177e4
LT
1030 FreeXid(xid);
1031 return total_written;
1032}
1033
6508d904
JL
1034struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1035 bool fsuid_only)
630f3f0c
SF
1036{
1037 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1038 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1039
1040 /* only filter by fsuid on multiuser mounts */
1041 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1042 fsuid_only = false;
630f3f0c 1043
4477288a 1044 spin_lock(&cifs_file_list_lock);
630f3f0c
SF
1045 /* we could simply get the first_list_entry since write-only entries
1046 are always at the end of the list but since the first entry might
1047 have a close pending, we go through the whole list */
1048 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1049 if (fsuid_only && open_file->uid != current_fsuid())
1050 continue;
2e396b83 1051 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1052 if (!open_file->invalidHandle) {
1053 /* found a good file */
1054 /* lock it so it will not be closed on us */
6ab409b5 1055 cifsFileInfo_get(open_file);
4477288a 1056 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1057 return open_file;
1058 } /* else might as well continue, and look for
1059 another, or simply have the caller reopen it
1060 again rather than trying to fix this handle */
1061 } else /* write only file */
1062 break; /* write only files are last so must be done */
1063 }
4477288a 1064 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1065 return NULL;
1066}
630f3f0c 1067
6508d904
JL
1068struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1069 bool fsuid_only)
6148a742
SF
1070{
1071 struct cifsFileInfo *open_file;
d3892294 1072 struct cifs_sb_info *cifs_sb;
2846d386 1073 bool any_available = false;
dd99cd80 1074 int rc;
6148a742 1075
60808233
SF
1076 /* Having a null inode here (because mapping->host was set to zero by
1077 the VFS or MM) should not happen but we had reports of on oops (due to
1078 it being zero) during stress testcases so we need to check for it */
1079
fb8c4b14 1080 if (cifs_inode == NULL) {
b6b38f70 1081 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1082 dump_stack();
1083 return NULL;
1084 }
1085
d3892294
JL
1086 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1087
6508d904
JL
1088 /* only filter by fsuid on multiuser mounts */
1089 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1090 fsuid_only = false;
1091
4477288a 1092 spin_lock(&cifs_file_list_lock);
9b22b0b7 1093refind_writable:
6148a742 1094 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1095 if (!any_available && open_file->pid != current->tgid)
1096 continue;
1097 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1098 continue;
2e396b83 1099 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 1100 cifsFileInfo_get(open_file);
9b22b0b7
SF
1101
1102 if (!open_file->invalidHandle) {
1103 /* found a good writable file */
4477288a 1104 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1105 return open_file;
1106 }
8840dee9 1107
4477288a 1108 spin_unlock(&cifs_file_list_lock);
cdff08e7 1109
9b22b0b7 1110 /* Had to unlock since following call can block */
15886177 1111 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1112 if (!rc)
1113 return open_file;
9b22b0b7 1114
cdff08e7 1115 /* if it fails, try another handle if possible */
b6b38f70 1116 cFYI(1, "wp failed on reopen file");
6ab409b5 1117 cifsFileInfo_put(open_file);
8840dee9 1118
cdff08e7
SF
1119 spin_lock(&cifs_file_list_lock);
1120
9b22b0b7
SF
1121 /* else we simply continue to the next entry. Thus
1122 we do not loop on reopen errors. If we
1123 can not reopen the file, for example if we
1124 reconnected to a server with another client
1125 racing to delete or lock the file we would not
1126 make progress if we restarted before the beginning
1127 of the loop here. */
6148a742
SF
1128 }
1129 }
2846d386
JL
1130 /* couldn't find useable FH with same pid, try any available */
1131 if (!any_available) {
1132 any_available = true;
1133 goto refind_writable;
1134 }
4477288a 1135 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1136 return NULL;
1137}
1138
1da177e4
LT
1139static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1140{
1141 struct address_space *mapping = page->mapping;
1142 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1143 char *write_data;
1144 int rc = -EFAULT;
1145 int bytes_written = 0;
1da177e4 1146 struct inode *inode;
6148a742 1147 struct cifsFileInfo *open_file;
1da177e4
LT
1148
1149 if (!mapping || !mapping->host)
1150 return -EFAULT;
1151
1152 inode = page->mapping->host;
1da177e4
LT
1153
1154 offset += (loff_t)from;
1155 write_data = kmap(page);
1156 write_data += from;
1157
1158 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1159 kunmap(page);
1160 return -EIO;
1161 }
1162
1163 /* racing with truncate? */
1164 if (offset > mapping->host->i_size) {
1165 kunmap(page);
1166 return 0; /* don't care */
1167 }
1168
1169 /* check to make sure that we are not extending the file */
1170 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1171 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1172
6508d904 1173 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1174 if (open_file) {
7da4b49a
JL
1175 bytes_written = cifs_write(open_file, write_data,
1176 to - from, &offset);
6ab409b5 1177 cifsFileInfo_put(open_file);
1da177e4 1178 /* Does mm or vfs already set times? */
6148a742 1179 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1180 if ((bytes_written > 0) && (offset))
6148a742 1181 rc = 0;
bb5a9a04
SF
1182 else if (bytes_written < 0)
1183 rc = bytes_written;
6148a742 1184 } else {
b6b38f70 1185 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1186 rc = -EIO;
1187 }
1188
1189 kunmap(page);
1190 return rc;
1191}
1192
1da177e4 1193static int cifs_writepages(struct address_space *mapping,
37c0eb46 1194 struct writeback_control *wbc)
1da177e4 1195{
37c0eb46
SF
1196 unsigned int bytes_to_write;
1197 unsigned int bytes_written;
1198 struct cifs_sb_info *cifs_sb;
1199 int done = 0;
111ebb6e 1200 pgoff_t end;
37c0eb46 1201 pgoff_t index;
fb8c4b14
SF
1202 int range_whole = 0;
1203 struct kvec *iov;
84d2f07e 1204 int len;
37c0eb46
SF
1205 int n_iov = 0;
1206 pgoff_t next;
1207 int nr_pages;
1208 __u64 offset = 0;
23e7dd7d 1209 struct cifsFileInfo *open_file;
ba00ba64 1210 struct cifsTconInfo *tcon;
fbec9ab9 1211 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1212 struct page *page;
1213 struct pagevec pvec;
1214 int rc = 0;
1215 int scanned = 0;
7749981e 1216 int xid;
1da177e4 1217
37c0eb46 1218 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1219
37c0eb46
SF
1220 /*
1221 * If wsize is smaller that the page cache size, default to writing
1222 * one page at a time via cifs_writepage
1223 */
1224 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1225 return generic_writepages(mapping, wbc);
1226
9a0c8230 1227 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1228 if (iov == NULL)
9a0c8230
SF
1229 return generic_writepages(mapping, wbc);
1230
37c0eb46 1231 /*
f3983c21
JL
1232 * if there's no open file, then this is likely to fail too,
1233 * but it'll at least handle the return. Maybe it should be
1234 * a BUG() instead?
37c0eb46 1235 */
6508d904 1236 open_file = find_writable_file(CIFS_I(mapping->host), false);
f3983c21 1237 if (!open_file) {
9a0c8230 1238 kfree(iov);
f3983c21
JL
1239 return generic_writepages(mapping, wbc);
1240 }
1241
13cfb733 1242 tcon = tlink_tcon(open_file->tlink);
f3983c21
JL
1243 if (!experimEnabled && tcon->ses->server->secMode &
1244 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1245 cifsFileInfo_put(open_file);
6b035904 1246 kfree(iov);
f3983c21 1247 return generic_writepages(mapping, wbc);
37c0eb46 1248 }
f3983c21 1249 cifsFileInfo_put(open_file);
37c0eb46 1250
1da177e4
LT
1251 xid = GetXid();
1252
37c0eb46 1253 pagevec_init(&pvec, 0);
111ebb6e 1254 if (wbc->range_cyclic) {
37c0eb46 1255 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1256 end = -1;
1257 } else {
1258 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1259 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1260 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1261 range_whole = 1;
37c0eb46
SF
1262 scanned = 1;
1263 }
1264retry:
1265 while (!done && (index <= end) &&
1266 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1267 PAGECACHE_TAG_DIRTY,
1268 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1269 int first;
1270 unsigned int i;
1271
37c0eb46
SF
1272 first = -1;
1273 next = 0;
1274 n_iov = 0;
1275 bytes_to_write = 0;
1276
1277 for (i = 0; i < nr_pages; i++) {
1278 page = pvec.pages[i];
1279 /*
1280 * At this point we hold neither mapping->tree_lock nor
1281 * lock on the page itself: the page may be truncated or
1282 * invalidated (changing page->mapping to NULL), or even
1283 * swizzled back from swapper_space to tmpfs file
1284 * mapping
1285 */
1286
1287 if (first < 0)
1288 lock_page(page);
529ae9aa 1289 else if (!trylock_page(page))
37c0eb46
SF
1290 break;
1291
1292 if (unlikely(page->mapping != mapping)) {
1293 unlock_page(page);
1294 break;
1295 }
1296
111ebb6e 1297 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1298 done = 1;
1299 unlock_page(page);
1300 break;
1301 }
1302
1303 if (next && (page->index != next)) {
1304 /* Not next consecutive page */
1305 unlock_page(page);
1306 break;
1307 }
1308
1309 if (wbc->sync_mode != WB_SYNC_NONE)
1310 wait_on_page_writeback(page);
1311
1312 if (PageWriteback(page) ||
cb876f45 1313 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1314 unlock_page(page);
1315 break;
1316 }
84d2f07e 1317
cb876f45
LT
1318 /*
1319 * This actually clears the dirty bit in the radix tree.
1320 * See cifs_writepage() for more commentary.
1321 */
1322 set_page_writeback(page);
1323
84d2f07e
SF
1324 if (page_offset(page) >= mapping->host->i_size) {
1325 done = 1;
1326 unlock_page(page);
cb876f45 1327 end_page_writeback(page);
84d2f07e
SF
1328 break;
1329 }
1330
37c0eb46
SF
1331 /*
1332 * BB can we get rid of this? pages are held by pvec
1333 */
1334 page_cache_get(page);
1335
84d2f07e
SF
1336 len = min(mapping->host->i_size - page_offset(page),
1337 (loff_t)PAGE_CACHE_SIZE);
1338
37c0eb46
SF
1339 /* reserve iov[0] for the smb header */
1340 n_iov++;
1341 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1342 iov[n_iov].iov_len = len;
1343 bytes_to_write += len;
37c0eb46
SF
1344
1345 if (first < 0) {
1346 first = i;
1347 offset = page_offset(page);
1348 }
1349 next = page->index + 1;
1350 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1351 break;
1352 }
1353 if (n_iov) {
941b853d 1354retry_write:
6508d904
JL
1355 open_file = find_writable_file(CIFS_I(mapping->host),
1356 false);
23e7dd7d 1357 if (!open_file) {
b6b38f70 1358 cERROR(1, "No writable handles for inode");
23e7dd7d 1359 rc = -EBADF;
1047abc1 1360 } else {
f3983c21 1361 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
23e7dd7d
SF
1362 bytes_to_write, offset,
1363 &bytes_written, iov, n_iov,
7749981e 1364 0);
6ab409b5 1365 cifsFileInfo_put(open_file);
f3983c21 1366 }
fbec9ab9 1367
941b853d
JL
1368 cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
1369
1370 /*
1371 * For now, treat a short write as if nothing got
1372 * written. A zero length write however indicates
1373 * ENOSPC or EFBIG. We have no way to know which
1374 * though, so call it ENOSPC for now. EFBIG would
1375 * get translated to AS_EIO anyway.
1376 *
1377 * FIXME: make it take into account the data that did
1378 * get written
1379 */
1380 if (rc == 0) {
1381 if (bytes_written == 0)
1382 rc = -ENOSPC;
1383 else if (bytes_written < bytes_to_write)
1384 rc = -EAGAIN;
1385 }
1386
1387 /* retry on data-integrity flush */
1388 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
1389 goto retry_write;
1390
1391 /* fix the stats and EOF */
1392 if (bytes_written > 0) {
f3983c21 1393 cifs_stats_bytes_written(tcon, bytes_written);
941b853d 1394 cifs_update_eof(cifsi, offset, bytes_written);
37c0eb46 1395 }
f3983c21 1396
37c0eb46
SF
1397 for (i = 0; i < n_iov; i++) {
1398 page = pvec.pages[first + i];
941b853d
JL
1399 /* on retryable write error, redirty page */
1400 if (rc == -EAGAIN)
1401 redirty_page_for_writepage(wbc, page);
1402 else if (rc != 0)
eb9bdaa3 1403 SetPageError(page);
37c0eb46
SF
1404 kunmap(page);
1405 unlock_page(page);
cb876f45 1406 end_page_writeback(page);
37c0eb46
SF
1407 page_cache_release(page);
1408 }
941b853d
JL
1409
1410 if (rc != -EAGAIN)
1411 mapping_set_error(mapping, rc);
1412 else
1413 rc = 0;
1414
37c0eb46
SF
1415 if ((wbc->nr_to_write -= n_iov) <= 0)
1416 done = 1;
1417 index = next;
b066a48c
DK
1418 } else
1419 /* Need to re-find the pages we skipped */
1420 index = pvec.pages[0]->index + 1;
1421
37c0eb46
SF
1422 pagevec_release(&pvec);
1423 }
1424 if (!scanned && !done) {
1425 /*
1426 * We hit the last page and there is more work to be done: wrap
1427 * back to the start of the file
1428 */
1429 scanned = 1;
1430 index = 0;
1431 goto retry;
1432 }
111ebb6e 1433 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1434 mapping->writeback_index = index;
1435
1da177e4 1436 FreeXid(xid);
9a0c8230 1437 kfree(iov);
1da177e4
LT
1438 return rc;
1439}
1da177e4 1440
fb8c4b14 1441static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1442{
1443 int rc = -EFAULT;
1444 int xid;
1445
1446 xid = GetXid();
1447/* BB add check for wbc flags */
1448 page_cache_get(page);
ad7a2926 1449 if (!PageUptodate(page))
b6b38f70 1450 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1451
1452 /*
1453 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1454 *
1455 * A writepage() implementation always needs to do either this,
1456 * or re-dirty the page with "redirty_page_for_writepage()" in
1457 * the case of a failure.
1458 *
1459 * Just unlocking the page will cause the radix tree tag-bits
1460 * to fail to update with the state of the page correctly.
1461 */
fb8c4b14 1462 set_page_writeback(page);
1da177e4
LT
1463 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1464 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1465 unlock_page(page);
cb876f45
LT
1466 end_page_writeback(page);
1467 page_cache_release(page);
1da177e4
LT
1468 FreeXid(xid);
1469 return rc;
1470}
1471
d9414774
NP
1472static int cifs_write_end(struct file *file, struct address_space *mapping,
1473 loff_t pos, unsigned len, unsigned copied,
1474 struct page *page, void *fsdata)
1da177e4 1475{
d9414774
NP
1476 int rc;
1477 struct inode *inode = mapping->host;
1da177e4 1478
b6b38f70
JP
1479 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1480 page, pos, copied);
d9414774 1481
a98ee8c1
JL
1482 if (PageChecked(page)) {
1483 if (copied == len)
1484 SetPageUptodate(page);
1485 ClearPageChecked(page);
1486 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1487 SetPageUptodate(page);
ad7a2926 1488
1da177e4 1489 if (!PageUptodate(page)) {
d9414774
NP
1490 char *page_data;
1491 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1492 int xid;
1493
1494 xid = GetXid();
1da177e4
LT
1495 /* this is probably better than directly calling
1496 partialpage_write since in this function the file handle is
1497 known which we might as well leverage */
1498 /* BB check if anything else missing out of ppw
1499 such as updating last write time */
1500 page_data = kmap(page);
7da4b49a
JL
1501 rc = cifs_write(file->private_data, page_data + offset,
1502 copied, &pos);
d9414774 1503 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1504 kunmap(page);
d9414774
NP
1505
1506 FreeXid(xid);
fb8c4b14 1507 } else {
d9414774
NP
1508 rc = copied;
1509 pos += copied;
1da177e4
LT
1510 set_page_dirty(page);
1511 }
1512
d9414774
NP
1513 if (rc > 0) {
1514 spin_lock(&inode->i_lock);
1515 if (pos > inode->i_size)
1516 i_size_write(inode, pos);
1517 spin_unlock(&inode->i_lock);
1518 }
1519
1520 unlock_page(page);
1521 page_cache_release(page);
1522
1da177e4
LT
1523 return rc;
1524}
1525
8be7e6ba 1526int cifs_strict_fsync(struct file *file, int datasync)
1da177e4
LT
1527{
1528 int xid;
1529 int rc = 0;
b298f223 1530 struct cifsTconInfo *tcon;
c21dfb69 1531 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1532 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 1533 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4
LT
1534
1535 xid = GetXid();
1536
b6b38f70 1537 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1538 file->f_path.dentry->d_name.name, datasync);
50c2f753 1539
8be7e6ba
PS
1540 if (!CIFS_I(inode)->clientCanCacheRead)
1541 cifs_invalidate_mapping(inode);
eb4b756b 1542
8be7e6ba
PS
1543 tcon = tlink_tcon(smbfile->tlink);
1544 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1545 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1546
1547 FreeXid(xid);
1548 return rc;
1549}
1550
1551int cifs_fsync(struct file *file, int datasync)
1552{
1553 int xid;
1554 int rc = 0;
1555 struct cifsTconInfo *tcon;
1556 struct cifsFileInfo *smbfile = file->private_data;
1557 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1558
1559 xid = GetXid();
1560
1561 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1562 file->f_path.dentry->d_name.name, datasync);
1563
1564 tcon = tlink_tcon(smbfile->tlink);
1565 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1566 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 1567
1da177e4
LT
1568 FreeXid(xid);
1569 return rc;
1570}
1571
3978d717 1572/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1573{
1574 struct address_space *mapping;
1575 struct inode *inode;
1576 unsigned long index = page->index;
1577 unsigned int rpages = 0;
1578 int rc = 0;
1579
f19159dc 1580 cFYI(1, "sync page %p", page);
1da177e4
LT
1581 mapping = page->mapping;
1582 if (!mapping)
1583 return 0;
1584 inode = mapping->host;
1585 if (!inode)
3978d717 1586 return; */
1da177e4 1587
fb8c4b14 1588/* fill in rpages then
1da177e4
LT
1589 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1590
b6b38f70 1591/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1592
3978d717 1593#if 0
1da177e4
LT
1594 if (rc < 0)
1595 return rc;
1596 return 0;
3978d717 1597#endif
1da177e4
LT
1598} */
1599
1600/*
1601 * As file closes, flush all cached write data for this inode checking
1602 * for write behind errors.
1603 */
75e1fcc0 1604int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1605{
fb8c4b14 1606 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1607 int rc = 0;
1608
eb4b756b 1609 if (file->f_mode & FMODE_WRITE)
d3f1322a 1610 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1611
b6b38f70 1612 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1613
1614 return rc;
1615}
1616
72432ffc
PS
1617static int
1618cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1619{
1620 int rc = 0;
1621 unsigned long i;
1622
1623 for (i = 0; i < num_pages; i++) {
1624 pages[i] = alloc_page(__GFP_HIGHMEM);
1625 if (!pages[i]) {
1626 /*
1627 * save number of pages we have already allocated and
1628 * return with ENOMEM error
1629 */
1630 num_pages = i;
1631 rc = -ENOMEM;
1632 goto error;
1633 }
1634 }
1635
1636 return rc;
1637
1638error:
1639 for (i = 0; i < num_pages; i++)
1640 put_page(pages[i]);
1641 return rc;
1642}
1643
1644static inline
1645size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1646{
1647 size_t num_pages;
1648 size_t clen;
1649
1650 clen = min_t(const size_t, len, wsize);
1651 num_pages = clen / PAGE_CACHE_SIZE;
1652 if (clen % PAGE_CACHE_SIZE)
1653 num_pages++;
1654
1655 if (cur_len)
1656 *cur_len = clen;
1657
1658 return num_pages;
1659}
1660
1661static ssize_t
1662cifs_iovec_write(struct file *file, const struct iovec *iov,
1663 unsigned long nr_segs, loff_t *poffset)
1664{
76429c14
PS
1665 unsigned int written;
1666 unsigned long num_pages, npages, i;
1667 size_t copied, len, cur_len;
1668 ssize_t total_written = 0;
72432ffc
PS
1669 struct kvec *to_send;
1670 struct page **pages;
1671 struct iov_iter it;
1672 struct inode *inode;
1673 struct cifsFileInfo *open_file;
1674 struct cifsTconInfo *pTcon;
1675 struct cifs_sb_info *cifs_sb;
1676 int xid, rc;
1677
1678 len = iov_length(iov, nr_segs);
1679 if (!len)
1680 return 0;
1681
1682 rc = generic_write_checks(file, poffset, &len, 0);
1683 if (rc)
1684 return rc;
1685
1686 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1687 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1688
1689 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
	if (!to_send) {
		kfree(pages);
		return -ENOMEM;
	}

	rc = cifs_write_allocate_pages(pages, num_pages);
	if (rc) {
		kfree(pages);
		kfree(to_send);
		return rc;
	}

	xid = GetXid();
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);
	inode = file->f_path.dentry->d_inode;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	npages = num_pages;

	do {
		size_t save_len = cur_len;
		for (i = 0; i < npages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
			copied = iov_iter_copy_from_user(pages[i], &it, 0,
							 copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
			to_send[i+1].iov_base = kmap(pages[i]);
			to_send[i+1].iov_len = copied;
		}

		cur_len = save_len - cur_len;

		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
					   cur_len, *poffset, &written,
					   to_send, npages, 0);
		} while (rc == -EAGAIN);

		for (i = 0; i < npages; i++)
			kunmap(pages[i]);

		if (written) {
			len -= written;
			total_written += written;
			cifs_update_eof(CIFS_I(inode), *poffset, written);
			*poffset += written;
		} else if (rc < 0) {
			if (!total_written)
				total_written = rc;
			break;
		}

		/* get length and number of kvecs of the next write */
		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
	} while (len > 0);

	if (total_written > 0) {
		spin_lock(&inode->i_lock);
		if (*poffset > inode->i_size)
			i_size_write(inode, *poffset);
		spin_unlock(&inode->i_lock);
	}

	cifs_stats_bytes_written(pTcon, total_written);
	mark_inode_dirty_sync(inode);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(to_send);
	kfree(pages);
	FreeXid(xid);
	return total_written;
}
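
/*
 * Worked example (illustrative only, assuming PAGE_CACHE_SIZE == 4096 and a
 * negotiated wsize of 57344): a 200000-byte uncached write is split by the
 * loop above into chunks of at most wsize bytes, each backed by up to
 * num_pages pages:
 *
 *	chunk 1: cur_len = 57344, npages = 14
 *	chunk 2: cur_len = 57344, npages = 14
 *	chunk 3: cur_len = 57344, npages = 14
 *	chunk 4: cur_len = 27968, npages =  7
 *
 * 3 * 57344 + 27968 == 200000; the last chunk's final page is only
 * partially filled (27968 - 6 * 4096 == 3392 bytes), which is why each
 * kvec records the number of bytes actually copied rather than the page
 * size.
 */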

static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize for the case when signing is disabled. We could then
	 * drop the extra memory-to-memory copy and use the iovec buffers
	 * directly to construct the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}
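
/*
 * Descriptive note on the function above: the uncached write bypasses the
 * page cache entirely, so any pages already cached for this inode may no
 * longer match the server's copy; marking the mapping invalid makes a
 * later revalidation drop and refetch them.  The flag's consumers live
 * elsewhere in cifsfs (the revalidate/invalidate paths), not in this file.
 */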

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server
	 * exactly from pos to pos+len-1 rather than flush all affected
	 * pages, because flushing could trigger an error from mandatory
	 * locks held on those pages but not on the region from pos to
	 * pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}

static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	char *read_data;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		rc = -EAGAIN;
		read_data = NULL;

		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
					 cur_len, *poffset, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}

ssize_t cifs_user_read(struct file *file, char __user *read_data,
		       size_t read_size, loff_t *poffset)
{
	struct iovec iov;
	iov.iov_base = read_data;
	iov.iov_len = read_size;

	return cifs_iovec_read(file, &iov, 1, poffset);
}
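
/*
 * Minimal usage sketch, illustrative only and not taken from the kernel
 * sources: a caller with a single flat user buffer maps directly onto the
 * iovec-based read above.  "filp" and "buf" below are hypothetical locals.
 */
#if 0
	loff_t off = 0;
	ssize_t n = cifs_user_read(filp, buf, 4096, &off);
	/* on success, n bytes were copied into buf and off advanced by n */
#endif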

static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we must read from the server every time we
	 * don't hold a level II oplock: the server can delay the mtime
	 * change, so we cannot decide whether the cached inode is stale.
	 * Reading through the page cache could also fail if mandatory locks
	 * are held on pages touched by this read but not on the region from
	 * pos to pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For Windows ME and 9x we do not want to request more than
		   the negotiated buffer size, since the server will refuse
		   the read otherwise */
		if ((pTcon->ses) &&
		    !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
					pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
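
/*
 * Worked example (illustrative only): against a server that does not
 * advertise CAP_LARGE_FILES and negotiated maxBuf = 16644, the clamp above
 * limits each request to 16644 - 128 = 16516 bytes, so a 64 KiB read is
 * satisfied by several smaller SMB reads instead of one large one.  The
 * maxBuf value here is just an example, not a protocol constant.
 */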

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	if (!CIFS_I(inode)->clientCanCacheRead)
		cifs_invalidate_mapping(inode);

	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}

static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache_lru(page, mapping, page->index,
					  GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, "Add page cache failed");
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}
		page_cache_release(page);

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		data += PAGE_CACHE_SIZE;

		/* add page to FS-Cache */
		cifs_readpage_to_fscache(mapping->host, page);
	}
	return;
}
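
/*
 * Worked example (illustrative only, assuming PAGE_CACHE_SIZE == 4096): if
 * the server returns bytes_read = 10000, the loop above fills two full
 * pages (2 * 4096 = 8192 bytes), copies the remaining 1808 bytes into a
 * third page and zeroes its last 4096 - 1808 = 2288 bytes before marking
 * that page uptodate.
 */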

static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = tlink_tcon(open_file->tlink);

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		goto read_complete;

	cFYI(DBG2, "rpages: num pages %d", num_pages);
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size we could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
				read_size, contig_pages);
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, "Read error in readpages: %d", rc);
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset));

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, "No bytes read (%d) at offset %lld . "
				"Cleaning remaining pages from readahead list",
				bytes_read, offset);
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

read_complete:
	FreeXid(xid);
	return rc;
}
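
/*
 * Worked example (illustrative only, assuming PAGE_CACHE_SIZE == 4096 and
 * rsize = 61440): 20 contiguous readahead pages would ask for
 * 20 * 4096 = 81920 bytes, but the clamp above limits the request to
 * rsize & PAGE_CACHE_MASK = 61440 bytes, i.e. 15 pages per SMB read; the
 * remaining pages are picked up on the next pass of the loop.  The rsize
 * value is an example, not a negotiated constant.
 */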

static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/* We do not want to update the file size from the server for inodes that
   are open for write, to avoid races with writepage extending the file.
   In the future we could consider allowing a refresh of the inode only on
   increases in the file size, but that is tricky to do without racing with
   writebehind page caching in the current Linux kernel design. */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change the size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent a close of that handle
		   racing with this read? In any case, this will be written
		   out by write_end so it is fine */
	}
out:
	*pagep = page;
	return rc;
}
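
/*
 * Worked example (illustrative only, assuming PAGE_CACHE_SIZE == 4096 and
 * that the client holds a read oplock, i.e. clientCanCacheRead is set):
 * with a cached i_size of 10000, a write of len = 100 at pos = 16500 lands
 * in the page starting at 16384.  Since page_start (16384) >= i_size there
 * is nothing on the server worth reading: the ranges [0, 116) and
 * [216, 4096) are zeroed, PG_checked is set, and only bytes [116, 216) are
 * filled in by the subsequent copy of user data before write_end runs.
 */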

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now-incorrect file handle, is not a data
	 * integrity issue.  But do not bother sending an oplock release if
	 * the session to the server is still disconnected, since the oplock
	 * has already been released by the server.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}

	/*
	 * We might have kicked in before is_valid_oplock_break()
	 * finished grabbing a reference for us. Make sure it's done by
	 * waiting for cifs_file_list_lock.
	 */
	spin_lock(&cifs_file_list_lock);
	spin_unlock(&cifs_file_list_lock);

	cifs_oplock_break_put(cfile);
}

/* must be called while holding cifs_file_list_lock */
void cifs_oplock_break_get(struct cifsFileInfo *cfile)
{
	cifs_sb_active(cfile->dentry->d_sb);
	cifsFileInfo_get(cfile);
}

void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
	struct super_block *sb = cfile->dentry->d_sb;

	cifsFileInfo_put(cfile);
	cifs_sb_deactive(sb);
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	/* .sync_page = cifs_sync_page, */
	/* .direct_IO = */
};