CIFS: Implement caching mechanism for mandatory brlocks
[deliverable/linux.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
/*
 * Open a file via the SMB POSIX extensions (CIFSPOSIXCreate).  On success
 * *pnetfid and *poplock are filled in by the server call and, when pinode
 * is non-NULL, the in-core inode is created or refreshed from the
 * FILE_UNIX_BASIC_INFO the server returned.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask here; the create mode goes on the wire */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server sent no usable file info back */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
eeb910a6
PS
/*
 * Open a file the traditional (non-POSIX-extensions) way, via
 * CIFSSMBOpen on NT-capable servers or SMBLegacyOpen otherwise, then
 * refresh the in-core inode from the open response.
 *
 * Returns 0 on success or a negative errno; *pnetfid and *poplock are
 * filled in by the server call.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* request backup-intent access when the mount asked for it */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
243
15ecb436
JL
/*
 * Allocate and initialize the cifsFileInfo for a just-opened handle and
 * link it onto the tcon and inode open-file lists.  Returns NULL on
 * allocation failure (caller must then close the server handle).
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	/* initial reference; dropped by cifsFileInfo_put() */
	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	/* byte-range locks may be cached only while we hold an exclusive
	   oplock (clientCanCacheAll) */
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}
283
85160e03
PS
284static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
285
cdff08e7
SF
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other holders remain; nothing to tear down yet */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* ensure no queued oplock-break work touches us after we free */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;	/* lock belongs to a different handle */
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
350
1da177e4
LT
/*
 * VFS ->open for cifs.  Attempts a POSIX-extensions open first (when the
 * tcon advertises CAP_UNIX path operations and posix open has not been
 * flagged as broken), falls back to the traditional NT open, and attaches
 * a new cifsFileInfo to file->private_data.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server misbehaved on a posix open request; do not
			   try posix opens against it again */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* no local state can track the handle; close it on the
		   server before failing the open */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
				       pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
453
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
/* BB list all locks open on this file and relock */

	/* Nothing is relocked yet; report success unconditionally. */
	return 0;
}
464
/*
 * Re-open a handle that was invalidated (e.g. by a session reconnect).
 * Serialized against other users of the handle via fh_mutex; tries a
 * POSIX-extensions reopen first and falls back to CIFSSMBOpen.  When
 * can_flush is set, dirty pages are written back and the inode metadata
 * is refreshed from the server before locks are re-established.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* someone else already reopened it while we waited */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
	     inode, pCifsFile->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inod
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
590
591int cifs_close(struct inode *inode, struct file *file)
592{
77970693
JL
593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
596 }
7ee1af76 597
cdff08e7
SF
598 /* return code from the ->release op is always ignored */
599 return 0;
1da177e4
LT
600}
601
/*
 * VFS ->release for cifs directories: finish off an uncompleted server
 * search, free any cached search response buffer, and release the
 * directory's private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			/* mark the handle invalid under the lock, then drop
			   the spinlock before the blocking server call */
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
646
85160e03
PS
647static struct cifsLockInfo *
648cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
7ee1af76 649{
fb8c4b14
SF
650 struct cifsLockInfo *li =
651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
85160e03
PS
652 if (!li)
653 return li;
d59dad2b 654 li->netfid = netfid;
7ee1af76
JA
655 li->offset = offset;
656 li->length = len;
03776f45
PS
657 li->type = type;
658 li->pid = current->tgid;
85160e03
PS
659 INIT_LIST_HEAD(&li->blist);
660 init_waitqueue_head(&li->block_q);
661 return li;
662}
663
664static void
665cifs_del_lock_waiters(struct cifsLockInfo *lock)
666{
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
671 }
672}
673
/*
 * Scan the inode's cached byte-range lock list for an entry conflicting
 * with [offset, offset+length) of the given type.  On conflict, point
 * *conf_lock at the conflicting entry and return true.  Both callers
 * invoke this with cinode->lock_mutex held.
 */
static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
			__u64 length, __u8 type, __u16 netfid,
			struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;	/* ranges do not overlap */
		else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
			 ((netfid == li->netfid && current->tgid == li->pid) ||
			  type == li->type))
			/* a shared request does not conflict with our own
			   lock on the same fid, nor with another shared lock */
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}
696
/*
 * Answer an F_GETLK-style query from the cached lock list.  Returns 0
 * with flock describing the conflicting lock (or F_UNLCK when none);
 * returns 1 when the cache cannot answer (caching disabled and no
 * conflict found locally) and the caller must ask the server.
 */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
	       __u8 type, __u16 netfid, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					&conf_lock);
	if (exist) {
		/* report the conflicting range back through flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;		/* cache is not authoritative here */
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
725
726static int
727cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
728 __u8 type, __u16 netfid)
729{
730 struct cifsLockInfo *li;
731
732 li = cifs_lock_init(len, offset, type, netfid);
733 if (!li)
734 return -ENOMEM;
735
d59dad2b
PS
736 mutex_lock(&cinode->lock_mutex);
737 list_add_tail(&li->llist, &cinode->llist);
738 mutex_unlock(&cinode->lock_mutex);
7ee1af76
JA
739 return 0;
740}
741
85160e03
PS
742static int
743cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
744 __u8 type, __u16 netfid, bool wait)
745{
746 struct cifsLockInfo *lock, *conf_lock;
747 bool exist;
748 int rc = 0;
749
750 lock = cifs_lock_init(length, offset, type, netfid);
751 if (!lock)
752 return -ENOMEM;
753
754try_again:
755 exist = false;
756 mutex_lock(&cinode->lock_mutex);
757
758 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
759 &conf_lock);
760 if (!exist && cinode->can_cache_brlcks) {
761 list_add_tail(&lock->llist, &cinode->llist);
762 mutex_unlock(&cinode->lock_mutex);
763 return rc;
764 }
765
766 if (!exist)
767 rc = 1;
768 else if (!wait)
769 rc = -EACCES;
770 else {
771 list_add_tail(&lock->blist, &conf_lock->blist);
772 mutex_unlock(&cinode->lock_mutex);
773 rc = wait_event_interruptible(lock->block_q,
774 (lock->blist.prev == &lock->blist) &&
775 (lock->blist.next == &lock->blist));
776 if (!rc)
777 goto try_again;
778 else {
779 mutex_lock(&cinode->lock_mutex);
780 list_del_init(&lock->blist);
781 mutex_unlock(&cinode->lock_mutex);
782 }
783 }
784
785 kfree(lock);
786 mutex_unlock(&cinode->lock_mutex);
787 return rc;
788}
789
/*
 * Flush all cached byte-range locks for this handle out to the server
 * and turn off local lock caching for the inode.  Returns 0 or the last
 * non-zero status from CIFSSMBLock.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* already pushed (or never cached); nothing to do */
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	/* NOTE(review): lock_mutex is held across the server round trips
	   below; any failure still disables caching afterwards */
	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		stored_rc = CIFSSMBLock(xid, tcon, cfile->netfid,
					li->pid, li->length, li->offset,
					0, 1, li->type, 0, 0);
		if (stored_rc)
			rc = stored_rc;
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
}
822
03776f45
PS
/*
 * Decode a VFS file_lock into the pieces the lock/unlock paths need:
 * the SMB lock type, whether this is a lock or an unlock request, and
 * whether the caller is willing to block.  Unsupported flags are only
 * logged.
 */
static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
		bool *wait_flag)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	/* all requests use large-file (64-bit range) locking */
	*type = LOCKING_ANDX_LARGE_FILES;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
1da177e4 866
03776f45
PS
/*
 * Handle an F_GETLK request.  For POSIX-extension mounts, query the
 * server directly.  Otherwise consult the local lock cache first and,
 * if it cannot answer, probe the server by trying to take (and then
 * immediately release) the lock, downgrading to a shared probe when an
 * exclusive probe fails.
 */
static int
cifs_getlk(struct cifsFileInfo *cfile, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	__u16 netfid = cfile->netfid;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (posix_lck) {
		int posix_lock_type;
		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
				      length, flock, posix_lock_type,
				      wait_flag);
		return rc;
	}

	/* try to answer the query from the local cache first */
	rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
			    flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1, type, 0, 0);
	if (rc == 0) {
		/* probe succeeded: no conflicting lock; undo the probe */
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type, 0, 0);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				   "range %d during test of lock", rc);
		rc = 0;
		return rc;
	}

	if (type & LOCKING_ANDX_SHARED_LOCK) {
		/* a shared probe already failed: an exclusive lock exists */
		flock->fl_type = F_WRLCK;
		rc = 0;
		return rc;
	}

	/* exclusive probe failed; retry with a shared probe to tell a
	   shared holder apart from an exclusive one */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1,
			 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type | LOCKING_ANDX_SHARED_LOCK,
				 0, 0);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				   "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	rc = 0;
	return rc;
}
933
/*
 * Handle an F_SETLK/F_SETLKW request.  POSIX-extension mounts go
 * straight to the server.  Otherwise, new locks are first checked
 * against (and possibly cached in) the local lock list; unlocks remove
 * every fully-covered cached record, sending the unlock to the server
 * only when caching is off.  On success the lock is mirrored into the
 * local VFS via posix_lock_file_wait.
 */
static int
cifs_setlk(struct file *file,  struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;
		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, length,
				      flock, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* rc == 0: cached locally; rc == 1: must ask the server;
		   rc < 0: conflict or error */
		rc = cifs_lock_add_if(cinode, flock->fl_start, length,
				      type, netfid, wait_flag);
		if (rc < 0)
			return rc;
		else if (!rc)
			goto out;

		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
				 flock->fl_start, 0, 1, type, wait_flag, 0);
		if (rc == 0) {
			/* For Windows locks we must store them. */
			rc = cifs_lock_add(cinode, length, flock->fl_start,
					   type, netfid);
		}
	} else if (unlock) {
		/*
		 * For each stored lock that this unlock overlaps completely,
		 * unlock it.
		 */
		int stored_rc = 0;
		struct cifsLockInfo *li, *tmp;

		mutex_lock(&cinode->lock_mutex);
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;	/* not fully covered */
			if (current->tgid != li->pid)
				continue;	/* different owner */
			if (cfile->netfid != li->netfid)
				continue;	/* different handle */

			/* the server only knows about the lock when it was
			   not merely cached locally */
			if (!cinode->can_cache_brlcks)
				stored_rc = CIFSSMBLock(xid, tcon, netfid,
							current->tgid,
							li->length, li->offset,
							1, 0, li->type, 0, 0);
			else
				stored_rc = 0;

			if (stored_rc)
				rc = stored_rc;
			else {
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
			}
		}
		mutex_unlock(&cinode->lock_mutex);
	}
out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
1017
1018int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1019{
1020 int rc, xid;
1021 int lock = 0, unlock = 0;
1022 bool wait_flag = false;
1023 bool posix_lck = false;
1024 struct cifs_sb_info *cifs_sb;
1025 struct cifs_tcon *tcon;
1026 struct cifsInodeInfo *cinode;
1027 struct cifsFileInfo *cfile;
1028 __u16 netfid;
1029 __u8 type;
1030
1031 rc = -EACCES;
1032 xid = GetXid();
1033
1034 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1035 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1036 flock->fl_start, flock->fl_end);
1037
1038 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1039
1040 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1041 cfile = (struct cifsFileInfo *)file->private_data;
1042 tcon = tlink_tcon(cfile->tlink);
1043 netfid = cfile->netfid;
1044 cinode = CIFS_I(file->f_path.dentry->d_inode);
1045
1046 if ((tcon->ses->capabilities & CAP_UNIX) &&
1047 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1048 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1049 posix_lck = true;
1050 /*
1051 * BB add code here to normalize offset and length to account for
1052 * negative length which we can not accept over the wire.
1053 */
1054 if (IS_GETLK(cmd)) {
1055 rc = cifs_getlk(cfile, flock, type, wait_flag, posix_lck, xid);
1056 FreeXid(xid);
1057 return rc;
1058 }
1059
1060 if (!lock && !unlock) {
1061 /*
1062 * if no lock or unlock then nothing to do since we do not
1063 * know what it is
1064 */
1065 FreeXid(xid);
1066 return -EOPNOTSUPP;
7ee1af76
JA
1067 }
1068
03776f45
PS
1069 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1070 xid);
1da177e4
LT
1071 FreeXid(xid);
1072 return rc;
1073}
1074
fbec9ab9 1075/* update the file size (if needed) after a write */
72432ffc 1076void
fbec9ab9
JL
1077cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1078 unsigned int bytes_written)
1079{
1080 loff_t end_of_write = offset + bytes_written;
1081
1082 if (end_of_write > cifsi->server_eof)
1083 cifsi->server_eof = end_of_write;
1084}
1085
fa2989f4 1086static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
7da4b49a
JL
1087 const char *write_data, size_t write_size,
1088 loff_t *poffset)
1da177e4
LT
1089{
1090 int rc = 0;
1091 unsigned int bytes_written = 0;
1092 unsigned int total_written;
1093 struct cifs_sb_info *cifs_sb;
96daf2b0 1094 struct cifs_tcon *pTcon;
7749981e 1095 int xid;
7da4b49a
JL
1096 struct dentry *dentry = open_file->dentry;
1097 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
fa2989f4 1098 struct cifs_io_parms io_parms;
1da177e4 1099
7da4b49a 1100 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 1101
b6b38f70 1102 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
7da4b49a 1103 *poffset, dentry->d_name.name);
1da177e4 1104
13cfb733 1105 pTcon = tlink_tcon(open_file->tlink);
50c2f753 1106
1da177e4 1107 xid = GetXid();
1da177e4 1108
1da177e4
LT
1109 for (total_written = 0; write_size > total_written;
1110 total_written += bytes_written) {
1111 rc = -EAGAIN;
1112 while (rc == -EAGAIN) {
ca83ce3d
JL
1113 struct kvec iov[2];
1114 unsigned int len;
1115
1da177e4 1116 if (open_file->invalidHandle) {
1da177e4
LT
1117 /* we could deadlock if we called
1118 filemap_fdatawait from here so tell
fb8c4b14 1119 reopen_file not to flush data to
1da177e4 1120 server now */
15886177 1121 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
1122 if (rc != 0)
1123 break;
1124 }
ca83ce3d
JL
1125
1126 len = min((size_t)cifs_sb->wsize,
1127 write_size - total_written);
1128 /* iov[0] is reserved for smb header */
1129 iov[1].iov_base = (char *)write_data + total_written;
1130 iov[1].iov_len = len;
fa2989f4
PS
1131 io_parms.netfid = open_file->netfid;
1132 io_parms.pid = pid;
1133 io_parms.tcon = pTcon;
1134 io_parms.offset = *poffset;
1135 io_parms.length = len;
1136 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1137 1, 0);
1da177e4
LT
1138 }
1139 if (rc || (bytes_written == 0)) {
1140 if (total_written)
1141 break;
1142 else {
1143 FreeXid(xid);
1144 return rc;
1145 }
fbec9ab9
JL
1146 } else {
1147 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1148 *poffset += bytes_written;
fbec9ab9 1149 }
1da177e4
LT
1150 }
1151
a4544347 1152 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 1153
7da4b49a
JL
1154 if (total_written > 0) {
1155 spin_lock(&dentry->d_inode->i_lock);
1156 if (*poffset > dentry->d_inode->i_size)
1157 i_size_write(dentry->d_inode, *poffset);
1158 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 1159 }
7da4b49a 1160 mark_inode_dirty_sync(dentry->d_inode);
1da177e4
LT
1161 FreeXid(xid);
1162 return total_written;
1163}
1164
6508d904
JL
1165struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1166 bool fsuid_only)
630f3f0c
SF
1167{
1168 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1169 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1170
1171 /* only filter by fsuid on multiuser mounts */
1172 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1173 fsuid_only = false;
630f3f0c 1174
4477288a 1175 spin_lock(&cifs_file_list_lock);
630f3f0c
SF
1176 /* we could simply get the first_list_entry since write-only entries
1177 are always at the end of the list but since the first entry might
1178 have a close pending, we go through the whole list */
1179 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1180 if (fsuid_only && open_file->uid != current_fsuid())
1181 continue;
2e396b83 1182 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1183 if (!open_file->invalidHandle) {
1184 /* found a good file */
1185 /* lock it so it will not be closed on us */
6ab409b5 1186 cifsFileInfo_get(open_file);
4477288a 1187 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1188 return open_file;
1189 } /* else might as well continue, and look for
1190 another, or simply have the caller reopen it
1191 again rather than trying to fix this handle */
1192 } else /* write only file */
1193 break; /* write only files are last so must be done */
1194 }
4477288a 1195 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1196 return NULL;
1197}
630f3f0c 1198
6508d904
JL
1199struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1200 bool fsuid_only)
6148a742
SF
1201{
1202 struct cifsFileInfo *open_file;
d3892294 1203 struct cifs_sb_info *cifs_sb;
2846d386 1204 bool any_available = false;
dd99cd80 1205 int rc;
6148a742 1206
60808233
SF
1207 /* Having a null inode here (because mapping->host was set to zero by
1208 the VFS or MM) should not happen but we had reports of on oops (due to
1209 it being zero) during stress testcases so we need to check for it */
1210
fb8c4b14 1211 if (cifs_inode == NULL) {
b6b38f70 1212 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1213 dump_stack();
1214 return NULL;
1215 }
1216
d3892294
JL
1217 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1218
6508d904
JL
1219 /* only filter by fsuid on multiuser mounts */
1220 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1221 fsuid_only = false;
1222
4477288a 1223 spin_lock(&cifs_file_list_lock);
9b22b0b7 1224refind_writable:
6148a742 1225 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1226 if (!any_available && open_file->pid != current->tgid)
1227 continue;
1228 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1229 continue;
2e396b83 1230 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 1231 cifsFileInfo_get(open_file);
9b22b0b7
SF
1232
1233 if (!open_file->invalidHandle) {
1234 /* found a good writable file */
4477288a 1235 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1236 return open_file;
1237 }
8840dee9 1238
4477288a 1239 spin_unlock(&cifs_file_list_lock);
cdff08e7 1240
9b22b0b7 1241 /* Had to unlock since following call can block */
15886177 1242 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1243 if (!rc)
1244 return open_file;
9b22b0b7 1245
cdff08e7 1246 /* if it fails, try another handle if possible */
b6b38f70 1247 cFYI(1, "wp failed on reopen file");
6ab409b5 1248 cifsFileInfo_put(open_file);
8840dee9 1249
cdff08e7
SF
1250 spin_lock(&cifs_file_list_lock);
1251
9b22b0b7
SF
1252 /* else we simply continue to the next entry. Thus
1253 we do not loop on reopen errors. If we
1254 can not reopen the file, for example if we
1255 reconnected to a server with another client
1256 racing to delete or lock the file we would not
1257 make progress if we restarted before the beginning
1258 of the loop here. */
6148a742
SF
1259 }
1260 }
2846d386
JL
1261 /* couldn't find useable FH with same pid, try any available */
1262 if (!any_available) {
1263 any_available = true;
1264 goto refind_writable;
1265 }
4477288a 1266 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1267 return NULL;
1268}
1269
1da177e4
LT
1270static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1271{
1272 struct address_space *mapping = page->mapping;
1273 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1274 char *write_data;
1275 int rc = -EFAULT;
1276 int bytes_written = 0;
1da177e4 1277 struct inode *inode;
6148a742 1278 struct cifsFileInfo *open_file;
1da177e4
LT
1279
1280 if (!mapping || !mapping->host)
1281 return -EFAULT;
1282
1283 inode = page->mapping->host;
1da177e4
LT
1284
1285 offset += (loff_t)from;
1286 write_data = kmap(page);
1287 write_data += from;
1288
1289 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1290 kunmap(page);
1291 return -EIO;
1292 }
1293
1294 /* racing with truncate? */
1295 if (offset > mapping->host->i_size) {
1296 kunmap(page);
1297 return 0; /* don't care */
1298 }
1299
1300 /* check to make sure that we are not extending the file */
1301 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1302 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1303
6508d904 1304 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1305 if (open_file) {
fa2989f4
PS
1306 bytes_written = cifs_write(open_file, open_file->pid,
1307 write_data, to - from, &offset);
6ab409b5 1308 cifsFileInfo_put(open_file);
1da177e4 1309 /* Does mm or vfs already set times? */
6148a742 1310 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1311 if ((bytes_written > 0) && (offset))
6148a742 1312 rc = 0;
bb5a9a04
SF
1313 else if (bytes_written < 0)
1314 rc = bytes_written;
6148a742 1315 } else {
b6b38f70 1316 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1317 rc = -EIO;
1318 }
1319
1320 kunmap(page);
1321 return rc;
1322}
1323
1da177e4 1324static int cifs_writepages(struct address_space *mapping,
37c0eb46 1325 struct writeback_control *wbc)
1da177e4 1326{
c3d17b63
JL
1327 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1328 bool done = false, scanned = false, range_whole = false;
1329 pgoff_t end, index;
1330 struct cifs_writedata *wdata;
37c0eb46 1331 struct page *page;
37c0eb46 1332 int rc = 0;
50c2f753 1333
37c0eb46 1334 /*
c3d17b63 1335 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1336 * one page at a time via cifs_writepage
1337 */
1338 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1339 return generic_writepages(mapping, wbc);
1340
111ebb6e 1341 if (wbc->range_cyclic) {
37c0eb46 1342 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1343 end = -1;
1344 } else {
1345 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1346 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1347 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1348 range_whole = true;
1349 scanned = true;
37c0eb46
SF
1350 }
1351retry:
c3d17b63
JL
1352 while (!done && index <= end) {
1353 unsigned int i, nr_pages, found_pages;
1354 pgoff_t next = 0, tofind;
1355 struct page **pages;
1356
1357 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1358 end - index) + 1;
1359
1360 wdata = cifs_writedata_alloc((unsigned int)tofind);
1361 if (!wdata) {
1362 rc = -ENOMEM;
1363 break;
1364 }
1365
1366 /*
1367 * find_get_pages_tag seems to return a max of 256 on each
1368 * iteration, so we must call it several times in order to
1369 * fill the array or the wsize is effectively limited to
1370 * 256 * PAGE_CACHE_SIZE.
1371 */
1372 found_pages = 0;
1373 pages = wdata->pages;
1374 do {
1375 nr_pages = find_get_pages_tag(mapping, &index,
1376 PAGECACHE_TAG_DIRTY,
1377 tofind, pages);
1378 found_pages += nr_pages;
1379 tofind -= nr_pages;
1380 pages += nr_pages;
1381 } while (nr_pages && tofind && index <= end);
1382
1383 if (found_pages == 0) {
1384 kref_put(&wdata->refcount, cifs_writedata_release);
1385 break;
1386 }
1387
1388 nr_pages = 0;
1389 for (i = 0; i < found_pages; i++) {
1390 page = wdata->pages[i];
37c0eb46
SF
1391 /*
1392 * At this point we hold neither mapping->tree_lock nor
1393 * lock on the page itself: the page may be truncated or
1394 * invalidated (changing page->mapping to NULL), or even
1395 * swizzled back from swapper_space to tmpfs file
1396 * mapping
1397 */
1398
c3d17b63 1399 if (nr_pages == 0)
37c0eb46 1400 lock_page(page);
529ae9aa 1401 else if (!trylock_page(page))
37c0eb46
SF
1402 break;
1403
1404 if (unlikely(page->mapping != mapping)) {
1405 unlock_page(page);
1406 break;
1407 }
1408
111ebb6e 1409 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1410 done = true;
37c0eb46
SF
1411 unlock_page(page);
1412 break;
1413 }
1414
1415 if (next && (page->index != next)) {
1416 /* Not next consecutive page */
1417 unlock_page(page);
1418 break;
1419 }
1420
1421 if (wbc->sync_mode != WB_SYNC_NONE)
1422 wait_on_page_writeback(page);
1423
1424 if (PageWriteback(page) ||
cb876f45 1425 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1426 unlock_page(page);
1427 break;
1428 }
84d2f07e 1429
cb876f45
LT
1430 /*
1431 * This actually clears the dirty bit in the radix tree.
1432 * See cifs_writepage() for more commentary.
1433 */
1434 set_page_writeback(page);
1435
84d2f07e 1436 if (page_offset(page) >= mapping->host->i_size) {
c3d17b63 1437 done = true;
84d2f07e 1438 unlock_page(page);
cb876f45 1439 end_page_writeback(page);
84d2f07e
SF
1440 break;
1441 }
1442
c3d17b63
JL
1443 wdata->pages[i] = page;
1444 next = page->index + 1;
1445 ++nr_pages;
1446 }
37c0eb46 1447
c3d17b63
JL
1448 /* reset index to refind any pages skipped */
1449 if (nr_pages == 0)
1450 index = wdata->pages[0]->index + 1;
84d2f07e 1451
c3d17b63
JL
1452 /* put any pages we aren't going to use */
1453 for (i = nr_pages; i < found_pages; i++) {
1454 page_cache_release(wdata->pages[i]);
1455 wdata->pages[i] = NULL;
1456 }
37c0eb46 1457
c3d17b63
JL
1458 /* nothing to write? */
1459 if (nr_pages == 0) {
1460 kref_put(&wdata->refcount, cifs_writedata_release);
1461 continue;
37c0eb46 1462 }
fbec9ab9 1463
c3d17b63
JL
1464 wdata->sync_mode = wbc->sync_mode;
1465 wdata->nr_pages = nr_pages;
1466 wdata->offset = page_offset(wdata->pages[0]);
941b853d 1467
c3d17b63
JL
1468 do {
1469 if (wdata->cfile != NULL)
1470 cifsFileInfo_put(wdata->cfile);
1471 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1472 false);
1473 if (!wdata->cfile) {
1474 cERROR(1, "No writable handles for inode");
1475 rc = -EBADF;
1476 break;
941b853d 1477 }
c3d17b63
JL
1478 rc = cifs_async_writev(wdata);
1479 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1480
c3d17b63
JL
1481 for (i = 0; i < nr_pages; ++i)
1482 unlock_page(wdata->pages[i]);
f3983c21 1483
c3d17b63
JL
1484 /* send failure -- clean up the mess */
1485 if (rc != 0) {
1486 for (i = 0; i < nr_pages; ++i) {
941b853d 1487 if (rc == -EAGAIN)
c3d17b63
JL
1488 redirty_page_for_writepage(wbc,
1489 wdata->pages[i]);
1490 else
1491 SetPageError(wdata->pages[i]);
1492 end_page_writeback(wdata->pages[i]);
1493 page_cache_release(wdata->pages[i]);
37c0eb46 1494 }
941b853d
JL
1495 if (rc != -EAGAIN)
1496 mapping_set_error(mapping, rc);
c3d17b63
JL
1497 }
1498 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1499
c3d17b63
JL
1500 wbc->nr_to_write -= nr_pages;
1501 if (wbc->nr_to_write <= 0)
1502 done = true;
b066a48c 1503
c3d17b63 1504 index = next;
37c0eb46 1505 }
c3d17b63 1506
37c0eb46
SF
1507 if (!scanned && !done) {
1508 /*
1509 * We hit the last page and there is more work to be done: wrap
1510 * back to the start of the file
1511 */
c3d17b63 1512 scanned = true;
37c0eb46
SF
1513 index = 0;
1514 goto retry;
1515 }
c3d17b63 1516
111ebb6e 1517 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1518 mapping->writeback_index = index;
1519
1da177e4
LT
1520 return rc;
1521}
1da177e4 1522
9ad1506b
PS
1523static int
1524cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1525{
9ad1506b 1526 int rc;
1da177e4
LT
1527 int xid;
1528
1529 xid = GetXid();
1530/* BB add check for wbc flags */
1531 page_cache_get(page);
ad7a2926 1532 if (!PageUptodate(page))
b6b38f70 1533 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1534
1535 /*
1536 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1537 *
1538 * A writepage() implementation always needs to do either this,
1539 * or re-dirty the page with "redirty_page_for_writepage()" in
1540 * the case of a failure.
1541 *
1542 * Just unlocking the page will cause the radix tree tag-bits
1543 * to fail to update with the state of the page correctly.
1544 */
fb8c4b14 1545 set_page_writeback(page);
9ad1506b 1546retry_write:
1da177e4 1547 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
1548 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1549 goto retry_write;
1550 else if (rc == -EAGAIN)
1551 redirty_page_for_writepage(wbc, page);
1552 else if (rc != 0)
1553 SetPageError(page);
1554 else
1555 SetPageUptodate(page);
cb876f45
LT
1556 end_page_writeback(page);
1557 page_cache_release(page);
1da177e4
LT
1558 FreeXid(xid);
1559 return rc;
1560}
1561
/* Write one dirty page back to the server, then release the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}
1568
d9414774
NP
1569static int cifs_write_end(struct file *file, struct address_space *mapping,
1570 loff_t pos, unsigned len, unsigned copied,
1571 struct page *page, void *fsdata)
1da177e4 1572{
d9414774
NP
1573 int rc;
1574 struct inode *inode = mapping->host;
d4ffff1f
PS
1575 struct cifsFileInfo *cfile = file->private_data;
1576 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1577 __u32 pid;
1578
1579 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1580 pid = cfile->pid;
1581 else
1582 pid = current->tgid;
1da177e4 1583
b6b38f70
JP
1584 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1585 page, pos, copied);
d9414774 1586
a98ee8c1
JL
1587 if (PageChecked(page)) {
1588 if (copied == len)
1589 SetPageUptodate(page);
1590 ClearPageChecked(page);
1591 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1592 SetPageUptodate(page);
ad7a2926 1593
1da177e4 1594 if (!PageUptodate(page)) {
d9414774
NP
1595 char *page_data;
1596 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1597 int xid;
1598
1599 xid = GetXid();
1da177e4
LT
1600 /* this is probably better than directly calling
1601 partialpage_write since in this function the file handle is
1602 known which we might as well leverage */
1603 /* BB check if anything else missing out of ppw
1604 such as updating last write time */
1605 page_data = kmap(page);
d4ffff1f 1606 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
d9414774 1607 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1608 kunmap(page);
d9414774
NP
1609
1610 FreeXid(xid);
fb8c4b14 1611 } else {
d9414774
NP
1612 rc = copied;
1613 pos += copied;
1da177e4
LT
1614 set_page_dirty(page);
1615 }
1616
d9414774
NP
1617 if (rc > 0) {
1618 spin_lock(&inode->i_lock);
1619 if (pos > inode->i_size)
1620 i_size_write(inode, pos);
1621 spin_unlock(&inode->i_lock);
1622 }
1623
1624 unlock_page(page);
1625 page_cache_release(page);
1626
1da177e4
LT
1627 return rc;
1628}
1629
02c24a82
JB
1630int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1631 int datasync)
1da177e4
LT
1632{
1633 int xid;
1634 int rc = 0;
96daf2b0 1635 struct cifs_tcon *tcon;
c21dfb69 1636 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1637 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 1638 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4 1639
02c24a82
JB
1640 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1641 if (rc)
1642 return rc;
1643 mutex_lock(&inode->i_mutex);
1644
1da177e4
LT
1645 xid = GetXid();
1646
b6b38f70 1647 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1648 file->f_path.dentry->d_name.name, datasync);
50c2f753 1649
6feb9891
PS
1650 if (!CIFS_I(inode)->clientCanCacheRead) {
1651 rc = cifs_invalidate_mapping(inode);
1652 if (rc) {
1653 cFYI(1, "rc: %d during invalidate phase", rc);
1654 rc = 0; /* don't care about it in fsync */
1655 }
1656 }
eb4b756b 1657
8be7e6ba
PS
1658 tcon = tlink_tcon(smbfile->tlink);
1659 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1660 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1661
1662 FreeXid(xid);
02c24a82 1663 mutex_unlock(&inode->i_mutex);
8be7e6ba
PS
1664 return rc;
1665}
1666
02c24a82 1667int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba
PS
1668{
1669 int xid;
1670 int rc = 0;
96daf2b0 1671 struct cifs_tcon *tcon;
8be7e6ba
PS
1672 struct cifsFileInfo *smbfile = file->private_data;
1673 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
02c24a82
JB
1674 struct inode *inode = file->f_mapping->host;
1675
1676 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1677 if (rc)
1678 return rc;
1679 mutex_lock(&inode->i_mutex);
8be7e6ba
PS
1680
1681 xid = GetXid();
1682
1683 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1684 file->f_path.dentry->d_name.name, datasync);
1685
1686 tcon = tlink_tcon(smbfile->tlink);
1687 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1688 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 1689
1da177e4 1690 FreeXid(xid);
02c24a82 1691 mutex_unlock(&inode->i_mutex);
1da177e4
LT
1692 return rc;
1693}
1694
1da177e4
LT
1695/*
1696 * As file closes, flush all cached write data for this inode checking
1697 * for write behind errors.
1698 */
75e1fcc0 1699int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1700{
fb8c4b14 1701 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1702 int rc = 0;
1703
eb4b756b 1704 if (file->f_mode & FMODE_WRITE)
d3f1322a 1705 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1706
b6b38f70 1707 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1708
1709 return rc;
1710}
1711
72432ffc
PS
1712static int
1713cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1714{
1715 int rc = 0;
1716 unsigned long i;
1717
1718 for (i = 0; i < num_pages; i++) {
1719 pages[i] = alloc_page(__GFP_HIGHMEM);
1720 if (!pages[i]) {
1721 /*
1722 * save number of pages we have already allocated and
1723 * return with ENOMEM error
1724 */
1725 num_pages = i;
1726 rc = -ENOMEM;
1727 goto error;
1728 }
1729 }
1730
1731 return rc;
1732
1733error:
1734 for (i = 0; i < num_pages; i++)
1735 put_page(pages[i]);
1736 return rc;
1737}
1738
1739static inline
1740size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1741{
1742 size_t num_pages;
1743 size_t clen;
1744
1745 clen = min_t(const size_t, len, wsize);
1746 num_pages = clen / PAGE_CACHE_SIZE;
1747 if (clen % PAGE_CACHE_SIZE)
1748 num_pages++;
1749
1750 if (cur_len)
1751 *cur_len = clen;
1752
1753 return num_pages;
1754}
1755
1756static ssize_t
1757cifs_iovec_write(struct file *file, const struct iovec *iov,
1758 unsigned long nr_segs, loff_t *poffset)
1759{
76429c14
PS
1760 unsigned int written;
1761 unsigned long num_pages, npages, i;
1762 size_t copied, len, cur_len;
1763 ssize_t total_written = 0;
72432ffc
PS
1764 struct kvec *to_send;
1765 struct page **pages;
1766 struct iov_iter it;
1767 struct inode *inode;
1768 struct cifsFileInfo *open_file;
96daf2b0 1769 struct cifs_tcon *pTcon;
72432ffc 1770 struct cifs_sb_info *cifs_sb;
fa2989f4 1771 struct cifs_io_parms io_parms;
72432ffc 1772 int xid, rc;
d4ffff1f 1773 __u32 pid;
72432ffc
PS
1774
1775 len = iov_length(iov, nr_segs);
1776 if (!len)
1777 return 0;
1778
1779 rc = generic_write_checks(file, poffset, &len, 0);
1780 if (rc)
1781 return rc;
1782
1783 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1784 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1785
1786 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1787 if (!pages)
1788 return -ENOMEM;
1789
1790 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1791 if (!to_send) {
1792 kfree(pages);
1793 return -ENOMEM;
1794 }
1795
1796 rc = cifs_write_allocate_pages(pages, num_pages);
1797 if (rc) {
1798 kfree(pages);
1799 kfree(to_send);
1800 return rc;
1801 }
1802
1803 xid = GetXid();
1804 open_file = file->private_data;
d4ffff1f
PS
1805
1806 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1807 pid = open_file->pid;
1808 else
1809 pid = current->tgid;
1810
72432ffc
PS
1811 pTcon = tlink_tcon(open_file->tlink);
1812 inode = file->f_path.dentry->d_inode;
1813
1814 iov_iter_init(&it, iov, nr_segs, len, 0);
1815 npages = num_pages;
1816
1817 do {
1818 size_t save_len = cur_len;
1819 for (i = 0; i < npages; i++) {
1820 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1821 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1822 copied);
1823 cur_len -= copied;
1824 iov_iter_advance(&it, copied);
1825 to_send[i+1].iov_base = kmap(pages[i]);
1826 to_send[i+1].iov_len = copied;
1827 }
1828
1829 cur_len = save_len - cur_len;
1830
1831 do {
1832 if (open_file->invalidHandle) {
1833 rc = cifs_reopen_file(open_file, false);
1834 if (rc != 0)
1835 break;
1836 }
fa2989f4 1837 io_parms.netfid = open_file->netfid;
d4ffff1f 1838 io_parms.pid = pid;
fa2989f4
PS
1839 io_parms.tcon = pTcon;
1840 io_parms.offset = *poffset;
1841 io_parms.length = cur_len;
1842 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1843 npages, 0);
72432ffc
PS
1844 } while (rc == -EAGAIN);
1845
1846 for (i = 0; i < npages; i++)
1847 kunmap(pages[i]);
1848
1849 if (written) {
1850 len -= written;
1851 total_written += written;
1852 cifs_update_eof(CIFS_I(inode), *poffset, written);
1853 *poffset += written;
1854 } else if (rc < 0) {
1855 if (!total_written)
1856 total_written = rc;
1857 break;
1858 }
1859
1860 /* get length and number of kvecs of the next write */
1861 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1862 } while (len > 0);
1863
1864 if (total_written > 0) {
1865 spin_lock(&inode->i_lock);
1866 if (*poffset > inode->i_size)
1867 i_size_write(inode, *poffset);
1868 spin_unlock(&inode->i_lock);
1869 }
1870
1871 cifs_stats_bytes_written(pTcon, total_written);
1872 mark_inode_dirty_sync(inode);
1873
1874 for (i = 0; i < num_pages; i++)
1875 put_page(pages[i]);
1876 kfree(to_send);
1877 kfree(pages);
1878 FreeXid(xid);
1879 return total_written;
1880}
1881
0b81c1c4 1882ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
1883 unsigned long nr_segs, loff_t pos)
1884{
1885 ssize_t written;
1886 struct inode *inode;
1887
1888 inode = iocb->ki_filp->f_path.dentry->d_inode;
1889
1890 /*
1891 * BB - optimize the way when signing is disabled. We can drop this
1892 * extra memory-to-memory copying and use iovec buffers for constructing
1893 * write request.
1894 */
1895
1896 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1897 if (written > 0) {
1898 CIFS_I(inode)->invalid_mapping = true;
1899 iocb->ki_pos = pos;
1900 }
1901
1902 return written;
1903}
1904
1905ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1906 unsigned long nr_segs, loff_t pos)
1907{
1908 struct inode *inode;
1909
1910 inode = iocb->ki_filp->f_path.dentry->d_inode;
1911
1912 if (CIFS_I(inode)->clientCanCacheAll)
1913 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1914
1915 /*
1916 * In strict cache mode we need to write the data to the server exactly
1917 * from the pos to pos+len-1 rather than flush all affected pages
1918 * because it may cause a error with mandatory locks on these pages but
1919 * not on the region from pos to ppos+len-1.
1920 */
1921
1922 return cifs_user_writev(iocb, iov, nr_segs, pos);
1923}
1924
a70307ee
PS
1925static ssize_t
1926cifs_iovec_read(struct file *file, const struct iovec *iov,
1927 unsigned long nr_segs, loff_t *poffset)
1da177e4 1928{
a70307ee
PS
1929 int rc;
1930 int xid;
76429c14
PS
1931 ssize_t total_read;
1932 unsigned int bytes_read = 0;
a70307ee
PS
1933 size_t len, cur_len;
1934 int iov_offset = 0;
1da177e4 1935 struct cifs_sb_info *cifs_sb;
96daf2b0 1936 struct cifs_tcon *pTcon;
1da177e4 1937 struct cifsFileInfo *open_file;
1da177e4 1938 struct smb_com_read_rsp *pSMBr;
d4ffff1f 1939 struct cifs_io_parms io_parms;
a70307ee 1940 char *read_data;
5eba8ab3 1941 unsigned int rsize;
d4ffff1f 1942 __u32 pid;
a70307ee
PS
1943
1944 if (!nr_segs)
1945 return 0;
1946
1947 len = iov_length(iov, nr_segs);
1948 if (!len)
1949 return 0;
1da177e4
LT
1950
1951 xid = GetXid();
e6a00296 1952 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1953
5eba8ab3
JL
1954 /* FIXME: set up handlers for larger reads and/or convert to async */
1955 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
1956
c21dfb69 1957 open_file = file->private_data;
13cfb733 1958 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1959
d4ffff1f
PS
1960 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1961 pid = open_file->pid;
1962 else
1963 pid = current->tgid;
1964
ad7a2926 1965 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1966 cFYI(1, "attempting read on write only file instance");
ad7a2926 1967
a70307ee 1968 for (total_read = 0; total_read < len; total_read += bytes_read) {
5eba8ab3 1969 cur_len = min_t(const size_t, len - total_read, rsize);
1da177e4 1970 rc = -EAGAIN;
a70307ee
PS
1971 read_data = NULL;
1972
1da177e4 1973 while (rc == -EAGAIN) {
ec637e3f 1974 int buf_type = CIFS_NO_BUFFER;
cdff08e7 1975 if (open_file->invalidHandle) {
15886177 1976 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1977 if (rc != 0)
1978 break;
1979 }
d4ffff1f
PS
1980 io_parms.netfid = open_file->netfid;
1981 io_parms.pid = pid;
1982 io_parms.tcon = pTcon;
1983 io_parms.offset = *poffset;
2cebaa58 1984 io_parms.length = cur_len;
d4ffff1f 1985 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
a70307ee
PS
1986 &read_data, &buf_type);
1987 pSMBr = (struct smb_com_read_rsp *)read_data;
1988 if (read_data) {
1989 char *data_offset = read_data + 4 +
1990 le16_to_cpu(pSMBr->DataOffset);
1991 if (memcpy_toiovecend(iov, data_offset,
1992 iov_offset, bytes_read))
93544cc6 1993 rc = -EFAULT;
fb8c4b14 1994 if (buf_type == CIFS_SMALL_BUFFER)
a70307ee 1995 cifs_small_buf_release(read_data);
fb8c4b14 1996 else if (buf_type == CIFS_LARGE_BUFFER)
a70307ee
PS
1997 cifs_buf_release(read_data);
1998 read_data = NULL;
1999 iov_offset += bytes_read;
1da177e4
LT
2000 }
2001 }
a70307ee 2002
1da177e4
LT
2003 if (rc || (bytes_read == 0)) {
2004 if (total_read) {
2005 break;
2006 } else {
2007 FreeXid(xid);
2008 return rc;
2009 }
2010 } else {
a4544347 2011 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
2012 *poffset += bytes_read;
2013 }
2014 }
a70307ee 2015
1da177e4
LT
2016 FreeXid(xid);
2017 return total_read;
2018}
2019
0b81c1c4 2020ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2021 unsigned long nr_segs, loff_t pos)
2022{
2023 ssize_t read;
2024
2025 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2026 if (read > 0)
2027 iocb->ki_pos = pos;
2028
2029 return read;
2030}
2031
2032ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2033 unsigned long nr_segs, loff_t pos)
2034{
2035 struct inode *inode;
2036
2037 inode = iocb->ki_filp->f_path.dentry->d_inode;
2038
2039 if (CIFS_I(inode)->clientCanCacheRead)
2040 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2041
2042 /*
2043 * In strict cache mode we need to read from the server all the time
2044 * if we don't have level II oplock because the server can delay mtime
2045 * change - so we can't make a decision about inode invalidating.
2046 * And we can also fail with pagereading if there are mandatory locks
2047 * on pages affected by this read but not on the region from pos to
2048 * pos+len-1.
2049 */
2050
2051 return cifs_user_readv(iocb, iov, nr_segs, pos);
2052}
1da177e4
LT
2053
2054static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 2055 loff_t *poffset)
1da177e4
LT
2056{
2057 int rc = -EACCES;
2058 unsigned int bytes_read = 0;
2059 unsigned int total_read;
2060 unsigned int current_read_size;
5eba8ab3 2061 unsigned int rsize;
1da177e4 2062 struct cifs_sb_info *cifs_sb;
96daf2b0 2063 struct cifs_tcon *pTcon;
1da177e4
LT
2064 int xid;
2065 char *current_offset;
2066 struct cifsFileInfo *open_file;
d4ffff1f 2067 struct cifs_io_parms io_parms;
ec637e3f 2068 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 2069 __u32 pid;
1da177e4
LT
2070
2071 xid = GetXid();
e6a00296 2072 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 2073
5eba8ab3
JL
2074 /* FIXME: set up handlers for larger reads and/or convert to async */
2075 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2076
1da177e4 2077 if (file->private_data == NULL) {
0f3bc09e 2078 rc = -EBADF;
1da177e4 2079 FreeXid(xid);
0f3bc09e 2080 return rc;
1da177e4 2081 }
c21dfb69 2082 open_file = file->private_data;
13cfb733 2083 pTcon = tlink_tcon(open_file->tlink);
1da177e4 2084
d4ffff1f
PS
2085 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2086 pid = open_file->pid;
2087 else
2088 pid = current->tgid;
2089
1da177e4 2090 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 2091 cFYI(1, "attempting read on write only file instance");
1da177e4 2092
fb8c4b14 2093 for (total_read = 0, current_offset = read_data;
1da177e4
LT
2094 read_size > total_read;
2095 total_read += bytes_read, current_offset += bytes_read) {
5eba8ab3
JL
2096 current_read_size = min_t(uint, read_size - total_read, rsize);
2097
f9f5c817
SF
2098 /* For windows me and 9x we do not want to request more
2099 than it negotiated since it will refuse the read then */
fb8c4b14 2100 if ((pTcon->ses) &&
f9f5c817 2101 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
7748dd6e 2102 current_read_size = min_t(uint, current_read_size,
c974befa 2103 CIFSMaxBufSize);
f9f5c817 2104 }
1da177e4
LT
2105 rc = -EAGAIN;
2106 while (rc == -EAGAIN) {
cdff08e7 2107 if (open_file->invalidHandle) {
15886177 2108 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
2109 if (rc != 0)
2110 break;
2111 }
d4ffff1f
PS
2112 io_parms.netfid = open_file->netfid;
2113 io_parms.pid = pid;
2114 io_parms.tcon = pTcon;
2115 io_parms.offset = *poffset;
2116 io_parms.length = current_read_size;
2117 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2118 &current_offset, &buf_type);
1da177e4
LT
2119 }
2120 if (rc || (bytes_read == 0)) {
2121 if (total_read) {
2122 break;
2123 } else {
2124 FreeXid(xid);
2125 return rc;
2126 }
2127 } else {
a4544347 2128 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
2129 *poffset += bytes_read;
2130 }
2131 }
2132 FreeXid(xid);
2133 return total_read;
2134}
2135
ca83ce3d
JL
2136/*
2137 * If the page is mmap'ed into a process' page tables, then we need to make
2138 * sure that it doesn't change while being written back.
2139 */
2140static int
2141cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2142{
2143 struct page *page = vmf->page;
2144
2145 lock_page(page);
2146 return VM_FAULT_LOCKED;
2147}
2148
2149static struct vm_operations_struct cifs_file_vm_ops = {
2150 .fault = filemap_fault,
2151 .page_mkwrite = cifs_page_mkwrite,
2152};
2153
7a6a19b1
PS
2154int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2155{
2156 int rc, xid;
2157 struct inode *inode = file->f_path.dentry->d_inode;
2158
2159 xid = GetXid();
2160
6feb9891
PS
2161 if (!CIFS_I(inode)->clientCanCacheRead) {
2162 rc = cifs_invalidate_mapping(inode);
2163 if (rc)
2164 return rc;
2165 }
7a6a19b1
PS
2166
2167 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2168 if (rc == 0)
2169 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
2170 FreeXid(xid);
2171 return rc;
2172}
2173
1da177e4
LT
2174int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2175{
1da177e4
LT
2176 int rc, xid;
2177
2178 xid = GetXid();
abab095d 2179 rc = cifs_revalidate_file(file);
1da177e4 2180 if (rc) {
b6b38f70 2181 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
2182 FreeXid(xid);
2183 return rc;
2184 }
2185 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2186 if (rc == 0)
2187 vma->vm_ops = &cifs_file_vm_ops;
1da177e4
LT
2188 FreeXid(xid);
2189 return rc;
2190}
2191
1da177e4
LT
/*
 * ->readpages handler: read a batch of pages via async SMB reads.
 *
 * @page_list holds not-yet-cached pages in order of declining index.
 * Pages are moved in contiguous, rsize-bounded runs onto a private list,
 * added to the page cache locked, and handed to cifs_async_readv() as one
 * cifs_readdata request per run.  Returns 0 on success or the first error;
 * pages still on @page_list when an error occurs are left for the VFS,
 * which falls back to ->readpage for them.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the opener's pid to the server if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		/* lowest-index page sits at the tail of page_list */
		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/*
		 * Pin the open file for the duration of the async request;
		 * presumably released by the readv completion path - the
		 * matching put is not visible in this file chunk.
		 */
		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		/* retry after transparent reconnects (-EAGAIN) */
		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			/* unwind: return this run's pages to the LRU */
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
2341
/*
 * Fill one page cache page at *@poffset, trying fscache first and falling
 * back to a synchronous server read via cifs_read().
 *
 * On a successful server read the tail of a short read is zero-filled,
 * the page is marked uptodate and pushed into fscache, and i_atime is
 * refreshed.  The page is expected locked by the caller; this function
 * does not unlock it.  Returns 0 on success or a negative error code.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	/* extra ref + kmap held across the server read; dropped below */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the part of the page beyond what the server returned */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2385
2386static int cifs_readpage(struct file *file, struct page *page)
2387{
2388 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2389 int rc = -EACCES;
2390 int xid;
2391
2392 xid = GetXid();
2393
2394 if (file->private_data == NULL) {
0f3bc09e 2395 rc = -EBADF;
1da177e4 2396 FreeXid(xid);
0f3bc09e 2397 return rc;
1da177e4
LT
2398 }
2399
b6b38f70
JP
2400 cFYI(1, "readpage %p at offset %d 0x%x\n",
2401 page, (int)offset, (int)offset);
1da177e4
LT
2402
2403 rc = cifs_readpage_worker(file, page, &offset);
2404
2405 unlock_page(page);
2406
2407 FreeXid(xid);
2408 return rc;
2409}
2410
a403a0a3
SF
2411static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2412{
2413 struct cifsFileInfo *open_file;
2414
4477288a 2415 spin_lock(&cifs_file_list_lock);
a403a0a3 2416 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2417 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2418 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2419 return 1;
2420 }
2421 }
4477288a 2422 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2423 return 0;
2424}
2425
1da177e4
LT
2426/* We do not want to update the file size from server for inodes
2427 open for write - to avoid races with writepage extending
2428 the file - in the future we could consider allowing
fb8c4b14 2429 refreshing the inode only on increases in the file size
1da177e4
LT
2430 but this is tricky to do without racing with writebehind
2431 page caching in the current Linux kernel design */
4b18f2a9 2432bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2433{
a403a0a3 2434 if (!cifsInode)
4b18f2a9 2435 return true;
50c2f753 2436
a403a0a3
SF
2437 if (is_inode_writable(cifsInode)) {
2438 /* This inode is open for write at least once */
c32a0b68
SF
2439 struct cifs_sb_info *cifs_sb;
2440
c32a0b68 2441 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2442 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2443 /* since no page cache to corrupt on directio
c32a0b68 2444 we can change size safely */
4b18f2a9 2445 return true;
c32a0b68
SF
2446 }
2447
fb8c4b14 2448 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2449 return true;
7ba52631 2450
4b18f2a9 2451 return false;
23e7dd7d 2452 } else
4b18f2a9 2453 return true;
1da177e4
LT
2454}
2455
d9414774
NP
/*
 * ->write_begin handler: locate/lock the page cache page for a write of
 * @len bytes at @pos and make sure the parts not being overwritten are
 * usable.
 *
 * The read from the server is skipped when the page is already uptodate,
 * when the write covers the whole page, or (with a read oplock) when the
 * page lies at/after EOF - in that case the untouched ranges are zeroed
 * and PageChecked is set so cifs_write_end can mark the page uptodate.
 * Returns 0 with the locked page in *@pagep, or -ENOMEM.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2527
85f2d6b4
SJ
2528static int cifs_release_page(struct page *page, gfp_t gfp)
2529{
2530 if (PagePrivate(page))
2531 return 0;
2532
2533 return cifs_fscache_release_page(page, gfp);
2534}
2535
2536static void cifs_invalidate_page(struct page *page, unsigned long offset)
2537{
2538 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2539
2540 if (offset == 0)
2541 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2542}
2543
9ad1506b
PS
2544static int cifs_launder_page(struct page *page)
2545{
2546 int rc = 0;
2547 loff_t range_start = page_offset(page);
2548 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2549 struct writeback_control wbc = {
2550 .sync_mode = WB_SYNC_ALL,
2551 .nr_to_write = 0,
2552 .range_start = range_start,
2553 .range_end = range_end,
2554 };
2555
2556 cFYI(1, "Launder page: %p", page);
2557
2558 if (clear_page_dirty_for_io(page))
2559 rc = cifs_writepage_locked(page, &wbc);
2560
2561 cifs_fscache_invalidate_page(page, page->mapping->host);
2562 return rc;
2563}
2564
/*
 * Deferred work handler run when the server breaks this file's oplock.
 *
 * For regular files: breaks any local lease to match the new caching
 * level, flushes dirty pages, and - when read caching is fully lost -
 * waits for writeback and invalidates the cached inode data.  Cached
 * byte-range locks are then pushed to the server (they can no longer be
 * satisfied locally), and finally the oplock break is acknowledged with
 * an OPLOCK_RELEASE lock request unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* downgrade to read-only lease if read caching survives,
		   otherwise break the lease entirely */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* read cache lost: wait out writeback and drop
			   all cached data for this inode */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* hand locally-cached brlocks to the server before acking */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
2605
/*
 * Default address space operations, used when the negotiated buffer is
 * large enough for multi-page reads (includes ->readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
273d81d6
DK
2618
2619/*
2620 * cifs_readpages requires the server to support a buffer large enough to
2621 * contain the header plus one complete page of data. Otherwise, we need
2622 * to leave cifs_readpages out of the address space operations.
2623 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no ->readpages: server buffer can't hold header + full page */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
This page took 0.685525 seconds and 5 git commands to generate.