Merge branch 'dev' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
[deliverable/linux.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
108int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
110 __u32 *poplock, __u16 *pnetfid, int xid)
111{
112 int rc;
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
96daf2b0 118 struct cifs_tcon *tcon;
608712fe
JL
119
120 cFYI(1, "posix open %s", full_path);
121
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
124 return -ENOMEM;
125
126 tlink = cifs_sb_tlink(cifs_sb);
127 if (IS_ERR(tlink)) {
128 rc = PTR_ERR(tlink);
129 goto posix_open_ret;
130 }
131
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
134
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
141
142 if (rc)
143 goto posix_open_ret;
144
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
147
148 if (!pinode)
149 goto posix_open_ret; /* caller does not need info */
150
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
152
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
157 if (!*pinode) {
158 rc = -ENOMEM;
159 goto posix_open_ret;
160 }
161 } else {
162 cifs_fattr_to_inode(*pinode, &fattr);
163 }
164
165posix_open_ret:
166 kfree(presp_data);
167 return rc;
168}
169
eeb910a6
PS
170static int
171cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
96daf2b0 172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
eeb910a6
PS
173 __u16 *pnetfid, int xid)
174{
175 int rc;
176 int desiredAccess;
177 int disposition;
3d3ea8e6 178 int create_options = CREATE_NOT_DIR;
eeb910a6
PS
179 FILE_ALL_INFO *buf;
180
181 desiredAccess = cifs_convert_flags(f_flags);
182
183/*********************************************************************
184 * open flag mapping table:
185 *
186 * POSIX Flag CIFS Disposition
187 * ---------- ----------------
188 * O_CREAT FILE_OPEN_IF
189 * O_CREAT | O_EXCL FILE_CREATE
190 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
191 * O_TRUNC FILE_OVERWRITE
192 * none of the above FILE_OPEN
193 *
194 * Note that there is not a direct match between disposition
195 * FILE_SUPERSEDE (ie create whether or not file exists although
196 * O_CREAT | O_TRUNC is similar but truncates the existing
197 * file rather than creating a new file as FILE_SUPERSEDE does
198 * (which uses the attributes / metadata passed in on open call)
199 *?
200 *? O_SYNC is a reasonable match to CIFS writethrough flag
201 *? and the read write flags match reasonably. O_LARGEFILE
202 *? is irrelevant because largefile support is always used
203 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
204 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
205 *********************************************************************/
206
207 disposition = cifs_get_disposition(f_flags);
208
209 /* BB pass O_SYNC flag through on file attributes .. BB */
210
211 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
212 if (!buf)
213 return -ENOMEM;
214
3d3ea8e6
SP
215 if (backup_cred(cifs_sb))
216 create_options |= CREATE_OPEN_BACKUP_INTENT;
217
eeb910a6
PS
218 if (tcon->ses->capabilities & CAP_NT_SMBS)
219 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
3d3ea8e6 220 desiredAccess, create_options, pnetfid, poplock, buf,
eeb910a6
PS
221 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
222 & CIFS_MOUNT_MAP_SPECIAL_CHR);
223 else
224 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
225 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
226 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
227 & CIFS_MOUNT_MAP_SPECIAL_CHR);
228
229 if (rc)
230 goto out;
231
232 if (tcon->unix_ext)
233 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
234 xid);
235 else
236 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
237 xid, pnetfid);
238
239out:
240 kfree(buf);
241 return rc;
242}
243
15ecb436
JL
244struct cifsFileInfo *
245cifs_new_fileinfo(__u16 fileHandle, struct file *file,
246 struct tcon_link *tlink, __u32 oplock)
247{
248 struct dentry *dentry = file->f_path.dentry;
249 struct inode *inode = dentry->d_inode;
250 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
251 struct cifsFileInfo *pCifsFile;
252
253 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
254 if (pCifsFile == NULL)
255 return pCifsFile;
256
5f6dbc9e 257 pCifsFile->count = 1;
15ecb436
JL
258 pCifsFile->netfid = fileHandle;
259 pCifsFile->pid = current->tgid;
260 pCifsFile->uid = current_fsuid();
261 pCifsFile->dentry = dget(dentry);
262 pCifsFile->f_flags = file->f_flags;
263 pCifsFile->invalidHandle = false;
15ecb436
JL
264 pCifsFile->tlink = cifs_get_tlink(tlink);
265 mutex_init(&pCifsFile->fh_mutex);
15ecb436
JL
266 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
267
4477288a 268 spin_lock(&cifs_file_list_lock);
15ecb436
JL
269 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
270 /* if readable file instance put first in list*/
271 if (file->f_mode & FMODE_READ)
272 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
273 else
274 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
4477288a 275 spin_unlock(&cifs_file_list_lock);
15ecb436 276
c6723628 277 cifs_set_oplock_level(pCifsInode, oplock);
85160e03 278 pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;
15ecb436
JL
279
280 file->private_data = pCifsFile;
281 return pCifsFile;
282}
283
85160e03
PS
284static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
285
cdff08e7
SF
286/*
287 * Release a reference on the file private data. This may involve closing
5f6dbc9e
JL
288 * the filehandle out on the server. Must be called without holding
289 * cifs_file_list_lock.
cdff08e7 290 */
b33879aa
JL
291void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
292{
e66673e3 293 struct inode *inode = cifs_file->dentry->d_inode;
96daf2b0 294 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
e66673e3 295 struct cifsInodeInfo *cifsi = CIFS_I(inode);
4f8ba8a0 296 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
cdff08e7
SF
297 struct cifsLockInfo *li, *tmp;
298
299 spin_lock(&cifs_file_list_lock);
5f6dbc9e 300 if (--cifs_file->count > 0) {
cdff08e7
SF
301 spin_unlock(&cifs_file_list_lock);
302 return;
303 }
304
305 /* remove it from the lists */
306 list_del(&cifs_file->flist);
307 list_del(&cifs_file->tlist);
308
309 if (list_empty(&cifsi->openFileList)) {
310 cFYI(1, "closing last open instance for inode %p",
311 cifs_file->dentry->d_inode);
4f8ba8a0
PS
312
313 /* in strict cache mode we need invalidate mapping on the last
314 close because it may cause a error when we open this file
315 again and get at least level II oplock */
316 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
317 CIFS_I(inode)->invalid_mapping = true;
318
c6723628 319 cifs_set_oplock_level(cifsi, 0);
cdff08e7
SF
320 }
321 spin_unlock(&cifs_file_list_lock);
322
ad635942
JL
323 cancel_work_sync(&cifs_file->oplock_break);
324
cdff08e7
SF
325 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
326 int xid, rc;
327
328 xid = GetXid();
329 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
330 FreeXid(xid);
331 }
332
333 /* Delete any outstanding lock records. We'll lose them when the file
334 * is closed anyway.
335 */
d59dad2b
PS
336 mutex_lock(&cifsi->lock_mutex);
337 list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
338 if (li->netfid != cifs_file->netfid)
339 continue;
cdff08e7 340 list_del(&li->llist);
85160e03 341 cifs_del_lock_waiters(li);
cdff08e7 342 kfree(li);
b33879aa 343 }
d59dad2b 344 mutex_unlock(&cifsi->lock_mutex);
cdff08e7
SF
345
346 cifs_put_tlink(cifs_file->tlink);
347 dput(cifs_file->dentry);
348 kfree(cifs_file);
b33879aa
JL
349}
350
1da177e4
LT
351int cifs_open(struct inode *inode, struct file *file)
352{
353 int rc = -EACCES;
590a3fe0
JL
354 int xid;
355 __u32 oplock;
1da177e4 356 struct cifs_sb_info *cifs_sb;
96daf2b0 357 struct cifs_tcon *tcon;
7ffec372 358 struct tcon_link *tlink;
6ca9f3ba 359 struct cifsFileInfo *pCifsFile = NULL;
1da177e4 360 char *full_path = NULL;
7e12eddb 361 bool posix_open_ok = false;
1da177e4 362 __u16 netfid;
1da177e4
LT
363
364 xid = GetXid();
365
366 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
367 tlink = cifs_sb_tlink(cifs_sb);
368 if (IS_ERR(tlink)) {
369 FreeXid(xid);
370 return PTR_ERR(tlink);
371 }
372 tcon = tlink_tcon(tlink);
1da177e4 373
e6a00296 374 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 375 if (full_path == NULL) {
0f3bc09e 376 rc = -ENOMEM;
232341ba 377 goto out;
1da177e4
LT
378 }
379
b6b38f70
JP
380 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
381 inode, file->f_flags, full_path);
276a74a4 382
e7504734 383 if (enable_oplocks)
276a74a4
SF
384 oplock = REQ_OPLOCK;
385 else
386 oplock = 0;
387
64cc2c63
SF
388 if (!tcon->broken_posix_open && tcon->unix_ext &&
389 (tcon->ses->capabilities & CAP_UNIX) &&
276a74a4
SF
390 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
391 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 392 /* can not refresh inode info since size could be stale */
2422f676 393 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 394 cifs_sb->mnt_file_mode /* ignored */,
608712fe 395 file->f_flags, &oplock, &netfid, xid);
276a74a4 396 if (rc == 0) {
b6b38f70 397 cFYI(1, "posix open succeeded");
7e12eddb 398 posix_open_ok = true;
64cc2c63
SF
399 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
400 if (tcon->ses->serverNOS)
b6b38f70 401 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
402 " unexpected error on SMB posix open"
403 ", disabling posix open support."
404 " Check if server update available.",
405 tcon->ses->serverName,
b6b38f70 406 tcon->ses->serverNOS);
64cc2c63 407 tcon->broken_posix_open = true;
276a74a4
SF
408 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
409 (rc != -EOPNOTSUPP)) /* path not found or net err */
410 goto out;
64cc2c63
SF
411 /* else fallthrough to retry open the old way on network i/o
412 or DFS errors */
276a74a4
SF
413 }
414
7e12eddb
PS
415 if (!posix_open_ok) {
416 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
417 file->f_flags, &oplock, &netfid, xid);
418 if (rc)
419 goto out;
420 }
47c78b7f 421
abfe1eed 422 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
6ca9f3ba 423 if (pCifsFile == NULL) {
7e12eddb 424 CIFSSMBClose(xid, tcon, netfid);
1da177e4
LT
425 rc = -ENOMEM;
426 goto out;
427 }
1da177e4 428
9451a9a5
SJ
429 cifs_fscache_set_inode_cookie(inode, file);
430
7e12eddb 431 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1da177e4
LT
432 /* time to set mode which we can not set earlier due to
433 problems creating new read-only files */
7e12eddb
PS
434 struct cifs_unix_set_info_args args = {
435 .mode = inode->i_mode,
436 .uid = NO_CHANGE_64,
437 .gid = NO_CHANGE_64,
438 .ctime = NO_CHANGE_64,
439 .atime = NO_CHANGE_64,
440 .mtime = NO_CHANGE_64,
441 .device = 0,
442 };
d44a9fe2
JL
443 CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
444 pCifsFile->pid);
1da177e4
LT
445 }
446
447out:
1da177e4
LT
448 kfree(full_path);
449 FreeXid(xid);
7ffec372 450 cifs_put_tlink(tlink);
1da177e4
LT
451 return rc;
452}
453
/*
 * Try to reacquire byte range locks that were released when the
 * session to the server was lost.  Currently a stub.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
464
15886177 465static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
1da177e4
LT
466{
467 int rc = -EACCES;
590a3fe0
JL
468 int xid;
469 __u32 oplock;
1da177e4 470 struct cifs_sb_info *cifs_sb;
96daf2b0 471 struct cifs_tcon *tcon;
1da177e4 472 struct cifsInodeInfo *pCifsInode;
fb8c4b14 473 struct inode *inode;
1da177e4
LT
474 char *full_path = NULL;
475 int desiredAccess;
476 int disposition = FILE_OPEN;
3d3ea8e6 477 int create_options = CREATE_NOT_DIR;
1da177e4
LT
478 __u16 netfid;
479
1da177e4 480 xid = GetXid();
f0a71eb8 481 mutex_lock(&pCifsFile->fh_mutex);
4b18f2a9 482 if (!pCifsFile->invalidHandle) {
f0a71eb8 483 mutex_unlock(&pCifsFile->fh_mutex);
0f3bc09e 484 rc = 0;
1da177e4 485 FreeXid(xid);
0f3bc09e 486 return rc;
1da177e4
LT
487 }
488
15886177 489 inode = pCifsFile->dentry->d_inode;
1da177e4 490 cifs_sb = CIFS_SB(inode->i_sb);
13cfb733 491 tcon = tlink_tcon(pCifsFile->tlink);
3a9f462f 492
1da177e4
LT
493/* can not grab rename sem here because various ops, including
494 those that already have the rename sem can end up causing writepage
495 to get called and if the server was down that means we end up here,
496 and we can never tell if the caller already has the rename_sem */
15886177 497 full_path = build_path_from_dentry(pCifsFile->dentry);
1da177e4 498 if (full_path == NULL) {
3a9f462f 499 rc = -ENOMEM;
f0a71eb8 500 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4 501 FreeXid(xid);
3a9f462f 502 return rc;
1da177e4
LT
503 }
504
b6b38f70 505 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
15886177 506 inode, pCifsFile->f_flags, full_path);
1da177e4 507
e7504734 508 if (enable_oplocks)
1da177e4
LT
509 oplock = REQ_OPLOCK;
510 else
4b18f2a9 511 oplock = 0;
1da177e4 512
7fc8f4e9
SF
513 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
514 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
515 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
516
517 /*
518 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
519 * original open. Must mask them off for a reopen.
520 */
15886177
JL
521 unsigned int oflags = pCifsFile->f_flags &
522 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 523
2422f676 524 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
fa588e0c
SF
525 cifs_sb->mnt_file_mode /* ignored */,
526 oflags, &oplock, &netfid, xid);
7fc8f4e9 527 if (rc == 0) {
b6b38f70 528 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
529 goto reopen_success;
530 }
531 /* fallthrough to retry open the old way on errors, especially
532 in the reconnect path it is important to retry hard */
533 }
534
15886177 535 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
7fc8f4e9 536
3d3ea8e6
SP
537 if (backup_cred(cifs_sb))
538 create_options |= CREATE_OPEN_BACKUP_INTENT;
539
1da177e4 540 /* Can not refresh inode by passing in file_info buf to be returned
fb8c4b14
SF
541 by SMBOpen and then calling get_inode_info with returned buf
542 since file might have write behind data that needs to be flushed
1da177e4
LT
543 and server version of file size can be stale. If we knew for sure
544 that inode was not dirty locally we could do this */
545
7fc8f4e9 546 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
3d3ea8e6 547 create_options, &netfid, &oplock, NULL,
fb8c4b14 548 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
737b758c 549 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4 550 if (rc) {
f0a71eb8 551 mutex_unlock(&pCifsFile->fh_mutex);
b6b38f70
JP
552 cFYI(1, "cifs_open returned 0x%x", rc);
553 cFYI(1, "oplock: %d", oplock);
15886177
JL
554 goto reopen_error_exit;
555 }
556
7fc8f4e9 557reopen_success:
15886177
JL
558 pCifsFile->netfid = netfid;
559 pCifsFile->invalidHandle = false;
560 mutex_unlock(&pCifsFile->fh_mutex);
561 pCifsInode = CIFS_I(inode);
562
563 if (can_flush) {
564 rc = filemap_write_and_wait(inode->i_mapping);
eb4b756b 565 mapping_set_error(inode->i_mapping, rc);
15886177 566
15886177
JL
567 if (tcon->unix_ext)
568 rc = cifs_get_inode_info_unix(&inode,
569 full_path, inode->i_sb, xid);
570 else
571 rc = cifs_get_inode_info(&inode,
572 full_path, NULL, inode->i_sb,
573 xid, NULL);
574 } /* else we are writing out data to server already
575 and could deadlock if we tried to flush data, and
576 since we do not know if we have data that would
577 invalidate the current end of file on the server
578 we can not go to the server to get the new inod
579 info */
e66673e3 580
c6723628 581 cifs_set_oplock_level(pCifsInode, oplock);
e66673e3 582
15886177
JL
583 cifs_relock_file(pCifsFile);
584
585reopen_error_exit:
1da177e4
LT
586 kfree(full_path);
587 FreeXid(xid);
588 return rc;
589}
590
591int cifs_close(struct inode *inode, struct file *file)
592{
77970693
JL
593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
596 }
7ee1af76 597
cdff08e7
SF
598 /* return code from the ->release op is always ignored */
599 return 0;
1da177e4
LT
600}
601
602int cifs_closedir(struct inode *inode, struct file *file)
603{
604 int rc = 0;
605 int xid;
c21dfb69 606 struct cifsFileInfo *pCFileStruct = file->private_data;
1da177e4
LT
607 char *ptmp;
608
b6b38f70 609 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4
LT
610
611 xid = GetXid();
612
613 if (pCFileStruct) {
96daf2b0 614 struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
1da177e4 615
b6b38f70 616 cFYI(1, "Freeing private data in close dir");
4477288a 617 spin_lock(&cifs_file_list_lock);
4b18f2a9
SF
618 if (!pCFileStruct->srch_inf.endOfSearch &&
619 !pCFileStruct->invalidHandle) {
620 pCFileStruct->invalidHandle = true;
4477288a 621 spin_unlock(&cifs_file_list_lock);
1da177e4 622 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
b6b38f70
JP
623 cFYI(1, "Closing uncompleted readdir with rc %d",
624 rc);
1da177e4
LT
625 /* not much we can do if it fails anyway, ignore rc */
626 rc = 0;
ddb4cbfc 627 } else
4477288a 628 spin_unlock(&cifs_file_list_lock);
1da177e4
LT
629 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
630 if (ptmp) {
b6b38f70 631 cFYI(1, "closedir free smb buf in srch struct");
1da177e4 632 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
fb8c4b14 633 if (pCFileStruct->srch_inf.smallBuf)
d47d7c1a
SF
634 cifs_small_buf_release(ptmp);
635 else
636 cifs_buf_release(ptmp);
1da177e4 637 }
13cfb733 638 cifs_put_tlink(pCFileStruct->tlink);
1da177e4
LT
639 kfree(file->private_data);
640 file->private_data = NULL;
641 }
642 /* BB can we lock the filestruct while this is going on? */
643 FreeXid(xid);
644 return rc;
645}
646
85160e03 647static struct cifsLockInfo *
a88b4707 648cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
7ee1af76 649{
a88b4707 650 struct cifsLockInfo *lock =
fb8c4b14 651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
652 if (!lock)
653 return lock;
654 lock->offset = offset;
655 lock->length = length;
656 lock->type = type;
657 lock->netfid = netfid;
658 lock->pid = current->tgid;
659 INIT_LIST_HEAD(&lock->blist);
660 init_waitqueue_head(&lock->block_q);
661 return lock;
85160e03
PS
662}
663
664static void
665cifs_del_lock_waiters(struct cifsLockInfo *lock)
666{
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
671 }
672}
673
674static bool
161ebf9f 675__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
85160e03
PS
676 __u64 length, __u8 type, __u16 netfid,
677 struct cifsLockInfo **conf_lock)
678{
679 struct cifsLockInfo *li, *tmp;
680
681 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
682 if (offset + length <= li->offset ||
683 offset >= li->offset + li->length)
684 continue;
685 else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
686 ((netfid == li->netfid && current->tgid == li->pid) ||
687 type == li->type))
688 continue;
689 else {
690 *conf_lock = li;
691 return true;
692 }
693 }
694 return false;
695}
696
161ebf9f
PS
697static bool
698cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
699 struct cifsLockInfo **conf_lock)
700{
701 return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
702 lock->type, lock->netfid, conf_lock);
703}
704
85160e03
PS
705static int
706cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
707 __u8 type, __u16 netfid, struct file_lock *flock)
708{
709 int rc = 0;
710 struct cifsLockInfo *conf_lock;
711 bool exist;
712
713 mutex_lock(&cinode->lock_mutex);
714
161ebf9f
PS
715 exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
716 &conf_lock);
85160e03
PS
717 if (exist) {
718 flock->fl_start = conf_lock->offset;
719 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
720 flock->fl_pid = conf_lock->pid;
721 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
722 flock->fl_type = F_RDLCK;
723 else
724 flock->fl_type = F_WRLCK;
725 } else if (!cinode->can_cache_brlcks)
726 rc = 1;
727 else
728 flock->fl_type = F_UNLCK;
729
730 mutex_unlock(&cinode->lock_mutex);
731 return rc;
732}
733
161ebf9f
PS
734static void
735cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
85160e03 736{
d59dad2b 737 mutex_lock(&cinode->lock_mutex);
161ebf9f 738 list_add_tail(&lock->llist, &cinode->llist);
d59dad2b 739 mutex_unlock(&cinode->lock_mutex);
7ee1af76
JA
740}
741
85160e03 742static int
161ebf9f
PS
743cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
744 bool wait)
85160e03 745{
161ebf9f 746 struct cifsLockInfo *conf_lock;
85160e03
PS
747 bool exist;
748 int rc = 0;
749
85160e03
PS
750try_again:
751 exist = false;
752 mutex_lock(&cinode->lock_mutex);
753
161ebf9f 754 exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
85160e03
PS
755 if (!exist && cinode->can_cache_brlcks) {
756 list_add_tail(&lock->llist, &cinode->llist);
757 mutex_unlock(&cinode->lock_mutex);
758 return rc;
759 }
760
761 if (!exist)
762 rc = 1;
763 else if (!wait)
764 rc = -EACCES;
765 else {
766 list_add_tail(&lock->blist, &conf_lock->blist);
767 mutex_unlock(&cinode->lock_mutex);
768 rc = wait_event_interruptible(lock->block_q,
769 (lock->blist.prev == &lock->blist) &&
770 (lock->blist.next == &lock->blist));
771 if (!rc)
772 goto try_again;
a88b4707
PS
773 mutex_lock(&cinode->lock_mutex);
774 list_del_init(&lock->blist);
85160e03
PS
775 }
776
85160e03
PS
777 mutex_unlock(&cinode->lock_mutex);
778 return rc;
779}
780
781static int
4f6bcec9
PS
782cifs_posix_lock_test(struct file *file, struct file_lock *flock)
783{
784 int rc = 0;
785 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
786 unsigned char saved_type = flock->fl_type;
787
50792760
PS
788 if ((flock->fl_flags & FL_POSIX) == 0)
789 return 1;
790
4f6bcec9
PS
791 mutex_lock(&cinode->lock_mutex);
792 posix_test_lock(file, flock);
793
794 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
795 flock->fl_type = saved_type;
796 rc = 1;
797 }
798
799 mutex_unlock(&cinode->lock_mutex);
800 return rc;
801}
802
803static int
804cifs_posix_lock_set(struct file *file, struct file_lock *flock)
805{
806 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
50792760
PS
807 int rc = 1;
808
809 if ((flock->fl_flags & FL_POSIX) == 0)
810 return rc;
4f6bcec9
PS
811
812 mutex_lock(&cinode->lock_mutex);
813 if (!cinode->can_cache_brlcks) {
814 mutex_unlock(&cinode->lock_mutex);
50792760 815 return rc;
4f6bcec9
PS
816 }
817 rc = posix_lock_file_wait(file, flock);
818 mutex_unlock(&cinode->lock_mutex);
819 return rc;
820}
821
822static int
823cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
85160e03
PS
824{
825 int xid, rc = 0, stored_rc;
826 struct cifsLockInfo *li, *tmp;
827 struct cifs_tcon *tcon;
828 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
32b9aaf1
PS
829 unsigned int num, max_num;
830 LOCKING_ANDX_RANGE *buf, *cur;
831 int types[] = {LOCKING_ANDX_LARGE_FILES,
832 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
833 int i;
85160e03
PS
834
835 xid = GetXid();
836 tcon = tlink_tcon(cfile->tlink);
837
838 mutex_lock(&cinode->lock_mutex);
839 if (!cinode->can_cache_brlcks) {
840 mutex_unlock(&cinode->lock_mutex);
841 FreeXid(xid);
842 return rc;
843 }
844
32b9aaf1
PS
845 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
846 sizeof(LOCKING_ANDX_RANGE);
847 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
848 if (!buf) {
849 mutex_unlock(&cinode->lock_mutex);
850 FreeXid(xid);
851 return rc;
852 }
853
854 for (i = 0; i < 2; i++) {
855 cur = buf;
856 num = 0;
857 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
858 if (li->type != types[i])
859 continue;
860 cur->Pid = cpu_to_le16(li->pid);
861 cur->LengthLow = cpu_to_le32((u32)li->length);
862 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
863 cur->OffsetLow = cpu_to_le32((u32)li->offset);
864 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
865 if (++num == max_num) {
866 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
867 li->type, 0, num, buf);
868 if (stored_rc)
869 rc = stored_rc;
870 cur = buf;
871 num = 0;
872 } else
873 cur++;
874 }
875
876 if (num) {
877 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
878 types[i], 0, num, buf);
879 if (stored_rc)
880 rc = stored_rc;
881 }
85160e03
PS
882 }
883
884 cinode->can_cache_brlcks = false;
885 mutex_unlock(&cinode->lock_mutex);
886
32b9aaf1 887 kfree(buf);
85160e03
PS
888 FreeXid(xid);
889 return rc;
890}
891
4f6bcec9
PS
892/* copied from fs/locks.c with a name change */
893#define cifs_for_each_lock(inode, lockp) \
894 for (lockp = &inode->i_flock; *lockp != NULL; \
895 lockp = &(*lockp)->fl_next)
896
897static int
898cifs_push_posix_locks(struct cifsFileInfo *cfile)
899{
900 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
901 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
902 struct file_lock *flock, **before;
903 struct cifsLockInfo *lck, *tmp;
904 int rc = 0, xid, type;
905 __u64 length;
906 struct list_head locks_to_send;
907
908 xid = GetXid();
909
910 mutex_lock(&cinode->lock_mutex);
911 if (!cinode->can_cache_brlcks) {
912 mutex_unlock(&cinode->lock_mutex);
913 FreeXid(xid);
914 return rc;
915 }
916
917 INIT_LIST_HEAD(&locks_to_send);
918
919 lock_flocks();
920 cifs_for_each_lock(cfile->dentry->d_inode, before) {
921 flock = *before;
922 length = 1 + flock->fl_end - flock->fl_start;
923 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
924 type = CIFS_RDLCK;
925 else
926 type = CIFS_WRLCK;
927
a88b4707 928 lck = cifs_lock_init(flock->fl_start, length, type,
4f6bcec9
PS
929 cfile->netfid);
930 if (!lck) {
931 rc = -ENOMEM;
932 goto send_locks;
933 }
934 lck->pid = flock->fl_pid;
935
936 list_add_tail(&lck->llist, &locks_to_send);
937 }
938
939send_locks:
940 unlock_flocks();
941
942 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
943 struct file_lock tmp_lock;
944 int stored_rc;
945
946 tmp_lock.fl_start = lck->offset;
947 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
948 0, lck->length, &tmp_lock,
949 lck->type, 0);
950 if (stored_rc)
951 rc = stored_rc;
952 list_del(&lck->llist);
953 kfree(lck);
954 }
955
956 cinode->can_cache_brlcks = false;
957 mutex_unlock(&cinode->lock_mutex);
958
959 FreeXid(xid);
960 return rc;
961}
962
963static int
964cifs_push_locks(struct cifsFileInfo *cfile)
965{
966 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
967 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
968
969 if ((tcon->ses->capabilities & CAP_UNIX) &&
970 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
971 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
972 return cifs_push_posix_locks(cfile);
973
974 return cifs_push_mandatory_locks(cfile);
975}
976
03776f45
PS
977static void
978cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
979 bool *wait_flag)
1da177e4 980{
03776f45 981 if (flock->fl_flags & FL_POSIX)
b6b38f70 982 cFYI(1, "Posix");
03776f45 983 if (flock->fl_flags & FL_FLOCK)
b6b38f70 984 cFYI(1, "Flock");
03776f45 985 if (flock->fl_flags & FL_SLEEP) {
b6b38f70 986 cFYI(1, "Blocking lock");
03776f45 987 *wait_flag = true;
1da177e4 988 }
03776f45 989 if (flock->fl_flags & FL_ACCESS)
b6b38f70 990 cFYI(1, "Process suspended by mandatory locking - "
03776f45
PS
991 "not implemented yet");
992 if (flock->fl_flags & FL_LEASE)
b6b38f70 993 cFYI(1, "Lease on file - not implemented yet");
03776f45 994 if (flock->fl_flags &
1da177e4 995 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
03776f45 996 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1da177e4 997
03776f45
PS
998 *type = LOCKING_ANDX_LARGE_FILES;
999 if (flock->fl_type == F_WRLCK) {
b6b38f70 1000 cFYI(1, "F_WRLCK ");
03776f45
PS
1001 *lock = 1;
1002 } else if (flock->fl_type == F_UNLCK) {
b6b38f70 1003 cFYI(1, "F_UNLCK");
03776f45
PS
1004 *unlock = 1;
1005 /* Check if unlock includes more than one lock range */
1006 } else if (flock->fl_type == F_RDLCK) {
b6b38f70 1007 cFYI(1, "F_RDLCK");
03776f45
PS
1008 *type |= LOCKING_ANDX_SHARED_LOCK;
1009 *lock = 1;
1010 } else if (flock->fl_type == F_EXLCK) {
b6b38f70 1011 cFYI(1, "F_EXLCK");
03776f45
PS
1012 *lock = 1;
1013 } else if (flock->fl_type == F_SHLCK) {
b6b38f70 1014 cFYI(1, "F_SHLCK");
03776f45
PS
1015 *type |= LOCKING_ANDX_SHARED_LOCK;
1016 *lock = 1;
1da177e4 1017 } else
b6b38f70 1018 cFYI(1, "Unknown type of lock");
03776f45 1019}
1da177e4 1020
03776f45 1021static int
4f6bcec9 1022cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
03776f45
PS
1023 bool wait_flag, bool posix_lck, int xid)
1024{
1025 int rc = 0;
1026 __u64 length = 1 + flock->fl_end - flock->fl_start;
4f6bcec9
PS
1027 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1028 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
85160e03 1029 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
03776f45 1030 __u16 netfid = cfile->netfid;
f05337c6 1031
03776f45
PS
1032 if (posix_lck) {
1033 int posix_lock_type;
4f6bcec9
PS
1034
1035 rc = cifs_posix_lock_test(file, flock);
1036 if (!rc)
1037 return rc;
1038
03776f45
PS
1039 if (type & LOCKING_ANDX_SHARED_LOCK)
1040 posix_lock_type = CIFS_RDLCK;
1041 else
1042 posix_lock_type = CIFS_WRLCK;
4f6bcec9
PS
1043 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1044 1 /* get */, length, flock,
1045 posix_lock_type, wait_flag);
03776f45
PS
1046 return rc;
1047 }
1da177e4 1048
85160e03
PS
1049 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
1050 flock);
1051 if (!rc)
1052 return rc;
1053
03776f45
PS
1054 /* BB we could chain these into one lock request BB */
1055 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1056 flock->fl_start, 0, 1, type, 0, 0);
1057 if (rc == 0) {
1058 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1059 length, flock->fl_start, 1, 0,
1060 type, 0, 0);
1061 flock->fl_type = F_UNLCK;
1062 if (rc != 0)
1063 cERROR(1, "Error unlocking previously locked "
1064 "range %d during test of lock", rc);
a88b4707 1065 return 0;
1da177e4 1066 }
7ee1af76 1067
03776f45
PS
1068 if (type & LOCKING_ANDX_SHARED_LOCK) {
1069 flock->fl_type = F_WRLCK;
a88b4707 1070 return 0;
7ee1af76
JA
1071 }
1072
03776f45
PS
1073 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1074 flock->fl_start, 0, 1,
1075 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
1076 if (rc == 0) {
1077 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1078 length, flock->fl_start, 1, 0,
1079 type | LOCKING_ANDX_SHARED_LOCK,
1080 0, 0);
1081 flock->fl_type = F_RDLCK;
1082 if (rc != 0)
1083 cERROR(1, "Error unlocking previously locked "
1084 "range %d during test of lock", rc);
1085 } else
1086 flock->fl_type = F_WRLCK;
1087
a88b4707 1088 return 0;
03776f45
PS
1089}
1090
9ee305b7
PS
1091static void
1092cifs_move_llist(struct list_head *source, struct list_head *dest)
1093{
1094 struct list_head *li, *tmp;
1095 list_for_each_safe(li, tmp, source)
1096 list_move(li, dest);
1097}
1098
1099static void
1100cifs_free_llist(struct list_head *llist)
1101{
1102 struct cifsLockInfo *li, *tmp;
1103 list_for_each_entry_safe(li, tmp, llist, llist) {
1104 cifs_del_lock_waiters(li);
1105 list_del(&li->llist);
1106 kfree(li);
1107 }
1108}
1109
1110static int
1111cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1112{
1113 int rc = 0, stored_rc;
1114 int types[] = {LOCKING_ANDX_LARGE_FILES,
1115 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1116 unsigned int i;
1117 unsigned int max_num, num;
1118 LOCKING_ANDX_RANGE *buf, *cur;
1119 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1120 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1121 struct cifsLockInfo *li, *tmp;
1122 __u64 length = 1 + flock->fl_end - flock->fl_start;
1123 struct list_head tmp_llist;
1124
1125 INIT_LIST_HEAD(&tmp_llist);
1126
1127 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
1128 sizeof(LOCKING_ANDX_RANGE);
1129 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1130 if (!buf)
1131 return -ENOMEM;
1132
1133 mutex_lock(&cinode->lock_mutex);
1134 for (i = 0; i < 2; i++) {
1135 cur = buf;
1136 num = 0;
1137 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
1138 if (flock->fl_start > li->offset ||
1139 (flock->fl_start + length) <
1140 (li->offset + li->length))
1141 continue;
1142 if (current->tgid != li->pid)
1143 continue;
1144 if (cfile->netfid != li->netfid)
1145 continue;
1146 if (types[i] != li->type)
1147 continue;
1148 if (!cinode->can_cache_brlcks) {
1149 cur->Pid = cpu_to_le16(li->pid);
1150 cur->LengthLow = cpu_to_le32((u32)li->length);
1151 cur->LengthHigh =
1152 cpu_to_le32((u32)(li->length>>32));
1153 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1154 cur->OffsetHigh =
1155 cpu_to_le32((u32)(li->offset>>32));
1156 /*
1157 * We need to save a lock here to let us add
1158 * it again to the inode list if the unlock
1159 * range request fails on the server.
1160 */
1161 list_move(&li->llist, &tmp_llist);
1162 if (++num == max_num) {
1163 stored_rc = cifs_lockv(xid, tcon,
1164 cfile->netfid,
1165 li->type, num,
1166 0, buf);
1167 if (stored_rc) {
1168 /*
1169 * We failed on the unlock range
1170 * request - add all locks from
1171 * the tmp list to the head of
1172 * the inode list.
1173 */
1174 cifs_move_llist(&tmp_llist,
1175 &cinode->llist);
1176 rc = stored_rc;
1177 } else
1178 /*
1179 * The unlock range request
1180 * succeed - free the tmp list.
1181 */
1182 cifs_free_llist(&tmp_llist);
1183 cur = buf;
1184 num = 0;
1185 } else
1186 cur++;
1187 } else {
1188 /*
1189 * We can cache brlock requests - simply remove
1190 * a lock from the inode list.
1191 */
1192 list_del(&li->llist);
1193 cifs_del_lock_waiters(li);
1194 kfree(li);
1195 }
1196 }
1197 if (num) {
1198 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1199 types[i], num, 0, buf);
1200 if (stored_rc) {
1201 cifs_move_llist(&tmp_llist, &cinode->llist);
1202 rc = stored_rc;
1203 } else
1204 cifs_free_llist(&tmp_llist);
1205 }
1206 }
1207
1208 mutex_unlock(&cinode->lock_mutex);
1209 kfree(buf);
1210 return rc;
1211}
1212
03776f45
PS
1213static int
1214cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1215 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1216{
1217 int rc = 0;
1218 __u64 length = 1 + flock->fl_end - flock->fl_start;
1219 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1220 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
d59dad2b 1221 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
03776f45
PS
1222 __u16 netfid = cfile->netfid;
1223
1224 if (posix_lck) {
08547b03 1225 int posix_lock_type;
4f6bcec9
PS
1226
1227 rc = cifs_posix_lock_set(file, flock);
1228 if (!rc || rc < 0)
1229 return rc;
1230
03776f45 1231 if (type & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
1232 posix_lock_type = CIFS_RDLCK;
1233 else
1234 posix_lock_type = CIFS_WRLCK;
50c2f753 1235
03776f45 1236 if (unlock == 1)
beb84dc8 1237 posix_lock_type = CIFS_UNLCK;
7ee1af76 1238
4f6bcec9
PS
1239 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1240 0 /* set */, length, flock,
1241 posix_lock_type, wait_flag);
03776f45
PS
1242 goto out;
1243 }
7ee1af76 1244
03776f45 1245 if (lock) {
161ebf9f
PS
1246 struct cifsLockInfo *lock;
1247
a88b4707 1248 lock = cifs_lock_init(flock->fl_start, length, type, netfid);
161ebf9f
PS
1249 if (!lock)
1250 return -ENOMEM;
1251
1252 rc = cifs_lock_add_if(cinode, lock, wait_flag);
85160e03 1253 if (rc < 0)
161ebf9f
PS
1254 kfree(lock);
1255 if (rc <= 0)
85160e03
PS
1256 goto out;
1257
03776f45 1258 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
85160e03 1259 flock->fl_start, 0, 1, type, wait_flag, 0);
161ebf9f
PS
1260 if (rc) {
1261 kfree(lock);
1262 goto out;
03776f45 1263 }
161ebf9f
PS
1264
1265 cifs_lock_add(cinode, lock);
9ee305b7
PS
1266 } else if (unlock)
1267 rc = cifs_unlock_range(cfile, flock, xid);
03776f45 1268
03776f45
PS
1269out:
1270 if (flock->fl_flags & FL_POSIX)
1271 posix_lock_file_wait(file, flock);
1272 return rc;
1273}
1274
1275int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1276{
1277 int rc, xid;
1278 int lock = 0, unlock = 0;
1279 bool wait_flag = false;
1280 bool posix_lck = false;
1281 struct cifs_sb_info *cifs_sb;
1282 struct cifs_tcon *tcon;
1283 struct cifsInodeInfo *cinode;
1284 struct cifsFileInfo *cfile;
1285 __u16 netfid;
1286 __u8 type;
1287
1288 rc = -EACCES;
1289 xid = GetXid();
1290
1291 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1292 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1293 flock->fl_start, flock->fl_end);
1294
1295 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1296
1297 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1298 cfile = (struct cifsFileInfo *)file->private_data;
1299 tcon = tlink_tcon(cfile->tlink);
1300 netfid = cfile->netfid;
1301 cinode = CIFS_I(file->f_path.dentry->d_inode);
1302
1303 if ((tcon->ses->capabilities & CAP_UNIX) &&
1304 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1305 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1306 posix_lck = true;
1307 /*
1308 * BB add code here to normalize offset and length to account for
1309 * negative length which we can not accept over the wire.
1310 */
1311 if (IS_GETLK(cmd)) {
4f6bcec9 1312 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
03776f45
PS
1313 FreeXid(xid);
1314 return rc;
1315 }
1316
1317 if (!lock && !unlock) {
1318 /*
1319 * if no lock or unlock then nothing to do since we do not
1320 * know what it is
1321 */
1322 FreeXid(xid);
1323 return -EOPNOTSUPP;
7ee1af76
JA
1324 }
1325
03776f45
PS
1326 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1327 xid);
1da177e4
LT
1328 FreeXid(xid);
1329 return rc;
1330}
1331
fbec9ab9 1332/* update the file size (if needed) after a write */
72432ffc 1333void
fbec9ab9
JL
1334cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1335 unsigned int bytes_written)
1336{
1337 loff_t end_of_write = offset + bytes_written;
1338
1339 if (end_of_write > cifsi->server_eof)
1340 cifsi->server_eof = end_of_write;
1341}
1342
fa2989f4 1343static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
7da4b49a
JL
1344 const char *write_data, size_t write_size,
1345 loff_t *poffset)
1da177e4
LT
1346{
1347 int rc = 0;
1348 unsigned int bytes_written = 0;
1349 unsigned int total_written;
1350 struct cifs_sb_info *cifs_sb;
96daf2b0 1351 struct cifs_tcon *pTcon;
7749981e 1352 int xid;
7da4b49a
JL
1353 struct dentry *dentry = open_file->dentry;
1354 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
fa2989f4 1355 struct cifs_io_parms io_parms;
1da177e4 1356
7da4b49a 1357 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 1358
b6b38f70 1359 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
7da4b49a 1360 *poffset, dentry->d_name.name);
1da177e4 1361
13cfb733 1362 pTcon = tlink_tcon(open_file->tlink);
50c2f753 1363
1da177e4 1364 xid = GetXid();
1da177e4 1365
1da177e4
LT
1366 for (total_written = 0; write_size > total_written;
1367 total_written += bytes_written) {
1368 rc = -EAGAIN;
1369 while (rc == -EAGAIN) {
ca83ce3d
JL
1370 struct kvec iov[2];
1371 unsigned int len;
1372
1da177e4 1373 if (open_file->invalidHandle) {
1da177e4
LT
1374 /* we could deadlock if we called
1375 filemap_fdatawait from here so tell
fb8c4b14 1376 reopen_file not to flush data to
1da177e4 1377 server now */
15886177 1378 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
1379 if (rc != 0)
1380 break;
1381 }
ca83ce3d
JL
1382
1383 len = min((size_t)cifs_sb->wsize,
1384 write_size - total_written);
1385 /* iov[0] is reserved for smb header */
1386 iov[1].iov_base = (char *)write_data + total_written;
1387 iov[1].iov_len = len;
fa2989f4
PS
1388 io_parms.netfid = open_file->netfid;
1389 io_parms.pid = pid;
1390 io_parms.tcon = pTcon;
1391 io_parms.offset = *poffset;
1392 io_parms.length = len;
1393 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1394 1, 0);
1da177e4
LT
1395 }
1396 if (rc || (bytes_written == 0)) {
1397 if (total_written)
1398 break;
1399 else {
1400 FreeXid(xid);
1401 return rc;
1402 }
fbec9ab9
JL
1403 } else {
1404 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1405 *poffset += bytes_written;
fbec9ab9 1406 }
1da177e4
LT
1407 }
1408
a4544347 1409 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 1410
7da4b49a
JL
1411 if (total_written > 0) {
1412 spin_lock(&dentry->d_inode->i_lock);
1413 if (*poffset > dentry->d_inode->i_size)
1414 i_size_write(dentry->d_inode, *poffset);
1415 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 1416 }
7da4b49a 1417 mark_inode_dirty_sync(dentry->d_inode);
1da177e4
LT
1418 FreeXid(xid);
1419 return total_written;
1420}
1421
6508d904
JL
1422struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1423 bool fsuid_only)
630f3f0c
SF
1424{
1425 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1426 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1427
1428 /* only filter by fsuid on multiuser mounts */
1429 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1430 fsuid_only = false;
630f3f0c 1431
4477288a 1432 spin_lock(&cifs_file_list_lock);
630f3f0c
SF
1433 /* we could simply get the first_list_entry since write-only entries
1434 are always at the end of the list but since the first entry might
1435 have a close pending, we go through the whole list */
1436 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1437 if (fsuid_only && open_file->uid != current_fsuid())
1438 continue;
2e396b83 1439 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1440 if (!open_file->invalidHandle) {
1441 /* found a good file */
1442 /* lock it so it will not be closed on us */
6ab409b5 1443 cifsFileInfo_get(open_file);
4477288a 1444 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1445 return open_file;
1446 } /* else might as well continue, and look for
1447 another, or simply have the caller reopen it
1448 again rather than trying to fix this handle */
1449 } else /* write only file */
1450 break; /* write only files are last so must be done */
1451 }
4477288a 1452 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1453 return NULL;
1454}
630f3f0c 1455
6508d904
JL
1456struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1457 bool fsuid_only)
6148a742
SF
1458{
1459 struct cifsFileInfo *open_file;
d3892294 1460 struct cifs_sb_info *cifs_sb;
2846d386 1461 bool any_available = false;
dd99cd80 1462 int rc;
6148a742 1463
60808233
SF
1464 /* Having a null inode here (because mapping->host was set to zero by
1465 the VFS or MM) should not happen but we had reports of on oops (due to
1466 it being zero) during stress testcases so we need to check for it */
1467
fb8c4b14 1468 if (cifs_inode == NULL) {
b6b38f70 1469 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1470 dump_stack();
1471 return NULL;
1472 }
1473
d3892294
JL
1474 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1475
6508d904
JL
1476 /* only filter by fsuid on multiuser mounts */
1477 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1478 fsuid_only = false;
1479
4477288a 1480 spin_lock(&cifs_file_list_lock);
9b22b0b7 1481refind_writable:
6148a742 1482 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1483 if (!any_available && open_file->pid != current->tgid)
1484 continue;
1485 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1486 continue;
2e396b83 1487 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 1488 cifsFileInfo_get(open_file);
9b22b0b7
SF
1489
1490 if (!open_file->invalidHandle) {
1491 /* found a good writable file */
4477288a 1492 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1493 return open_file;
1494 }
8840dee9 1495
4477288a 1496 spin_unlock(&cifs_file_list_lock);
cdff08e7 1497
9b22b0b7 1498 /* Had to unlock since following call can block */
15886177 1499 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1500 if (!rc)
1501 return open_file;
9b22b0b7 1502
cdff08e7 1503 /* if it fails, try another handle if possible */
b6b38f70 1504 cFYI(1, "wp failed on reopen file");
6ab409b5 1505 cifsFileInfo_put(open_file);
8840dee9 1506
cdff08e7
SF
1507 spin_lock(&cifs_file_list_lock);
1508
9b22b0b7
SF
1509 /* else we simply continue to the next entry. Thus
1510 we do not loop on reopen errors. If we
1511 can not reopen the file, for example if we
1512 reconnected to a server with another client
1513 racing to delete or lock the file we would not
1514 make progress if we restarted before the beginning
1515 of the loop here. */
6148a742
SF
1516 }
1517 }
2846d386
JL
1518 /* couldn't find useable FH with same pid, try any available */
1519 if (!any_available) {
1520 any_available = true;
1521 goto refind_writable;
1522 }
4477288a 1523 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1524 return NULL;
1525}
1526
1da177e4
LT
1527static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1528{
1529 struct address_space *mapping = page->mapping;
1530 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1531 char *write_data;
1532 int rc = -EFAULT;
1533 int bytes_written = 0;
1da177e4 1534 struct inode *inode;
6148a742 1535 struct cifsFileInfo *open_file;
1da177e4
LT
1536
1537 if (!mapping || !mapping->host)
1538 return -EFAULT;
1539
1540 inode = page->mapping->host;
1da177e4
LT
1541
1542 offset += (loff_t)from;
1543 write_data = kmap(page);
1544 write_data += from;
1545
1546 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1547 kunmap(page);
1548 return -EIO;
1549 }
1550
1551 /* racing with truncate? */
1552 if (offset > mapping->host->i_size) {
1553 kunmap(page);
1554 return 0; /* don't care */
1555 }
1556
1557 /* check to make sure that we are not extending the file */
1558 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1559 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1560
6508d904 1561 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1562 if (open_file) {
fa2989f4
PS
1563 bytes_written = cifs_write(open_file, open_file->pid,
1564 write_data, to - from, &offset);
6ab409b5 1565 cifsFileInfo_put(open_file);
1da177e4 1566 /* Does mm or vfs already set times? */
6148a742 1567 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1568 if ((bytes_written > 0) && (offset))
6148a742 1569 rc = 0;
bb5a9a04
SF
1570 else if (bytes_written < 0)
1571 rc = bytes_written;
6148a742 1572 } else {
b6b38f70 1573 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1574 rc = -EIO;
1575 }
1576
1577 kunmap(page);
1578 return rc;
1579}
1580
1da177e4 1581static int cifs_writepages(struct address_space *mapping,
37c0eb46 1582 struct writeback_control *wbc)
1da177e4 1583{
c3d17b63
JL
1584 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1585 bool done = false, scanned = false, range_whole = false;
1586 pgoff_t end, index;
1587 struct cifs_writedata *wdata;
37c0eb46 1588 struct page *page;
37c0eb46 1589 int rc = 0;
50c2f753 1590
37c0eb46 1591 /*
c3d17b63 1592 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1593 * one page at a time via cifs_writepage
1594 */
1595 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1596 return generic_writepages(mapping, wbc);
1597
111ebb6e 1598 if (wbc->range_cyclic) {
37c0eb46 1599 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1600 end = -1;
1601 } else {
1602 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1603 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1604 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1605 range_whole = true;
1606 scanned = true;
37c0eb46
SF
1607 }
1608retry:
c3d17b63
JL
1609 while (!done && index <= end) {
1610 unsigned int i, nr_pages, found_pages;
1611 pgoff_t next = 0, tofind;
1612 struct page **pages;
1613
1614 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1615 end - index) + 1;
1616
1617 wdata = cifs_writedata_alloc((unsigned int)tofind);
1618 if (!wdata) {
1619 rc = -ENOMEM;
1620 break;
1621 }
1622
1623 /*
1624 * find_get_pages_tag seems to return a max of 256 on each
1625 * iteration, so we must call it several times in order to
1626 * fill the array or the wsize is effectively limited to
1627 * 256 * PAGE_CACHE_SIZE.
1628 */
1629 found_pages = 0;
1630 pages = wdata->pages;
1631 do {
1632 nr_pages = find_get_pages_tag(mapping, &index,
1633 PAGECACHE_TAG_DIRTY,
1634 tofind, pages);
1635 found_pages += nr_pages;
1636 tofind -= nr_pages;
1637 pages += nr_pages;
1638 } while (nr_pages && tofind && index <= end);
1639
1640 if (found_pages == 0) {
1641 kref_put(&wdata->refcount, cifs_writedata_release);
1642 break;
1643 }
1644
1645 nr_pages = 0;
1646 for (i = 0; i < found_pages; i++) {
1647 page = wdata->pages[i];
37c0eb46
SF
1648 /*
1649 * At this point we hold neither mapping->tree_lock nor
1650 * lock on the page itself: the page may be truncated or
1651 * invalidated (changing page->mapping to NULL), or even
1652 * swizzled back from swapper_space to tmpfs file
1653 * mapping
1654 */
1655
c3d17b63 1656 if (nr_pages == 0)
37c0eb46 1657 lock_page(page);
529ae9aa 1658 else if (!trylock_page(page))
37c0eb46
SF
1659 break;
1660
1661 if (unlikely(page->mapping != mapping)) {
1662 unlock_page(page);
1663 break;
1664 }
1665
111ebb6e 1666 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1667 done = true;
37c0eb46
SF
1668 unlock_page(page);
1669 break;
1670 }
1671
1672 if (next && (page->index != next)) {
1673 /* Not next consecutive page */
1674 unlock_page(page);
1675 break;
1676 }
1677
1678 if (wbc->sync_mode != WB_SYNC_NONE)
1679 wait_on_page_writeback(page);
1680
1681 if (PageWriteback(page) ||
cb876f45 1682 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1683 unlock_page(page);
1684 break;
1685 }
84d2f07e 1686
cb876f45
LT
1687 /*
1688 * This actually clears the dirty bit in the radix tree.
1689 * See cifs_writepage() for more commentary.
1690 */
1691 set_page_writeback(page);
1692
84d2f07e 1693 if (page_offset(page) >= mapping->host->i_size) {
c3d17b63 1694 done = true;
84d2f07e 1695 unlock_page(page);
cb876f45 1696 end_page_writeback(page);
84d2f07e
SF
1697 break;
1698 }
1699
c3d17b63
JL
1700 wdata->pages[i] = page;
1701 next = page->index + 1;
1702 ++nr_pages;
1703 }
37c0eb46 1704
c3d17b63
JL
1705 /* reset index to refind any pages skipped */
1706 if (nr_pages == 0)
1707 index = wdata->pages[0]->index + 1;
84d2f07e 1708
c3d17b63
JL
1709 /* put any pages we aren't going to use */
1710 for (i = nr_pages; i < found_pages; i++) {
1711 page_cache_release(wdata->pages[i]);
1712 wdata->pages[i] = NULL;
1713 }
37c0eb46 1714
c3d17b63
JL
1715 /* nothing to write? */
1716 if (nr_pages == 0) {
1717 kref_put(&wdata->refcount, cifs_writedata_release);
1718 continue;
37c0eb46 1719 }
fbec9ab9 1720
c3d17b63
JL
1721 wdata->sync_mode = wbc->sync_mode;
1722 wdata->nr_pages = nr_pages;
1723 wdata->offset = page_offset(wdata->pages[0]);
941b853d 1724
c3d17b63
JL
1725 do {
1726 if (wdata->cfile != NULL)
1727 cifsFileInfo_put(wdata->cfile);
1728 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1729 false);
1730 if (!wdata->cfile) {
1731 cERROR(1, "No writable handles for inode");
1732 rc = -EBADF;
1733 break;
941b853d 1734 }
c3d17b63
JL
1735 rc = cifs_async_writev(wdata);
1736 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1737
c3d17b63
JL
1738 for (i = 0; i < nr_pages; ++i)
1739 unlock_page(wdata->pages[i]);
f3983c21 1740
c3d17b63
JL
1741 /* send failure -- clean up the mess */
1742 if (rc != 0) {
1743 for (i = 0; i < nr_pages; ++i) {
941b853d 1744 if (rc == -EAGAIN)
c3d17b63
JL
1745 redirty_page_for_writepage(wbc,
1746 wdata->pages[i]);
1747 else
1748 SetPageError(wdata->pages[i]);
1749 end_page_writeback(wdata->pages[i]);
1750 page_cache_release(wdata->pages[i]);
37c0eb46 1751 }
941b853d
JL
1752 if (rc != -EAGAIN)
1753 mapping_set_error(mapping, rc);
c3d17b63
JL
1754 }
1755 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1756
c3d17b63
JL
1757 wbc->nr_to_write -= nr_pages;
1758 if (wbc->nr_to_write <= 0)
1759 done = true;
b066a48c 1760
c3d17b63 1761 index = next;
37c0eb46 1762 }
c3d17b63 1763
37c0eb46
SF
1764 if (!scanned && !done) {
1765 /*
1766 * We hit the last page and there is more work to be done: wrap
1767 * back to the start of the file
1768 */
c3d17b63 1769 scanned = true;
37c0eb46
SF
1770 index = 0;
1771 goto retry;
1772 }
c3d17b63 1773
111ebb6e 1774 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1775 mapping->writeback_index = index;
1776
1da177e4
LT
1777 return rc;
1778}
1da177e4 1779
9ad1506b
PS
1780static int
1781cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1782{
9ad1506b 1783 int rc;
1da177e4
LT
1784 int xid;
1785
1786 xid = GetXid();
1787/* BB add check for wbc flags */
1788 page_cache_get(page);
ad7a2926 1789 if (!PageUptodate(page))
b6b38f70 1790 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1791
1792 /*
1793 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1794 *
1795 * A writepage() implementation always needs to do either this,
1796 * or re-dirty the page with "redirty_page_for_writepage()" in
1797 * the case of a failure.
1798 *
1799 * Just unlocking the page will cause the radix tree tag-bits
1800 * to fail to update with the state of the page correctly.
1801 */
fb8c4b14 1802 set_page_writeback(page);
9ad1506b 1803retry_write:
1da177e4 1804 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
1805 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1806 goto retry_write;
1807 else if (rc == -EAGAIN)
1808 redirty_page_for_writepage(wbc, page);
1809 else if (rc != 0)
1810 SetPageError(page);
1811 else
1812 SetPageUptodate(page);
cb876f45
LT
1813 end_page_writeback(page);
1814 page_cache_release(page);
1da177e4
LT
1815 FreeXid(xid);
1816 return rc;
1817}
1818
/* ->writepage: write the page, then drop the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
1825
d9414774
NP
1826static int cifs_write_end(struct file *file, struct address_space *mapping,
1827 loff_t pos, unsigned len, unsigned copied,
1828 struct page *page, void *fsdata)
1da177e4 1829{
d9414774
NP
1830 int rc;
1831 struct inode *inode = mapping->host;
d4ffff1f
PS
1832 struct cifsFileInfo *cfile = file->private_data;
1833 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1834 __u32 pid;
1835
1836 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1837 pid = cfile->pid;
1838 else
1839 pid = current->tgid;
1da177e4 1840
b6b38f70
JP
1841 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1842 page, pos, copied);
d9414774 1843
a98ee8c1
JL
1844 if (PageChecked(page)) {
1845 if (copied == len)
1846 SetPageUptodate(page);
1847 ClearPageChecked(page);
1848 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1849 SetPageUptodate(page);
ad7a2926 1850
1da177e4 1851 if (!PageUptodate(page)) {
d9414774
NP
1852 char *page_data;
1853 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1854 int xid;
1855
1856 xid = GetXid();
1da177e4
LT
1857 /* this is probably better than directly calling
1858 partialpage_write since in this function the file handle is
1859 known which we might as well leverage */
1860 /* BB check if anything else missing out of ppw
1861 such as updating last write time */
1862 page_data = kmap(page);
d4ffff1f 1863 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
d9414774 1864 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1865 kunmap(page);
d9414774
NP
1866
1867 FreeXid(xid);
fb8c4b14 1868 } else {
d9414774
NP
1869 rc = copied;
1870 pos += copied;
1da177e4
LT
1871 set_page_dirty(page);
1872 }
1873
d9414774
NP
1874 if (rc > 0) {
1875 spin_lock(&inode->i_lock);
1876 if (pos > inode->i_size)
1877 i_size_write(inode, pos);
1878 spin_unlock(&inode->i_lock);
1879 }
1880
1881 unlock_page(page);
1882 page_cache_release(page);
1883
1da177e4
LT
1884 return rc;
1885}
1886
02c24a82
JB
1887int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1888 int datasync)
1da177e4
LT
1889{
1890 int xid;
1891 int rc = 0;
96daf2b0 1892 struct cifs_tcon *tcon;
c21dfb69 1893 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1894 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 1895 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4 1896
02c24a82
JB
1897 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1898 if (rc)
1899 return rc;
1900 mutex_lock(&inode->i_mutex);
1901
1da177e4
LT
1902 xid = GetXid();
1903
b6b38f70 1904 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1905 file->f_path.dentry->d_name.name, datasync);
50c2f753 1906
6feb9891
PS
1907 if (!CIFS_I(inode)->clientCanCacheRead) {
1908 rc = cifs_invalidate_mapping(inode);
1909 if (rc) {
1910 cFYI(1, "rc: %d during invalidate phase", rc);
1911 rc = 0; /* don't care about it in fsync */
1912 }
1913 }
eb4b756b 1914
8be7e6ba
PS
1915 tcon = tlink_tcon(smbfile->tlink);
1916 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1917 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1918
1919 FreeXid(xid);
02c24a82 1920 mutex_unlock(&inode->i_mutex);
8be7e6ba
PS
1921 return rc;
1922}
1923
02c24a82 1924int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba
PS
1925{
1926 int xid;
1927 int rc = 0;
96daf2b0 1928 struct cifs_tcon *tcon;
8be7e6ba
PS
1929 struct cifsFileInfo *smbfile = file->private_data;
1930 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
02c24a82
JB
1931 struct inode *inode = file->f_mapping->host;
1932
1933 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1934 if (rc)
1935 return rc;
1936 mutex_lock(&inode->i_mutex);
8be7e6ba
PS
1937
1938 xid = GetXid();
1939
1940 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1941 file->f_path.dentry->d_name.name, datasync);
1942
1943 tcon = tlink_tcon(smbfile->tlink);
1944 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1945 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 1946
1da177e4 1947 FreeXid(xid);
02c24a82 1948 mutex_unlock(&inode->i_mutex);
1da177e4
LT
1949 return rc;
1950}
1951
1da177e4
LT
1952/*
1953 * As file closes, flush all cached write data for this inode checking
1954 * for write behind errors.
1955 */
75e1fcc0 1956int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1957{
fb8c4b14 1958 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1959 int rc = 0;
1960
eb4b756b 1961 if (file->f_mode & FMODE_WRITE)
d3f1322a 1962 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1963
b6b38f70 1964 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1965
1966 return rc;
1967}
1968
72432ffc
PS
1969static int
1970cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1971{
1972 int rc = 0;
1973 unsigned long i;
1974
1975 for (i = 0; i < num_pages; i++) {
1976 pages[i] = alloc_page(__GFP_HIGHMEM);
1977 if (!pages[i]) {
1978 /*
1979 * save number of pages we have already allocated and
1980 * return with ENOMEM error
1981 */
1982 num_pages = i;
1983 rc = -ENOMEM;
1984 goto error;
1985 }
1986 }
1987
1988 return rc;
1989
1990error:
1991 for (i = 0; i < num_pages; i++)
1992 put_page(pages[i]);
1993 return rc;
1994}
1995
1996static inline
1997size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1998{
1999 size_t num_pages;
2000 size_t clen;
2001
2002 clen = min_t(const size_t, len, wsize);
2003 num_pages = clen / PAGE_CACHE_SIZE;
2004 if (clen % PAGE_CACHE_SIZE)
2005 num_pages++;
2006
2007 if (cur_len)
2008 *cur_len = clen;
2009
2010 return num_pages;
2011}
2012
2013static ssize_t
2014cifs_iovec_write(struct file *file, const struct iovec *iov,
2015 unsigned long nr_segs, loff_t *poffset)
2016{
76429c14
PS
2017 unsigned int written;
2018 unsigned long num_pages, npages, i;
2019 size_t copied, len, cur_len;
2020 ssize_t total_written = 0;
72432ffc
PS
2021 struct kvec *to_send;
2022 struct page **pages;
2023 struct iov_iter it;
2024 struct inode *inode;
2025 struct cifsFileInfo *open_file;
96daf2b0 2026 struct cifs_tcon *pTcon;
72432ffc 2027 struct cifs_sb_info *cifs_sb;
fa2989f4 2028 struct cifs_io_parms io_parms;
72432ffc 2029 int xid, rc;
d4ffff1f 2030 __u32 pid;
72432ffc
PS
2031
2032 len = iov_length(iov, nr_segs);
2033 if (!len)
2034 return 0;
2035
2036 rc = generic_write_checks(file, poffset, &len, 0);
2037 if (rc)
2038 return rc;
2039
2040 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2041 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2042
2043 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
2044 if (!pages)
2045 return -ENOMEM;
2046
2047 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
2048 if (!to_send) {
2049 kfree(pages);
2050 return -ENOMEM;
2051 }
2052
2053 rc = cifs_write_allocate_pages(pages, num_pages);
2054 if (rc) {
2055 kfree(pages);
2056 kfree(to_send);
2057 return rc;
2058 }
2059
2060 xid = GetXid();
2061 open_file = file->private_data;
d4ffff1f
PS
2062
2063 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2064 pid = open_file->pid;
2065 else
2066 pid = current->tgid;
2067
72432ffc
PS
2068 pTcon = tlink_tcon(open_file->tlink);
2069 inode = file->f_path.dentry->d_inode;
2070
2071 iov_iter_init(&it, iov, nr_segs, len, 0);
2072 npages = num_pages;
2073
2074 do {
2075 size_t save_len = cur_len;
2076 for (i = 0; i < npages; i++) {
2077 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
2078 copied = iov_iter_copy_from_user(pages[i], &it, 0,
2079 copied);
2080 cur_len -= copied;
2081 iov_iter_advance(&it, copied);
2082 to_send[i+1].iov_base = kmap(pages[i]);
2083 to_send[i+1].iov_len = copied;
2084 }
2085
2086 cur_len = save_len - cur_len;
2087
2088 do {
2089 if (open_file->invalidHandle) {
2090 rc = cifs_reopen_file(open_file, false);
2091 if (rc != 0)
2092 break;
2093 }
fa2989f4 2094 io_parms.netfid = open_file->netfid;
d4ffff1f 2095 io_parms.pid = pid;
fa2989f4
PS
2096 io_parms.tcon = pTcon;
2097 io_parms.offset = *poffset;
2098 io_parms.length = cur_len;
2099 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
2100 npages, 0);
72432ffc
PS
2101 } while (rc == -EAGAIN);
2102
2103 for (i = 0; i < npages; i++)
2104 kunmap(pages[i]);
2105
2106 if (written) {
2107 len -= written;
2108 total_written += written;
2109 cifs_update_eof(CIFS_I(inode), *poffset, written);
2110 *poffset += written;
2111 } else if (rc < 0) {
2112 if (!total_written)
2113 total_written = rc;
2114 break;
2115 }
2116
2117 /* get length and number of kvecs of the next write */
2118 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
2119 } while (len > 0);
2120
2121 if (total_written > 0) {
2122 spin_lock(&inode->i_lock);
2123 if (*poffset > inode->i_size)
2124 i_size_write(inode, *poffset);
2125 spin_unlock(&inode->i_lock);
2126 }
2127
2128 cifs_stats_bytes_written(pTcon, total_written);
2129 mark_inode_dirty_sync(inode);
2130
2131 for (i = 0; i < num_pages; i++)
2132 put_page(pages[i]);
2133 kfree(to_send);
2134 kfree(pages);
2135 FreeXid(xid);
2136 return total_written;
2137}
2138
0b81c1c4 2139ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
2140 unsigned long nr_segs, loff_t pos)
2141{
2142 ssize_t written;
2143 struct inode *inode;
2144
2145 inode = iocb->ki_filp->f_path.dentry->d_inode;
2146
2147 /*
2148 * BB - optimize the way when signing is disabled. We can drop this
2149 * extra memory-to-memory copying and use iovec buffers for constructing
2150 * write request.
2151 */
2152
2153 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2154 if (written > 0) {
2155 CIFS_I(inode)->invalid_mapping = true;
2156 iocb->ki_pos = pos;
2157 }
2158
2159 return written;
2160}
2161
2162ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2163 unsigned long nr_segs, loff_t pos)
2164{
2165 struct inode *inode;
2166
2167 inode = iocb->ki_filp->f_path.dentry->d_inode;
2168
2169 if (CIFS_I(inode)->clientCanCacheAll)
2170 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2171
2172 /*
2173 * In strict cache mode we need to write the data to the server exactly
2174 * from the pos to pos+len-1 rather than flush all affected pages
2175 * because it may cause a error with mandatory locks on these pages but
2176 * not on the region from pos to ppos+len-1.
2177 */
2178
2179 return cifs_user_writev(iocb, iov, nr_segs, pos);
2180}
2181
a70307ee
PS
/*
 * Uncached read: pull up to iov_length() bytes from the server and copy
 * them straight into the caller's iovec, bypassing the pagecache.
 * Advances *poffset past what was read. Returns total bytes read, or a
 * negative error only when nothing at all could be read.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;	/* bytes already copied into the iovec */
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	/* forward the opener's pid to the server if so mounted */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	/* one server round trip per rsize-bounded chunk */
	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		/* retry loop: -EAGAIN means the session reconnected */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				/*
				 * +4 skips the length field preceding the
				 * SMB header; DataOffset then locates the
				 * payload within the response buffer
				 * (presumably RFC1002 framing — confirm
				 * against CIFSSMBRead's buffer layout)
				 */
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				/* release whichever pool the buf came from */
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			/* partial success wins over a late error */
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
2276
0b81c1c4 2277ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2278 unsigned long nr_segs, loff_t pos)
2279{
2280 ssize_t read;
2281
2282 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2283 if (read > 0)
2284 iocb->ki_pos = pos;
2285
2286 return read;
2287}
2288
2289ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2290 unsigned long nr_segs, loff_t pos)
2291{
2292 struct inode *inode;
2293
2294 inode = iocb->ki_filp->f_path.dentry->d_inode;
2295
2296 if (CIFS_I(inode)->clientCanCacheRead)
2297 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2298
2299 /*
2300 * In strict cache mode we need to read from the server all the time
2301 * if we don't have level II oplock because the server can delay mtime
2302 * change - so we can't make a decision about inode invalidating.
2303 * And we can also fail with pagereading if there are mandatory locks
2304 * on pages affected by this read but not on the region from pos to
2305 * pos+len-1.
2306 */
2307
2308 return cifs_user_readv(iocb, iov, nr_segs, pos);
2309}
1da177e4
LT
2310
/*
 * Read up to read_size bytes from the server into the caller-supplied
 * kernel buffer, advancing *poffset past what was read. This is the
 * synchronous read used by the pagecache paths (e.g. readpage).
 * Returns total bytes read, or a negative error only when nothing at
 * all could be read.
 */
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
	 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	char *current_offset;	/* write cursor into read_data */
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	/* forward the opener's pid to the server if so mounted */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	/* one server round trip per rsize-bounded chunk */
	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);

		/* For windows me and 9x we do not want to request more
		than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		/* retry loop: -EAGAIN means the session reconnected */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			/* partial success wins over a late error */
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): this adds the running total_read to
			 * the stats each iteration, while cifs_iovec_read
			 * adds only the per-iteration bytes_read — looks
			 * like over-counting here; confirm intended unit
			 * before changing.
			 */
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
2392
ca83ce3d
JL
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	/*
	 * Return with the page still locked (VM_FAULT_LOCKED tells the
	 * fault path we hold the lock); the caller is responsible for
	 * unlocking it once it is safe to let the page be dirtied.
	 */
	lock_page(page);
	return VM_FAULT_LOCKED;
}
2405
2406static struct vm_operations_struct cifs_file_vm_ops = {
2407 .fault = filemap_fault,
2408 .page_mkwrite = cifs_page_mkwrite,
2409};
2410
7a6a19b1
PS
2411int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2412{
2413 int rc, xid;
2414 struct inode *inode = file->f_path.dentry->d_inode;
2415
2416 xid = GetXid();
2417
6feb9891
PS
2418 if (!CIFS_I(inode)->clientCanCacheRead) {
2419 rc = cifs_invalidate_mapping(inode);
2420 if (rc)
2421 return rc;
2422 }
7a6a19b1
PS
2423
2424 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2425 if (rc == 0)
2426 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
2427 FreeXid(xid);
2428 return rc;
2429}
2430
1da177e4
LT
2431int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2432{
1da177e4
LT
2433 int rc, xid;
2434
2435 xid = GetXid();
abab095d 2436 rc = cifs_revalidate_file(file);
1da177e4 2437 if (rc) {
b6b38f70 2438 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
2439 FreeXid(xid);
2440 return rc;
2441 }
2442 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2443 if (rc == 0)
2444 vma->vm_ops = &cifs_file_vm_ops;
1da177e4
LT
2445 FreeXid(xid);
2446 return rc;
2447}
2448
1da177e4
LT
/*
 * ->readpages: batch contiguous pages from page_list into rsize-bounded
 * async read requests. Pages are moved into the pagecache (locked) and
 * handed to cifs_async_readv, which is responsible for unlocking and
 * releasing them on completion; on failure we unwind them here.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the opener's pid to the server if so mounted */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/* hold a file reference for the duration of the async read */
		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		/* retry loop: reopen the handle after a reconnect */
		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			/* unwind pages the failed request had claimed */
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
2598
/*
 * Fill one pagecache page: try fscache first, otherwise do a
 * synchronous server read, zero the tail, and mark the page uptodate.
 * The caller retains responsibility for unlocking the page; this
 * function balances its own page_cache_get/kmap.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* short read: zero-fill the remainder of the page */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

	/* success deliberately falls through: both paths must kunmap and
	   drop the page reference taken above */
io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2642
2643static int cifs_readpage(struct file *file, struct page *page)
2644{
2645 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2646 int rc = -EACCES;
2647 int xid;
2648
2649 xid = GetXid();
2650
2651 if (file->private_data == NULL) {
0f3bc09e 2652 rc = -EBADF;
1da177e4 2653 FreeXid(xid);
0f3bc09e 2654 return rc;
1da177e4
LT
2655 }
2656
b6b38f70
JP
2657 cFYI(1, "readpage %p at offset %d 0x%x\n",
2658 page, (int)offset, (int)offset);
1da177e4
LT
2659
2660 rc = cifs_readpage_worker(file, page, &offset);
2661
2662 unlock_page(page);
2663
2664 FreeXid(xid);
2665 return rc;
2666}
2667
a403a0a3
SF
2668static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2669{
2670 struct cifsFileInfo *open_file;
2671
4477288a 2672 spin_lock(&cifs_file_list_lock);
a403a0a3 2673 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2674 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2675 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2676 return 1;
2677 }
2678 }
4477288a 2679 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2680 return 0;
2681}
2682
1da177e4
LT
2683/* We do not want to update the file size from server for inodes
2684 open for write - to avoid races with writepage extending
2685 the file - in the future we could consider allowing
fb8c4b14 2686 refreshing the inode only on increases in the file size
1da177e4
LT
2687 but this is tricky to do without racing with writebehind
2688 page caching in the current Linux kernel design */
4b18f2a9 2689bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2690{
a403a0a3 2691 if (!cifsInode)
4b18f2a9 2692 return true;
50c2f753 2693
a403a0a3
SF
2694 if (is_inode_writable(cifsInode)) {
2695 /* This inode is open for write at least once */
c32a0b68
SF
2696 struct cifs_sb_info *cifs_sb;
2697
c32a0b68 2698 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2699 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2700 /* since no page cache to corrupt on directio
c32a0b68 2701 we can change size safely */
4b18f2a9 2702 return true;
c32a0b68
SF
2703 }
2704
fb8c4b14 2705 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2706 return true;
7ba52631 2707
4b18f2a9 2708 return false;
23e7dd7d 2709 } else
4b18f2a9 2710 return true;
1da177e4
LT
2711}
2712
d9414774
NP
/*
 * ->write_begin: find/create the target page and decide whether its
 * existing contents must be read in before the copy. The page is
 * returned locked in *pagep; if it is left !uptodate here,
 * cifs_write_end falls back to a synchronous write of just the copied
 * range.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);	/* start within page */
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero the parts of the page outside [offset,
			   offset+len); the middle will be overwritten */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2784
85f2d6b4
SJ
2785static int cifs_release_page(struct page *page, gfp_t gfp)
2786{
2787 if (PagePrivate(page))
2788 return 0;
2789
2790 return cifs_fscache_release_page(page, gfp);
2791}
2792
2793static void cifs_invalidate_page(struct page *page, unsigned long offset)
2794{
2795 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2796
2797 if (offset == 0)
2798 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2799}
2800
9ad1506b
PS
2801static int cifs_launder_page(struct page *page)
2802{
2803 int rc = 0;
2804 loff_t range_start = page_offset(page);
2805 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2806 struct writeback_control wbc = {
2807 .sync_mode = WB_SYNC_ALL,
2808 .nr_to_write = 0,
2809 .range_start = range_start,
2810 .range_end = range_end,
2811 };
2812
2813 cFYI(1, "Launder page: %p", page);
2814
2815 if (clear_page_dirty_for_io(page))
2816 rc = cifs_writepage_locked(page, &wbc);
2817
2818 cifs_fscache_invalidate_page(page, page->mapping->host);
2819 return rc;
2820}
2821
/*
 * Workqueue handler run when the server breaks our oplock: propagate
 * the break to any local leases, flush (and, on a full break,
 * invalidate) cached data, re-push byte-range locks, and finally
 * acknowledge the break to the server unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* downgrade local leases to match the new oplock level */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* full break: cached data may be stale, drop it */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* locks cached locally must now be re-sent to the server */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
2862
/*
 * Address space operations for mounts whose negotiated buffer is large
 * enough for cifs_readpages (header plus a full page of data); compare
 * cifs_addr_ops_smallbuf below, which omits .readpages.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
273d81d6
DK
2875
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,		/* no .readpages — see above */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
This page took 0.69277 seconds and 5 git commands to generate.