cifs: Mangle string used for unc in /proc/mounts
[deliverable/linux.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
/*
 * Open a file using the CIFS POSIX extensions (CIFSPOSIXCreate) and,
 * when requested, instantiate or refresh the corresponding inode.
 *
 * @full_path: server-relative path of the file
 * @pinode: in/out inode pointer; NULL if the caller does not need inode
 *          info, *pinode == NULL to allocate a new inode on success
 * @sb: superblock of the mount
 * @mode: create mode (masked by current umask below)
 * @f_flags: POSIX open flags, converted to SMB_O_* for the wire
 * @poplock: out - oplock granted by the server
 * @pnetfid: out - network file id of the opened handle
 * @xid: transaction id for request tracking
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	/* response buffer for the server's FILE_UNIX_BASIC_INFO reply */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means the server did not return file info */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: just refresh its attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
eeb910a6
PS
/*
 * Open a file the "NT" (non-POSIX-extensions) way via the server's
 * ->open operation, then refresh inode info from the returned metadata.
 *
 * @full_path: server-relative path
 * @inode: inode of the file being opened (metadata refreshed on success)
 * @cifs_sb: per-superblock CIFS info (used for backup-intent check)
 * @tcon: tree connection to use
 * @f_flags: POSIX open flags; converted to desired access + disposition
 * @oplock: in/out oplock state
 * @fid: out - server file handle
 * @xid: transaction id
 *
 * Returns 0 on success or a negative errno (-ENOSYS if the server ops
 * table has no ->open).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the file metadata returned by the open call */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock, buf,
			       cifs_sb);

	if (rc)
		goto out;

	/* refresh inode info; unix extensions use their own query path */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}
240
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo)
 * and its byte-range-lock list, link them onto the per-inode and
 * per-tcon open-file lists, and install the server fid on the handle.
 *
 * Returns the new cifsFileInfo (also stored in file->private_data), or
 * NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-fid container for byte-range locks taken through this handle */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	/* lock_sem protects the inode's list of per-fid lock lists */
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;		/* initial reference, dropped by cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	spin_lock(&cifs_file_list_lock);
	/* a lease break may have updated the oplock while the open raced */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}
297
/*
 * Take an additional reference on the file private data under
 * cifs_file_list_lock. Paired with cifsFileInfo_put().
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
306
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * On the last reference this unlinks the handle from the inode/tcon
 * lists, closes the server handle, frees the per-fid lock list, and
 * drops the dentry and tlink references taken in cifs_new_fileinfo().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* not the last reference; nothing further to tear down */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* ensure no oplock-break work is still running for this handle */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
386
/*
 * VFS ->open for regular files. Tries a POSIX-extensions open first
 * when the server advertises CIFS_UNIX_POSIX_PATH_OPS_CAP, then falls
 * back to the NT-style open. On success the handle's private data is
 * installed via cifs_new_fileinfo().
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX-extensions open when the server supports it */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server mishandles posix open: disable it permanently */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register pending open so a racing lease break is not lost */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open; we cannot track the handle */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
508
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
521
/*
 * Re-open a file whose server handle was invalidated (e.g. after a
 * reconnect). Serialized per-handle by cfile->fh_mutex; a handle that
 * is already valid returns 0 immediately.
 *
 * @cfile: the open-file instance to re-establish on the server
 * @can_flush: true if it is safe to flush dirty pages and refresh the
 *             inode from the server (false on writeback paths, where
 *             flushing here could deadlock)
 *
 * Returns 0 on success or a negative errno.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened it */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
654
655int cifs_close(struct inode *inode, struct file *file)
656{
77970693
JL
657 if (file->private_data != NULL) {
658 cifsFileInfo_put(file->private_data);
659 file->private_data = NULL;
660 }
7ee1af76 661
cdff08e7
SF
662 /* return code from the ->release op is always ignored */
663 return 0;
1da177e4
LT
664}
665
/*
 * VFS ->release for directories: close any uncompleted server-side
 * search, free the network buffer held by the search state, and free
 * the per-open private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		/* mark invalid before dropping the lock, then close on server */
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
716
85160e03 717static struct cifsLockInfo *
fbd35aca 718cifs_lock_init(__u64 offset, __u64 length, __u8 type)
7ee1af76 719{
a88b4707 720 struct cifsLockInfo *lock =
fb8c4b14 721 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
722 if (!lock)
723 return lock;
724 lock->offset = offset;
725 lock->length = length;
726 lock->type = type;
a88b4707
PS
727 lock->pid = current->tgid;
728 INIT_LIST_HEAD(&lock->blist);
729 init_waitqueue_head(&lock->block_q);
730 return lock;
85160e03
PS
731}
732
f7ba7fe6 733void
85160e03
PS
734cifs_del_lock_waiters(struct cifsLockInfo *lock)
735{
736 struct cifsLockInfo *li, *tmp;
737 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
738 list_del_init(&li->blist);
739 wake_up(&li->block_q);
740 }
741}
742
/*
 * Scan one fid's byte-range-lock list for a lock that conflicts with
 * the range [offset, offset+length) of the given @type requested via
 * @cfile. A lock held by the same fid and thread group is not a
 * conflict; for shared lock types, another shared lock is compatible.
 * When @rw_check is set, same-fid/same-tgid locks are skipped before
 * the type check (used for read/write conflict checking).
 *
 * If a conflict is found and @conf_lock is non-NULL, *conf_lock is set
 * to the conflicting record. Returns true if a conflict exists.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, bool rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks whose range does not overlap the request */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
		    current->tgid == li->pid)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
769
579f9053 770bool
55157dfb 771cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
579f9053
PS
772 __u8 type, struct cifsLockInfo **conf_lock,
773 bool rw_check)
161ebf9f 774{
fbd35aca 775 bool rc = false;
f45d3416 776 struct cifs_fid_locks *cur;
55157dfb 777 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
fbd35aca 778
f45d3416
PS
779 list_for_each_entry(cur, &cinode->llist, llist) {
780 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
579f9053 781 cfile, conf_lock, rw_check);
fbd35aca
PS
782 if (rc)
783 break;
784 }
fbd35aca
PS
785
786 return rc;
161ebf9f
PS
787}
788
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read lock: we only inspect the cached lock lists */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, false);
	if (exist) {
		/* report the conflicting lock back through flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* cache is not authoritative: must ask the server */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
826
161ebf9f 827static void
fbd35aca 828cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 829{
fbd35aca 830 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1b4b55a1 831 down_write(&cinode->lock_sem);
f45d3416 832 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 833 up_write(&cinode->lock_sem);
7ee1af76
JA
834}
835
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, false);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and cache is authoritative: grant locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* block on the conflicting lock's waiter list until woken */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		/* woken when blist is re-initialized (emptied) by the waker */
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: unhook ourselves from the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
882
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	/* remember the requested type; posix_test_lock() may overwrite it */
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		/* local cache cannot prove absence of a lock: ask the server */
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
911
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* cannot cache brlocks: caller must send request to server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* lock is blocked: wait until it is no longer queued, retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
944
/*
 * Flush all locally cached mandatory byte-range locks of @cfile to the
 * server, batching as many LOCKING_ANDX ranges per request as the server's
 * negotiated buffer size allows. Exclusive and shared locks are sent in
 * separate passes because a single LOCKING_ANDX request carries one lock
 * type. Clears can_cache_brlcks on the inode. Returns 0 or the last
 * non-zero status from cifs_lockv().
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* one pass per lock flavour: [0] exclusive, [1] shared */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached - another pusher already ran */
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -EINVAL;
	}

	/* how many lock ranges fit in one SMB after the header */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			/* marshal one 64-bit range into wire format */
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch now */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* flush the final partial batch of this pass */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	kfree(buf);
	free_xid(xid);
	return rc;
}
1029
4f6bcec9
PS
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

/*
 * Snapshot of one VFS file_lock, taken while lock_flocks() is held so the
 * actual server push can happen later without holding that spinlock.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start of the locked byte range */
	__u64 length;		/* length of the locked byte range */
	__u32 pid;		/* lock owner (fl_pid) */
	__u16 netfid;		/* server file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1043
4f6bcec9
PS
/*
 * Push all cached POSIX (fcntl-style) byte-range locks of @cfile to the
 * server. Works in three phases: count the FL_POSIX locks under
 * lock_flocks(), preallocate that many lock_to_push entries (kmalloc is not
 * allowed under lock_flocks()), then copy the lock data and send each one
 * via CIFSSMBPosixLock(). Clears can_cache_brlcks in all cases. Returns 0
 * or the last non-zero status from the server.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/* phase 1: count how many POSIX locks we must push */
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* phase 2: copy lock data into the preallocated entries */
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	/* phase 3: send each snapshot to the server and free it */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way: free what we did allocate, then take
	   the normal exit path so can_cache_brlcks is still cleared */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1143
1144static int
1145cifs_push_locks(struct cifsFileInfo *cfile)
1146{
1147 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1148 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1149
29e20f9c 1150 if (cap_unix(tcon->ses) &&
4f6bcec9
PS
1151 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1152 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1153 return cifs_push_posix_locks(cfile);
1154
d39a4f71 1155 return tcon->ses->server->ops->push_mand_locks(cfile);
4f6bcec9
PS
1156}
1157
03776f45 1158static void
04a6aa8a 1159cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
106dc538 1160 bool *wait_flag, struct TCP_Server_Info *server)
1da177e4 1161{
03776f45 1162 if (flock->fl_flags & FL_POSIX)
b6b38f70 1163 cFYI(1, "Posix");
03776f45 1164 if (flock->fl_flags & FL_FLOCK)
b6b38f70 1165 cFYI(1, "Flock");
03776f45 1166 if (flock->fl_flags & FL_SLEEP) {
b6b38f70 1167 cFYI(1, "Blocking lock");
03776f45 1168 *wait_flag = true;
1da177e4 1169 }
03776f45 1170 if (flock->fl_flags & FL_ACCESS)
b6b38f70 1171 cFYI(1, "Process suspended by mandatory locking - "
03776f45
PS
1172 "not implemented yet");
1173 if (flock->fl_flags & FL_LEASE)
b6b38f70 1174 cFYI(1, "Lease on file - not implemented yet");
03776f45 1175 if (flock->fl_flags &
1da177e4 1176 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
03776f45 1177 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1da177e4 1178
106dc538 1179 *type = server->vals->large_lock_type;
03776f45 1180 if (flock->fl_type == F_WRLCK) {
b6b38f70 1181 cFYI(1, "F_WRLCK ");
106dc538 1182 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1183 *lock = 1;
1184 } else if (flock->fl_type == F_UNLCK) {
b6b38f70 1185 cFYI(1, "F_UNLCK");
106dc538 1186 *type |= server->vals->unlock_lock_type;
03776f45
PS
1187 *unlock = 1;
1188 /* Check if unlock includes more than one lock range */
1189 } else if (flock->fl_type == F_RDLCK) {
b6b38f70 1190 cFYI(1, "F_RDLCK");
106dc538 1191 *type |= server->vals->shared_lock_type;
03776f45
PS
1192 *lock = 1;
1193 } else if (flock->fl_type == F_EXLCK) {
b6b38f70 1194 cFYI(1, "F_EXLCK");
106dc538 1195 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1196 *lock = 1;
1197 } else if (flock->fl_type == F_SHLCK) {
b6b38f70 1198 cFYI(1, "F_SHLCK");
106dc538 1199 *type |= server->vals->shared_lock_type;
03776f45 1200 *lock = 1;
1da177e4 1201 } else
b6b38f70 1202 cFYI(1, "Unknown type of lock");
03776f45 1203}
1da177e4 1204
/*
 * Service F_GETLK: report whether the range in @flock could be locked.
 * On the mandatory-lock path this probes the server by actually taking a
 * trial lock and releasing it again, updating flock->fl_type with the
 * result (F_UNLCK = range is free, F_WRLCK/F_RDLCK = conflict).
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0: answered from the local cache, done */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* rc == 0: conflict found in the local cache, flock updated */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to take the requested lock on the server */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* it succeeded, so the range is free - undo the trial lock */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	/* a shared request failed - something exclusive is in the way */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive request failed: retry as shared to tell whether the
	   conflict is a read lock or a write lock */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		/* shared worked - the holder has a read lock; undo ours */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1272
f7ba7fe6 1273void
9ee305b7
PS
1274cifs_move_llist(struct list_head *source, struct list_head *dest)
1275{
1276 struct list_head *li, *tmp;
1277 list_for_each_safe(li, tmp, source)
1278 list_move(li, dest);
1279}
1280
f7ba7fe6 1281void
9ee305b7
PS
1282cifs_free_llist(struct list_head *llist)
1283{
1284 struct cifsLockInfo *li, *tmp;
1285 list_for_each_entry_safe(li, tmp, llist, llist) {
1286 cifs_del_lock_waiters(li);
1287 list_del(&li->llist);
1288 kfree(li);
1289 }
1290}
1291
/*
 * Unlock all cached locks of @cfile that fall entirely inside the range in
 * @flock. Cached-only locks are simply dropped; locks already on the server
 * are batched into LOCKING_ANDX unlock requests. Locks being unlocked are
 * parked on tmp_llist so they can be restored to the file's list if the
 * server rejects the request. Returns 0 or the last failure from
 * cifs_lockv().
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* one pass per lock flavour: [0] exclusive, [1] shared */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	/* how many lock ranges fit in one SMB after the header */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* skip locks not fully contained in the range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* only this thread group's locks are affected */
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* flush the final partial batch of this pass */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1398
03776f45 1399static int
f45d3416 1400cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
6d5786a3
PS
1401 bool wait_flag, bool posix_lck, int lock, int unlock,
1402 unsigned int xid)
03776f45
PS
1403{
1404 int rc = 0;
1405 __u64 length = 1 + flock->fl_end - flock->fl_start;
1406 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1407 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
106dc538 1408 struct TCP_Server_Info *server = tcon->ses->server;
03776f45
PS
1409
1410 if (posix_lck) {
08547b03 1411 int posix_lock_type;
4f6bcec9
PS
1412
1413 rc = cifs_posix_lock_set(file, flock);
1414 if (!rc || rc < 0)
1415 return rc;
1416
106dc538 1417 if (type & server->vals->shared_lock_type)
08547b03
SF
1418 posix_lock_type = CIFS_RDLCK;
1419 else
1420 posix_lock_type = CIFS_WRLCK;
50c2f753 1421
03776f45 1422 if (unlock == 1)
beb84dc8 1423 posix_lock_type = CIFS_UNLCK;
7ee1af76 1424
f45d3416
PS
1425 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1426 current->tgid, flock->fl_start, length,
1427 NULL, posix_lock_type, wait_flag);
03776f45
PS
1428 goto out;
1429 }
7ee1af76 1430
03776f45 1431 if (lock) {
161ebf9f
PS
1432 struct cifsLockInfo *lock;
1433
fbd35aca 1434 lock = cifs_lock_init(flock->fl_start, length, type);
161ebf9f
PS
1435 if (!lock)
1436 return -ENOMEM;
1437
fbd35aca 1438 rc = cifs_lock_add_if(cfile, lock, wait_flag);
85160e03 1439 if (rc < 0)
161ebf9f
PS
1440 kfree(lock);
1441 if (rc <= 0)
85160e03
PS
1442 goto out;
1443
d39a4f71
PS
1444 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1445 type, 1, 0, wait_flag);
161ebf9f
PS
1446 if (rc) {
1447 kfree(lock);
1448 goto out;
03776f45 1449 }
161ebf9f 1450
fbd35aca 1451 cifs_lock_add(cfile, lock);
9ee305b7 1452 } else if (unlock)
d39a4f71 1453 rc = server->ops->mand_unlock_range(cfile, flock, xid);
03776f45 1454
03776f45
PS
1455out:
1456 if (flock->fl_flags & FL_POSIX)
9ebb389d 1457 posix_lock_file_wait(file, flock);
03776f45
PS
1458 return rc;
1459}
1460
/*
 * VFS ->lock entry point for cifs files. Decodes the request with
 * cifs_read_flock(), decides whether the POSIX locking protocol is
 * available for this mount, then dispatches to cifs_getlk() for F_GETLK
 * or cifs_setlk() for set/unset requests.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	/* decode flags/type into lock|unlock intent and server lock bits */
	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	/* POSIX brlocks require unix extensions + FCNTL capability and must
	   not be disabled by the nobrl/noposixbrl mount option */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1519
597b027f
JL
1520/*
1521 * update the file size (if needed) after a write. Should be called with
1522 * the inode->i_lock held
1523 */
72432ffc 1524void
fbec9ab9
JL
1525cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1526 unsigned int bytes_written)
1527{
1528 loff_t end_of_write = offset + bytes_written;
1529
1530 if (end_of_write > cifsi->server_eof)
1531 cifsi->server_eof = end_of_write;
1532}
1533
ba9ad725
PS
/*
 * Synchronously write @write_size bytes from @write_data to the server at
 * *@offset, in chunks of at most wsize, retrying transparently on -EAGAIN
 * (reopening a stale handle first when needed). On success advances
 * *@offset, updates the cached server EOF and local i_size, and returns
 * the number of bytes written; returns a negative errno only when nothing
 * at all was written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the negotiated write size */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* report the error only if no data went out at all;
			   otherwise return the partial count */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects both server_eof and i_size */
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1618
6508d904
JL
1619struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1620 bool fsuid_only)
630f3f0c
SF
1621{
1622 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1623 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1624
1625 /* only filter by fsuid on multiuser mounts */
1626 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1627 fsuid_only = false;
630f3f0c 1628
4477288a 1629 spin_lock(&cifs_file_list_lock);
630f3f0c
SF
1630 /* we could simply get the first_list_entry since write-only entries
1631 are always at the end of the list but since the first entry might
1632 have a close pending, we go through the whole list */
1633 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1634 if (fsuid_only && open_file->uid != current_fsuid())
1635 continue;
2e396b83 1636 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1637 if (!open_file->invalidHandle) {
1638 /* found a good file */
1639 /* lock it so it will not be closed on us */
764a1b1a 1640 cifsFileInfo_get_locked(open_file);
4477288a 1641 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1642 return open_file;
1643 } /* else might as well continue, and look for
1644 another, or simply have the caller reopen it
1645 again rather than trying to fix this handle */
1646 } else /* write only file */
1647 break; /* write only files are last so must be done */
1648 }
4477288a 1649 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1650 return NULL;
1651}
630f3f0c 1652
6508d904
JL
/*
 * Return a pinned open handle on @cifs_inode that is writable, or NULL.
 * Preference order: a valid handle owned by the current thread group, then
 * any valid writable handle, then - up to MAX_REOPEN_ATT attempts - an
 * invalid (stale) handle that can be reopened. The caller must drop the
 * reference with cifsFileInfo_put().
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	/* give up after too many reopen attempts on stale handles */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass restricts to this thread group's handles */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				/* remember the first stale candidate */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		/* reset so a later rescan starts with the pid filter again */
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		/* try to revive the stale handle outside the list lock */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* reopen failed: push it to the tail so the next
			   rescan considers other candidates first */
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
1732
1da177e4
LT
/*
 * Write the byte range [@from, @to) of @page back to the server using any
 * writable open handle on the inode. Never extends the file: the range is
 * clamped to the current i_size, and a write landing entirely beyond EOF
 * (racing with truncate) is silently ignored. Returns 0 on success or a
 * negative errno.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	/* map the page; every return below must kunmap() first */
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1786
1da177e4 1787static int cifs_writepages(struct address_space *mapping,
37c0eb46 1788 struct writeback_control *wbc)
1da177e4 1789{
c3d17b63
JL
1790 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1791 bool done = false, scanned = false, range_whole = false;
1792 pgoff_t end, index;
1793 struct cifs_writedata *wdata;
c9de5c80 1794 struct TCP_Server_Info *server;
37c0eb46 1795 struct page *page;
37c0eb46 1796 int rc = 0;
eddb079d 1797 loff_t isize = i_size_read(mapping->host);
50c2f753 1798
37c0eb46 1799 /*
c3d17b63 1800 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1801 * one page at a time via cifs_writepage
1802 */
1803 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1804 return generic_writepages(mapping, wbc);
1805
111ebb6e 1806 if (wbc->range_cyclic) {
37c0eb46 1807 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1808 end = -1;
1809 } else {
1810 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1811 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1812 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1813 range_whole = true;
1814 scanned = true;
37c0eb46
SF
1815 }
1816retry:
c3d17b63
JL
1817 while (!done && index <= end) {
1818 unsigned int i, nr_pages, found_pages;
1819 pgoff_t next = 0, tofind;
1820 struct page **pages;
1821
1822 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1823 end - index) + 1;
1824
c2e87640
JL
1825 wdata = cifs_writedata_alloc((unsigned int)tofind,
1826 cifs_writev_complete);
c3d17b63
JL
1827 if (!wdata) {
1828 rc = -ENOMEM;
1829 break;
1830 }
1831
1832 /*
1833 * find_get_pages_tag seems to return a max of 256 on each
1834 * iteration, so we must call it several times in order to
1835 * fill the array or the wsize is effectively limited to
1836 * 256 * PAGE_CACHE_SIZE.
1837 */
1838 found_pages = 0;
1839 pages = wdata->pages;
1840 do {
1841 nr_pages = find_get_pages_tag(mapping, &index,
1842 PAGECACHE_TAG_DIRTY,
1843 tofind, pages);
1844 found_pages += nr_pages;
1845 tofind -= nr_pages;
1846 pages += nr_pages;
1847 } while (nr_pages && tofind && index <= end);
1848
1849 if (found_pages == 0) {
1850 kref_put(&wdata->refcount, cifs_writedata_release);
1851 break;
1852 }
1853
1854 nr_pages = 0;
1855 for (i = 0; i < found_pages; i++) {
1856 page = wdata->pages[i];
37c0eb46
SF
1857 /*
1858 * At this point we hold neither mapping->tree_lock nor
1859 * lock on the page itself: the page may be truncated or
1860 * invalidated (changing page->mapping to NULL), or even
1861 * swizzled back from swapper_space to tmpfs file
1862 * mapping
1863 */
1864
c3d17b63 1865 if (nr_pages == 0)
37c0eb46 1866 lock_page(page);
529ae9aa 1867 else if (!trylock_page(page))
37c0eb46
SF
1868 break;
1869
1870 if (unlikely(page->mapping != mapping)) {
1871 unlock_page(page);
1872 break;
1873 }
1874
111ebb6e 1875 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1876 done = true;
37c0eb46
SF
1877 unlock_page(page);
1878 break;
1879 }
1880
1881 if (next && (page->index != next)) {
1882 /* Not next consecutive page */
1883 unlock_page(page);
1884 break;
1885 }
1886
1887 if (wbc->sync_mode != WB_SYNC_NONE)
1888 wait_on_page_writeback(page);
1889
1890 if (PageWriteback(page) ||
cb876f45 1891 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1892 unlock_page(page);
1893 break;
1894 }
84d2f07e 1895
cb876f45
LT
1896 /*
1897 * This actually clears the dirty bit in the radix tree.
1898 * See cifs_writepage() for more commentary.
1899 */
1900 set_page_writeback(page);
1901
eddb079d 1902 if (page_offset(page) >= isize) {
c3d17b63 1903 done = true;
84d2f07e 1904 unlock_page(page);
cb876f45 1905 end_page_writeback(page);
84d2f07e
SF
1906 break;
1907 }
1908
c3d17b63
JL
1909 wdata->pages[i] = page;
1910 next = page->index + 1;
1911 ++nr_pages;
1912 }
37c0eb46 1913
c3d17b63
JL
1914 /* reset index to refind any pages skipped */
1915 if (nr_pages == 0)
1916 index = wdata->pages[0]->index + 1;
84d2f07e 1917
c3d17b63
JL
1918 /* put any pages we aren't going to use */
1919 for (i = nr_pages; i < found_pages; i++) {
1920 page_cache_release(wdata->pages[i]);
1921 wdata->pages[i] = NULL;
1922 }
37c0eb46 1923
c3d17b63
JL
1924 /* nothing to write? */
1925 if (nr_pages == 0) {
1926 kref_put(&wdata->refcount, cifs_writedata_release);
1927 continue;
37c0eb46 1928 }
fbec9ab9 1929
c3d17b63
JL
1930 wdata->sync_mode = wbc->sync_mode;
1931 wdata->nr_pages = nr_pages;
1932 wdata->offset = page_offset(wdata->pages[0]);
eddb079d
JL
1933 wdata->pagesz = PAGE_CACHE_SIZE;
1934 wdata->tailsz =
1935 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1936 (loff_t)PAGE_CACHE_SIZE);
1937 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1938 wdata->tailsz;
941b853d 1939
c3d17b63
JL
1940 do {
1941 if (wdata->cfile != NULL)
1942 cifsFileInfo_put(wdata->cfile);
1943 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1944 false);
1945 if (!wdata->cfile) {
1946 cERROR(1, "No writable handles for inode");
1947 rc = -EBADF;
1948 break;
941b853d 1949 }
fe5f5d2e 1950 wdata->pid = wdata->cfile->pid;
c9de5c80
PS
1951 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1952 rc = server->ops->async_writev(wdata);
c3d17b63 1953 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1954
c3d17b63
JL
1955 for (i = 0; i < nr_pages; ++i)
1956 unlock_page(wdata->pages[i]);
f3983c21 1957
c3d17b63
JL
1958 /* send failure -- clean up the mess */
1959 if (rc != 0) {
1960 for (i = 0; i < nr_pages; ++i) {
941b853d 1961 if (rc == -EAGAIN)
c3d17b63
JL
1962 redirty_page_for_writepage(wbc,
1963 wdata->pages[i]);
1964 else
1965 SetPageError(wdata->pages[i]);
1966 end_page_writeback(wdata->pages[i]);
1967 page_cache_release(wdata->pages[i]);
37c0eb46 1968 }
941b853d
JL
1969 if (rc != -EAGAIN)
1970 mapping_set_error(mapping, rc);
c3d17b63
JL
1971 }
1972 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1973
c3d17b63
JL
1974 wbc->nr_to_write -= nr_pages;
1975 if (wbc->nr_to_write <= 0)
1976 done = true;
b066a48c 1977
c3d17b63 1978 index = next;
37c0eb46 1979 }
c3d17b63 1980
37c0eb46
SF
1981 if (!scanned && !done) {
1982 /*
1983 * We hit the last page and there is more work to be done: wrap
1984 * back to the start of the file
1985 */
c3d17b63 1986 scanned = true;
37c0eb46
SF
1987 index = 0;
1988 goto retry;
1989 }
c3d17b63 1990
111ebb6e 1991 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1992 mapping->writeback_index = index;
1993
1da177e4
LT
1994 return rc;
1995}
1da177e4 1996
9ad1506b
PS
1997static int
1998cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1999{
9ad1506b 2000 int rc;
6d5786a3 2001 unsigned int xid;
1da177e4 2002
6d5786a3 2003 xid = get_xid();
1da177e4
LT
2004/* BB add check for wbc flags */
2005 page_cache_get(page);
ad7a2926 2006 if (!PageUptodate(page))
b6b38f70 2007 cFYI(1, "ppw - page not up to date");
cb876f45
LT
2008
2009 /*
2010 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2011 *
2012 * A writepage() implementation always needs to do either this,
2013 * or re-dirty the page with "redirty_page_for_writepage()" in
2014 * the case of a failure.
2015 *
2016 * Just unlocking the page will cause the radix tree tag-bits
2017 * to fail to update with the state of the page correctly.
2018 */
fb8c4b14 2019 set_page_writeback(page);
9ad1506b 2020retry_write:
1da177e4 2021 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
2022 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2023 goto retry_write;
2024 else if (rc == -EAGAIN)
2025 redirty_page_for_writepage(wbc, page);
2026 else if (rc != 0)
2027 SetPageError(page);
2028 else
2029 SetPageUptodate(page);
cb876f45
LT
2030 end_page_writeback(page);
2031 page_cache_release(page);
6d5786a3 2032 free_xid(xid);
1da177e4
LT
2033 return rc;
2034}
2035
9ad1506b
PS
/* ->writepage entry point: write the page out, then drop the page lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
2042
d9414774
NP
2043static int cifs_write_end(struct file *file, struct address_space *mapping,
2044 loff_t pos, unsigned len, unsigned copied,
2045 struct page *page, void *fsdata)
1da177e4 2046{
d9414774
NP
2047 int rc;
2048 struct inode *inode = mapping->host;
d4ffff1f
PS
2049 struct cifsFileInfo *cfile = file->private_data;
2050 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2051 __u32 pid;
2052
2053 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2054 pid = cfile->pid;
2055 else
2056 pid = current->tgid;
1da177e4 2057
b6b38f70
JP
2058 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2059 page, pos, copied);
d9414774 2060
a98ee8c1
JL
2061 if (PageChecked(page)) {
2062 if (copied == len)
2063 SetPageUptodate(page);
2064 ClearPageChecked(page);
2065 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 2066 SetPageUptodate(page);
ad7a2926 2067
1da177e4 2068 if (!PageUptodate(page)) {
d9414774
NP
2069 char *page_data;
2070 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
6d5786a3 2071 unsigned int xid;
d9414774 2072
6d5786a3 2073 xid = get_xid();
1da177e4
LT
2074 /* this is probably better than directly calling
2075 partialpage_write since in this function the file handle is
2076 known which we might as well leverage */
2077 /* BB check if anything else missing out of ppw
2078 such as updating last write time */
2079 page_data = kmap(page);
d4ffff1f 2080 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
d9414774 2081 /* if (rc < 0) should we set writebehind rc? */
1da177e4 2082 kunmap(page);
d9414774 2083
6d5786a3 2084 free_xid(xid);
fb8c4b14 2085 } else {
d9414774
NP
2086 rc = copied;
2087 pos += copied;
1da177e4
LT
2088 set_page_dirty(page);
2089 }
2090
d9414774
NP
2091 if (rc > 0) {
2092 spin_lock(&inode->i_lock);
2093 if (pos > inode->i_size)
2094 i_size_write(inode, pos);
2095 spin_unlock(&inode->i_lock);
2096 }
2097
2098 unlock_page(page);
2099 page_cache_release(page);
2100
1da177e4
LT
2101 return rc;
2102}
2103
02c24a82
JB
2104int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2105 int datasync)
1da177e4 2106{
6d5786a3 2107 unsigned int xid;
1da177e4 2108 int rc = 0;
96daf2b0 2109 struct cifs_tcon *tcon;
1d8c4c00 2110 struct TCP_Server_Info *server;
c21dfb69 2111 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 2112 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 2113 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4 2114
02c24a82
JB
2115 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2116 if (rc)
2117 return rc;
2118 mutex_lock(&inode->i_mutex);
2119
6d5786a3 2120 xid = get_xid();
1da177e4 2121
b6b38f70 2122 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 2123 file->f_path.dentry->d_name.name, datasync);
50c2f753 2124
6feb9891
PS
2125 if (!CIFS_I(inode)->clientCanCacheRead) {
2126 rc = cifs_invalidate_mapping(inode);
2127 if (rc) {
2128 cFYI(1, "rc: %d during invalidate phase", rc);
2129 rc = 0; /* don't care about it in fsync */
2130 }
2131 }
eb4b756b 2132
8be7e6ba 2133 tcon = tlink_tcon(smbfile->tlink);
1d8c4c00
PS
2134 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2135 server = tcon->ses->server;
2136 if (server->ops->flush)
2137 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2138 else
2139 rc = -ENOSYS;
2140 }
8be7e6ba 2141
6d5786a3 2142 free_xid(xid);
02c24a82 2143 mutex_unlock(&inode->i_mutex);
8be7e6ba
PS
2144 return rc;
2145}
2146
02c24a82 2147int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba 2148{
6d5786a3 2149 unsigned int xid;
8be7e6ba 2150 int rc = 0;
96daf2b0 2151 struct cifs_tcon *tcon;
1d8c4c00 2152 struct TCP_Server_Info *server;
8be7e6ba
PS
2153 struct cifsFileInfo *smbfile = file->private_data;
2154 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
02c24a82
JB
2155 struct inode *inode = file->f_mapping->host;
2156
2157 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2158 if (rc)
2159 return rc;
2160 mutex_lock(&inode->i_mutex);
8be7e6ba 2161
6d5786a3 2162 xid = get_xid();
8be7e6ba
PS
2163
2164 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2165 file->f_path.dentry->d_name.name, datasync);
2166
2167 tcon = tlink_tcon(smbfile->tlink);
1d8c4c00
PS
2168 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2169 server = tcon->ses->server;
2170 if (server->ops->flush)
2171 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2172 else
2173 rc = -ENOSYS;
2174 }
b298f223 2175
6d5786a3 2176 free_xid(xid);
02c24a82 2177 mutex_unlock(&inode->i_mutex);
1da177e4
LT
2178 return rc;
2179}
2180
1da177e4
LT
2181/*
2182 * As file closes, flush all cached write data for this inode checking
2183 * for write behind errors.
2184 */
75e1fcc0 2185int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2186{
fb8c4b14 2187 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
2188 int rc = 0;
2189
eb4b756b 2190 if (file->f_mode & FMODE_WRITE)
d3f1322a 2191 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2192
b6b38f70 2193 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
2194
2195 return rc;
2196}
2197
72432ffc
PS
2198static int
2199cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2200{
2201 int rc = 0;
2202 unsigned long i;
2203
2204 for (i = 0; i < num_pages; i++) {
e94f7ba1 2205 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
72432ffc
PS
2206 if (!pages[i]) {
2207 /*
2208 * save number of pages we have already allocated and
2209 * return with ENOMEM error
2210 */
2211 num_pages = i;
2212 rc = -ENOMEM;
e94f7ba1 2213 break;
72432ffc
PS
2214 }
2215 }
2216
e94f7ba1
JL
2217 if (rc) {
2218 for (i = 0; i < num_pages; i++)
2219 put_page(pages[i]);
2220 }
72432ffc
PS
2221 return rc;
2222}
2223
2224static inline
2225size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2226{
2227 size_t num_pages;
2228 size_t clen;
2229
2230 clen = min_t(const size_t, len, wsize);
a7103b99 2231 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2232
2233 if (cur_len)
2234 *cur_len = clen;
2235
2236 return num_pages;
2237}
2238
da82f7e7
JL
2239static void
2240cifs_uncached_writev_complete(struct work_struct *work)
2241{
2242 int i;
2243 struct cifs_writedata *wdata = container_of(work,
2244 struct cifs_writedata, work);
2245 struct inode *inode = wdata->cfile->dentry->d_inode;
2246 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2247
2248 spin_lock(&inode->i_lock);
2249 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2250 if (cifsi->server_eof > inode->i_size)
2251 i_size_write(inode, cifsi->server_eof);
2252 spin_unlock(&inode->i_lock);
2253
2254 complete(&wdata->done);
2255
2256 if (wdata->result != -EAGAIN) {
2257 for (i = 0; i < wdata->nr_pages; i++)
2258 put_page(wdata->pages[i]);
2259 }
2260
2261 kref_put(&wdata->refcount, cifs_writedata_release);
2262}
2263
2264/* attempt to send write to server, retry on any -EAGAIN errors */
2265static int
2266cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2267{
2268 int rc;
c9de5c80
PS
2269 struct TCP_Server_Info *server;
2270
2271 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
da82f7e7
JL
2272
2273 do {
2274 if (wdata->cfile->invalidHandle) {
2275 rc = cifs_reopen_file(wdata->cfile, false);
2276 if (rc != 0)
2277 continue;
2278 }
c9de5c80 2279 rc = server->ops->async_writev(wdata);
da82f7e7
JL
2280 } while (rc == -EAGAIN);
2281
2282 return rc;
2283}
2284
72432ffc
PS
2285static ssize_t
2286cifs_iovec_write(struct file *file, const struct iovec *iov,
2287 unsigned long nr_segs, loff_t *poffset)
2288{
da82f7e7 2289 unsigned long nr_pages, i;
76429c14
PS
2290 size_t copied, len, cur_len;
2291 ssize_t total_written = 0;
3af9d8f2 2292 loff_t offset;
72432ffc 2293 struct iov_iter it;
72432ffc 2294 struct cifsFileInfo *open_file;
da82f7e7 2295 struct cifs_tcon *tcon;
72432ffc 2296 struct cifs_sb_info *cifs_sb;
da82f7e7
JL
2297 struct cifs_writedata *wdata, *tmp;
2298 struct list_head wdata_list;
2299 int rc;
2300 pid_t pid;
72432ffc
PS
2301
2302 len = iov_length(iov, nr_segs);
2303 if (!len)
2304 return 0;
2305
2306 rc = generic_write_checks(file, poffset, &len, 0);
2307 if (rc)
2308 return rc;
2309
da82f7e7 2310 INIT_LIST_HEAD(&wdata_list);
72432ffc 2311 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
72432ffc 2312 open_file = file->private_data;
da82f7e7 2313 tcon = tlink_tcon(open_file->tlink);
c9de5c80
PS
2314
2315 if (!tcon->ses->server->ops->async_writev)
2316 return -ENOSYS;
2317
3af9d8f2 2318 offset = *poffset;
d4ffff1f
PS
2319
2320 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2321 pid = open_file->pid;
2322 else
2323 pid = current->tgid;
2324
72432ffc 2325 iov_iter_init(&it, iov, nr_segs, len, 0);
72432ffc 2326 do {
da82f7e7
JL
2327 size_t save_len;
2328
2329 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2330 wdata = cifs_writedata_alloc(nr_pages,
2331 cifs_uncached_writev_complete);
2332 if (!wdata) {
2333 rc = -ENOMEM;
2334 break;
2335 }
2336
2337 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2338 if (rc) {
2339 kfree(wdata);
2340 break;
2341 }
2342
2343 save_len = cur_len;
2344 for (i = 0; i < nr_pages; i++) {
2345 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2346 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2347 0, copied);
72432ffc
PS
2348 cur_len -= copied;
2349 iov_iter_advance(&it, copied);
72432ffc 2350 }
72432ffc
PS
2351 cur_len = save_len - cur_len;
2352
da82f7e7
JL
2353 wdata->sync_mode = WB_SYNC_ALL;
2354 wdata->nr_pages = nr_pages;
2355 wdata->offset = (__u64)offset;
2356 wdata->cfile = cifsFileInfo_get(open_file);
2357 wdata->pid = pid;
2358 wdata->bytes = cur_len;
eddb079d
JL
2359 wdata->pagesz = PAGE_SIZE;
2360 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
da82f7e7
JL
2361 rc = cifs_uncached_retry_writev(wdata);
2362 if (rc) {
2363 kref_put(&wdata->refcount, cifs_writedata_release);
72432ffc
PS
2364 break;
2365 }
2366
da82f7e7
JL
2367 list_add_tail(&wdata->list, &wdata_list);
2368 offset += cur_len;
2369 len -= cur_len;
72432ffc
PS
2370 } while (len > 0);
2371
da82f7e7
JL
2372 /*
2373 * If at least one write was successfully sent, then discard any rc
2374 * value from the later writes. If the other write succeeds, then
2375 * we'll end up returning whatever was written. If it fails, then
2376 * we'll get a new rc value from that.
2377 */
2378 if (!list_empty(&wdata_list))
2379 rc = 0;
2380
2381 /*
2382 * Wait for and collect replies for any successful sends in order of
2383 * increasing offset. Once an error is hit or we get a fatal signal
2384 * while waiting, then return without waiting for any more replies.
2385 */
2386restart_loop:
2387 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2388 if (!rc) {
2389 /* FIXME: freezable too? */
2390 rc = wait_for_completion_killable(&wdata->done);
2391 if (rc)
2392 rc = -EINTR;
2393 else if (wdata->result)
2394 rc = wdata->result;
2395 else
2396 total_written += wdata->bytes;
2397
2398 /* resend call if it's a retryable error */
2399 if (rc == -EAGAIN) {
2400 rc = cifs_uncached_retry_writev(wdata);
2401 goto restart_loop;
2402 }
2403 }
2404 list_del_init(&wdata->list);
2405 kref_put(&wdata->refcount, cifs_writedata_release);
72432ffc
PS
2406 }
2407
da82f7e7
JL
2408 if (total_written > 0)
2409 *poffset += total_written;
72432ffc 2410
da82f7e7
JL
2411 cifs_stats_bytes_written(tcon, total_written);
2412 return total_written ? total_written : (ssize_t)rc;
72432ffc
PS
2413}
2414
0b81c1c4 2415ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
2416 unsigned long nr_segs, loff_t pos)
2417{
2418 ssize_t written;
2419 struct inode *inode;
2420
2421 inode = iocb->ki_filp->f_path.dentry->d_inode;
2422
2423 /*
2424 * BB - optimize the way when signing is disabled. We can drop this
2425 * extra memory-to-memory copying and use iovec buffers for constructing
2426 * write request.
2427 */
2428
2429 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2430 if (written > 0) {
2431 CIFS_I(inode)->invalid_mapping = true;
2432 iocb->ki_pos = pos;
2433 }
2434
2435 return written;
2436}
2437
579f9053
PS
2438static ssize_t
2439cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2440 unsigned long nr_segs, loff_t pos)
72432ffc 2441{
579f9053
PS
2442 struct file *file = iocb->ki_filp;
2443 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2444 struct inode *inode = file->f_mapping->host;
2445 struct cifsInodeInfo *cinode = CIFS_I(inode);
2446 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2447 ssize_t rc = -EACCES;
72432ffc 2448
579f9053 2449 BUG_ON(iocb->ki_pos != pos);
72432ffc 2450
579f9053
PS
2451 sb_start_write(inode->i_sb);
2452
2453 /*
2454 * We need to hold the sem to be sure nobody modifies lock list
2455 * with a brlock that prevents writing.
2456 */
2457 down_read(&cinode->lock_sem);
2458 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2459 server->vals->exclusive_lock_type, NULL,
2460 true)) {
2461 mutex_lock(&inode->i_mutex);
2462 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2463 &iocb->ki_pos);
2464 mutex_unlock(&inode->i_mutex);
2465 }
2466
2467 if (rc > 0 || rc == -EIOCBQUEUED) {
2468 ssize_t err;
2469
2470 err = generic_write_sync(file, pos, rc);
2471 if (err < 0 && rc > 0)
2472 rc = err;
2473 }
2474
2475 up_read(&cinode->lock_sem);
2476 sb_end_write(inode->i_sb);
2477 return rc;
2478}
2479
2480ssize_t
2481cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2482 unsigned long nr_segs, loff_t pos)
2483{
2484 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2485 struct cifsInodeInfo *cinode = CIFS_I(inode);
2486 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2487 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2488 iocb->ki_filp->private_data;
2489 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
72432ffc 2490
25078105 2491#ifdef CONFIG_CIFS_SMB2
72432ffc 2492 /*
25078105
PS
2493 * If we have an oplock for read and want to write a data to the file
2494 * we need to store it in the page cache and then push it to the server
2495 * to be sure the next read will get a valid data.
2496 */
2497 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
2498 ssize_t written;
2499 int rc;
2500
2501 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
2502 rc = filemap_fdatawrite(inode->i_mapping);
2503 if (rc)
2504 return (ssize_t)rc;
2505
2506 return written;
2507 }
2508#endif
2509
2510 /*
2511 * For non-oplocked files in strict cache mode we need to write the data
2512 * to the server exactly from the pos to pos+len-1 rather than flush all
2513 * affected pages because it may cause a error with mandatory locks on
2514 * these pages but not on the region from pos to ppos+len-1.
72432ffc
PS
2515 */
2516
579f9053
PS
2517 if (!cinode->clientCanCacheAll)
2518 return cifs_user_writev(iocb, iov, nr_segs, pos);
2519
2520 if (cap_unix(tcon->ses) &&
2521 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2522 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2523 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2524
2525 return cifs_writev(iocb, iov, nr_segs, pos);
72432ffc
PS
2526}
2527
0471ca3f 2528static struct cifs_readdata *
f4e49cd2 2529cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
0471ca3f
JL
2530{
2531 struct cifs_readdata *rdata;
f4e49cd2 2532
c5fab6f4
JL
2533 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2534 GFP_KERNEL);
0471ca3f 2535 if (rdata != NULL) {
6993f74a 2536 kref_init(&rdata->refcount);
1c892549
JL
2537 INIT_LIST_HEAD(&rdata->list);
2538 init_completion(&rdata->done);
0471ca3f 2539 INIT_WORK(&rdata->work, complete);
0471ca3f 2540 }
f4e49cd2 2541
0471ca3f
JL
2542 return rdata;
2543}
2544
6993f74a
JL
2545void
2546cifs_readdata_release(struct kref *refcount)
0471ca3f 2547{
6993f74a
JL
2548 struct cifs_readdata *rdata = container_of(refcount,
2549 struct cifs_readdata, refcount);
2550
2551 if (rdata->cfile)
2552 cifsFileInfo_put(rdata->cfile);
2553
0471ca3f
JL
2554 kfree(rdata);
2555}
2556
1c892549 2557static int
c5fab6f4 2558cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
1c892549
JL
2559{
2560 int rc = 0;
c5fab6f4 2561 struct page *page;
1c892549
JL
2562 unsigned int i;
2563
c5fab6f4 2564 for (i = 0; i < nr_pages; i++) {
1c892549
JL
2565 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2566 if (!page) {
2567 rc = -ENOMEM;
2568 break;
2569 }
c5fab6f4 2570 rdata->pages[i] = page;
1c892549
JL
2571 }
2572
2573 if (rc) {
c5fab6f4
JL
2574 for (i = 0; i < nr_pages; i++) {
2575 put_page(rdata->pages[i]);
2576 rdata->pages[i] = NULL;
1c892549
JL
2577 }
2578 }
2579 return rc;
2580}
2581
2582static void
2583cifs_uncached_readdata_release(struct kref *refcount)
2584{
1c892549
JL
2585 struct cifs_readdata *rdata = container_of(refcount,
2586 struct cifs_readdata, refcount);
c5fab6f4 2587 unsigned int i;
1c892549 2588
c5fab6f4
JL
2589 for (i = 0; i < rdata->nr_pages; i++) {
2590 put_page(rdata->pages[i]);
2591 rdata->pages[i] = NULL;
1c892549
JL
2592 }
2593 cifs_readdata_release(refcount);
2594}
2595
2a1bb138
JL
2596static int
2597cifs_retry_async_readv(struct cifs_readdata *rdata)
2598{
2599 int rc;
fc9c5966
PS
2600 struct TCP_Server_Info *server;
2601
2602 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
2a1bb138
JL
2603
2604 do {
2605 if (rdata->cfile->invalidHandle) {
2606 rc = cifs_reopen_file(rdata->cfile, true);
2607 if (rc != 0)
2608 continue;
2609 }
fc9c5966 2610 rc = server->ops->async_readv(rdata);
2a1bb138
JL
2611 } while (rc == -EAGAIN);
2612
2613 return rc;
2614}
2615
1c892549
JL
2616/**
2617 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2618 * @rdata: the readdata response with list of pages holding data
2619 * @iov: vector in which we should copy the data
2620 * @nr_segs: number of segments in vector
2621 * @offset: offset into file of the first iovec
2622 * @copied: used to return the amount of data copied to the iov
2623 *
2624 * This function copies data from a list of pages in a readdata response into
2625 * an array of iovecs. It will first calculate where the data should go
2626 * based on the info in the readdata and then copy the data into that spot.
2627 */
2628static ssize_t
2629cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2630 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2631{
2632 int rc = 0;
2633 struct iov_iter ii;
2634 size_t pos = rdata->offset - offset;
1c892549
JL
2635 ssize_t remaining = rdata->bytes;
2636 unsigned char *pdata;
c5fab6f4 2637 unsigned int i;
1c892549
JL
2638
2639 /* set up iov_iter and advance to the correct offset */
2640 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2641 iov_iter_advance(&ii, pos);
2642
2643 *copied = 0;
c5fab6f4 2644 for (i = 0; i < rdata->nr_pages; i++) {
1c892549 2645 ssize_t copy;
c5fab6f4 2646 struct page *page = rdata->pages[i];
1c892549
JL
2647
2648 /* copy a whole page or whatever's left */
2649 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2650
2651 /* ...but limit it to whatever space is left in the iov */
2652 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2653
2654 /* go while there's data to be copied and no errors */
2655 if (copy && !rc) {
2656 pdata = kmap(page);
2657 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2658 (int)copy);
2659 kunmap(page);
2660 if (!rc) {
2661 *copied += copy;
2662 remaining -= copy;
2663 iov_iter_advance(&ii, copy);
2664 }
2665 }
1c892549
JL
2666 }
2667
2668 return rc;
2669}
2670
2671static void
2672cifs_uncached_readv_complete(struct work_struct *work)
2673{
2674 struct cifs_readdata *rdata = container_of(work,
2675 struct cifs_readdata, work);
1c892549
JL
2676
2677 complete(&rdata->done);
2678 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2679}
2680
2681static int
8321fec4
JL
2682cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2683 struct cifs_readdata *rdata, unsigned int len)
1c892549 2684{
8321fec4 2685 int total_read = 0, result = 0;
c5fab6f4
JL
2686 unsigned int i;
2687 unsigned int nr_pages = rdata->nr_pages;
8321fec4 2688 struct kvec iov;
1c892549 2689
8321fec4 2690 rdata->tailsz = PAGE_SIZE;
c5fab6f4
JL
2691 for (i = 0; i < nr_pages; i++) {
2692 struct page *page = rdata->pages[i];
2693
8321fec4 2694 if (len >= PAGE_SIZE) {
1c892549 2695 /* enough data to fill the page */
8321fec4
JL
2696 iov.iov_base = kmap(page);
2697 iov.iov_len = PAGE_SIZE;
2698 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2699 i, iov.iov_base, iov.iov_len);
2700 len -= PAGE_SIZE;
2701 } else if (len > 0) {
1c892549 2702 /* enough for partial page, fill and zero the rest */
8321fec4
JL
2703 iov.iov_base = kmap(page);
2704 iov.iov_len = len;
2705 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2706 i, iov.iov_base, iov.iov_len);
2707 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2708 rdata->tailsz = len;
2709 len = 0;
1c892549
JL
2710 } else {
2711 /* no need to hold page hostage */
c5fab6f4
JL
2712 rdata->pages[i] = NULL;
2713 rdata->nr_pages--;
1c892549 2714 put_page(page);
8321fec4 2715 continue;
1c892549 2716 }
8321fec4
JL
2717
2718 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2719 kunmap(page);
2720 if (result < 0)
2721 break;
2722
2723 total_read += result;
1c892549
JL
2724 }
2725
8321fec4 2726 return total_read > 0 ? total_read : result;
1c892549
JL
2727}
2728
a70307ee
PS
2729static ssize_t
2730cifs_iovec_read(struct file *file, const struct iovec *iov,
2731 unsigned long nr_segs, loff_t *poffset)
1da177e4 2732{
1c892549 2733 ssize_t rc;
a70307ee 2734 size_t len, cur_len;
1c892549
JL
2735 ssize_t total_read = 0;
2736 loff_t offset = *poffset;
2737 unsigned int npages;
1da177e4 2738 struct cifs_sb_info *cifs_sb;
1c892549 2739 struct cifs_tcon *tcon;
1da177e4 2740 struct cifsFileInfo *open_file;
1c892549
JL
2741 struct cifs_readdata *rdata, *tmp;
2742 struct list_head rdata_list;
2743 pid_t pid;
a70307ee
PS
2744
2745 if (!nr_segs)
2746 return 0;
2747
2748 len = iov_length(iov, nr_segs);
2749 if (!len)
2750 return 0;
1da177e4 2751
1c892549 2752 INIT_LIST_HEAD(&rdata_list);
e6a00296 2753 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
c21dfb69 2754 open_file = file->private_data;
1c892549 2755 tcon = tlink_tcon(open_file->tlink);
1da177e4 2756
fc9c5966
PS
2757 if (!tcon->ses->server->ops->async_readv)
2758 return -ENOSYS;
2759
d4ffff1f
PS
2760 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2761 pid = open_file->pid;
2762 else
2763 pid = current->tgid;
2764
ad7a2926 2765 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 2766 cFYI(1, "attempting read on write only file instance");
ad7a2926 2767
1c892549
JL
2768 do {
2769 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2770 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
a70307ee 2771
1c892549
JL
2772 /* allocate a readdata struct */
2773 rdata = cifs_readdata_alloc(npages,
2774 cifs_uncached_readv_complete);
2775 if (!rdata) {
2776 rc = -ENOMEM;
2777 goto error;
1da177e4 2778 }
a70307ee 2779
c5fab6f4 2780 rc = cifs_read_allocate_pages(rdata, npages);
1c892549
JL
2781 if (rc)
2782 goto error;
2783
2784 rdata->cfile = cifsFileInfo_get(open_file);
c5fab6f4 2785 rdata->nr_pages = npages;
1c892549
JL
2786 rdata->offset = offset;
2787 rdata->bytes = cur_len;
2788 rdata->pid = pid;
8321fec4
JL
2789 rdata->pagesz = PAGE_SIZE;
2790 rdata->read_into_pages = cifs_uncached_read_into_pages;
1c892549
JL
2791
2792 rc = cifs_retry_async_readv(rdata);
2793error:
2794 if (rc) {
2795 kref_put(&rdata->refcount,
2796 cifs_uncached_readdata_release);
2797 break;
2798 }
2799
2800 list_add_tail(&rdata->list, &rdata_list);
2801 offset += cur_len;
2802 len -= cur_len;
2803 } while (len > 0);
2804
2805 /* if at least one read request send succeeded, then reset rc */
2806 if (!list_empty(&rdata_list))
2807 rc = 0;
2808
2809 /* the loop below should proceed in the order of increasing offsets */
2810restart_loop:
2811 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2812 if (!rc) {
2813 ssize_t copied;
2814
2815 /* FIXME: freezable sleep too? */
2816 rc = wait_for_completion_killable(&rdata->done);
2817 if (rc)
2818 rc = -EINTR;
2819 else if (rdata->result)
2820 rc = rdata->result;
2821 else {
2822 rc = cifs_readdata_to_iov(rdata, iov,
2823 nr_segs, *poffset,
2824 &copied);
2825 total_read += copied;
2826 }
2827
2828 /* resend call if it's a retryable error */
2829 if (rc == -EAGAIN) {
2830 rc = cifs_retry_async_readv(rdata);
2831 goto restart_loop;
1da177e4 2832 }
1da177e4 2833 }
1c892549
JL
2834 list_del_init(&rdata->list);
2835 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
1da177e4 2836 }
a70307ee 2837
1c892549
JL
2838 cifs_stats_bytes_read(tcon, total_read);
2839 *poffset += total_read;
2840
09a4707e
PS
2841 /* mask nodata case */
2842 if (rc == -ENODATA)
2843 rc = 0;
2844
1c892549 2845 return total_read ? total_read : rc;
1da177e4
LT
2846}
2847
0b81c1c4 2848ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2849 unsigned long nr_segs, loff_t pos)
2850{
2851 ssize_t read;
2852
2853 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2854 if (read > 0)
2855 iocb->ki_pos = pos;
2856
2857 return read;
2858}
2859
579f9053
PS
2860ssize_t
2861cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2862 unsigned long nr_segs, loff_t pos)
a70307ee 2863{
579f9053
PS
2864 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2865 struct cifsInodeInfo *cinode = CIFS_I(inode);
2866 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2867 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2868 iocb->ki_filp->private_data;
2869 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2870 int rc = -EACCES;
a70307ee
PS
2871
2872 /*
2873 * In strict cache mode we need to read from the server all the time
2874 * if we don't have level II oplock because the server can delay mtime
2875 * change - so we can't make a decision about inode invalidating.
2876 * And we can also fail with pagereading if there are mandatory locks
2877 * on pages affected by this read but not on the region from pos to
2878 * pos+len-1.
2879 */
579f9053
PS
2880 if (!cinode->clientCanCacheRead)
2881 return cifs_user_readv(iocb, iov, nr_segs, pos);
a70307ee 2882
579f9053
PS
2883 if (cap_unix(tcon->ses) &&
2884 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2885 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2886 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2887
2888 /*
2889 * We need to hold the sem to be sure nobody modifies lock list
2890 * with a brlock that prevents reading.
2891 */
2892 down_read(&cinode->lock_sem);
2893 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2894 tcon->ses->server->vals->shared_lock_type,
2895 NULL, true))
2896 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2897 up_read(&cinode->lock_sem);
2898 return rc;
a70307ee 2899}
1da177e4 2900
f9c6e234
PS
/*
 * cifs_read - synchronous read helper (used by cifs_readpage_worker)
 *
 * Reads up to @read_size bytes at *@offset into @read_data using the
 * server's ->sync_read op, advancing *@offset as data arrives.  A stale
 * handle is transparently reopened and the request reissued on -EAGAIN.
 * Returns the total number of bytes read, or a negative errno if
 * nothing at all was read.
 */
 2901static ssize_t
 2902cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
1da177e4
LT
 2903{
 2904	int rc = -EACCES;
 2905	unsigned int bytes_read = 0;
 2906	unsigned int total_read;
 2907	unsigned int current_read_size;
5eba8ab3 2908	unsigned int rsize;
1da177e4 2909	struct cifs_sb_info *cifs_sb;
29e20f9c 2910	struct cifs_tcon *tcon;
f9c6e234 2911	struct TCP_Server_Info *server;
6d5786a3 2912	unsigned int xid;
f9c6e234 2913	char *cur_offset;
1da177e4 2914	struct cifsFileInfo *open_file;
d4ffff1f 2915	struct cifs_io_parms io_parms;
ec637e3f 2916	int buf_type = CIFS_NO_BUFFER;
d4ffff1f 2917	__u32 pid;
1da177e4 2918
6d5786a3 2919	xid = get_xid();
e6a00296 2920	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 2921
5eba8ab3
JL
 2922	/* FIXME: set up handlers for larger reads and/or convert to async */
 2923	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
 2924
1da177e4 2925	if (file->private_data == NULL) {
0f3bc09e 2926		rc = -EBADF;
6d5786a3 2927		free_xid(xid);
0f3bc09e 2928		return rc;
1da177e4 2929	}
c21dfb69 2930	open_file = file->private_data;
29e20f9c 2931	tcon = tlink_tcon(open_file->tlink);
f9c6e234
PS
 2932	server = tcon->ses->server;
 2933
 2934	if (!server->ops->sync_read) {
 2935		free_xid(xid);
 2936		return -ENOSYS;
 2937	}
1da177e4 2938
d4ffff1f
PS
	/* forward the originating pid to the server when mounted with rwpidforward */
 2939	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 2940		pid = open_file->pid;
 2941	else
 2942		pid = current->tgid;
 2943
1da177e4 2944	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 2945		cFYI(1, "attempting read on write only file instance");
1da177e4 2946
f9c6e234
PS
 2947	for (total_read = 0, cur_offset = read_data; read_size > total_read;
 2948	     total_read += bytes_read, cur_offset += bytes_read) {
5eba8ab3 2949		current_read_size = min_t(uint, read_size - total_read, rsize);
29e20f9c
PS
 2950		/*
 2951		 * For windows me and 9x we do not want to request more than it
 2952		 * negotiated since it will refuse the read then.
 2953		 */
 2954		if ((tcon->ses) && !(tcon->ses->capabilities &
 2955			tcon->ses->server->vals->cap_large_files)) {
7748dd6e 2956			current_read_size = min_t(uint, current_read_size,
c974befa 2957						  CIFSMaxBufSize);
f9f5c817 2958		}
		/* retry loop: reopen a stale handle and reissue on -EAGAIN */
1da177e4
LT
 2959		rc = -EAGAIN;
 2960		while (rc == -EAGAIN) {
cdff08e7 2961			if (open_file->invalidHandle) {
15886177 2962				rc = cifs_reopen_file(open_file, true);
1da177e4
LT
 2963				if (rc != 0)
 2964					break;
 2965			}
d4ffff1f 2966			io_parms.pid = pid;
29e20f9c 2967			io_parms.tcon = tcon;
f9c6e234 2968			io_parms.offset = *offset;
d4ffff1f 2969			io_parms.length = current_read_size;
f9c6e234
PS
 2970			rc = server->ops->sync_read(xid, open_file, &io_parms,
 2971						    &bytes_read, &cur_offset,
 2972						    &buf_type);
1da177e4
LT
 2973		}
 2974		if (rc || (bytes_read == 0)) {
 2975			if (total_read) {
				/* partial success: report what we did read */
 2976				break;
 2977			} else {
6d5786a3 2978				free_xid(xid);
1da177e4
LT
 2979				return rc;
 2980			}
 2981		} else {
			/* NOTE(review): stats are bumped by the cumulative
			 * total_read on each pass rather than bytes_read —
			 * presumably intentional (matches upstream), but
			 * looks like overcounting; verify before changing */
29e20f9c 2982			cifs_stats_bytes_read(tcon, total_read);
f9c6e234 2983			*offset += bytes_read;
1da177e4
LT
 2984		}
 2985	}
6d5786a3 2986	free_xid(xid);
1da177e4
LT
 2987	return total_read;
 2988}
2989
ca83ce3d
JL
2990/*
2991 * If the page is mmap'ed into a process' page tables, then we need to make
2992 * sure that it doesn't change while being written back.
2993 */
2994static int
2995cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2996{
2997 struct page *page = vmf->page;
2998
2999 lock_page(page);
3000 return VM_FAULT_LOCKED;
3001}
3002
/* vm_ops installed on cifs mmaps: read faults are served from the page
 * cache; a writable fault locks the page so it cannot change while any
 * writeback of it is in flight (see cifs_page_mkwrite above) */
 3003static struct vm_operations_struct cifs_file_vm_ops = {
 3004	.fault = filemap_fault,
 3005	.page_mkwrite = cifs_page_mkwrite,
 3006};
3007
7a6a19b1
PS
3008int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3009{
3010 int rc, xid;
3011 struct inode *inode = file->f_path.dentry->d_inode;
3012
6d5786a3 3013 xid = get_xid();
7a6a19b1 3014
6feb9891
PS
3015 if (!CIFS_I(inode)->clientCanCacheRead) {
3016 rc = cifs_invalidate_mapping(inode);
3017 if (rc)
3018 return rc;
3019 }
7a6a19b1
PS
3020
3021 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3022 if (rc == 0)
3023 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3024 free_xid(xid);
7a6a19b1
PS
3025 return rc;
3026}
3027
1da177e4
LT
3028int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3029{
1da177e4
LT
3030 int rc, xid;
3031
6d5786a3 3032 xid = get_xid();
abab095d 3033 rc = cifs_revalidate_file(file);
1da177e4 3034 if (rc) {
b6b38f70 3035 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
6d5786a3 3036 free_xid(xid);
1da177e4
LT
3037 return rc;
3038 }
3039 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
3040 if (rc == 0)
3041 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 3042 free_xid(xid);
1da177e4
LT
3043 return rc;
3044}
3045
0471ca3f
JL
3046static void
3047cifs_readv_complete(struct work_struct *work)
3048{
c5fab6f4 3049 unsigned int i;
0471ca3f
JL
3050 struct cifs_readdata *rdata = container_of(work,
3051 struct cifs_readdata, work);
0471ca3f 3052
c5fab6f4
JL
3053 for (i = 0; i < rdata->nr_pages; i++) {
3054 struct page *page = rdata->pages[i];
3055
0471ca3f
JL
3056 lru_cache_add_file(page);
3057
3058 if (rdata->result == 0) {
0471ca3f
JL
3059 flush_dcache_page(page);
3060 SetPageUptodate(page);
3061 }
3062
3063 unlock_page(page);
3064
3065 if (rdata->result == 0)
3066 cifs_readpage_to_fscache(rdata->mapping->host, page);
3067
3068 page_cache_release(page);
c5fab6f4 3069 rdata->pages[i] = NULL;
0471ca3f 3070 }
6993f74a 3071 kref_put(&rdata->refcount, cifs_readdata_release);
0471ca3f
JL
3072}
3073
/*
 * Receive up to @len bytes from the server socket directly into the
 * pages of @rdata.  Full pages are filled whole; the final short page
 * is filled and zero-padded (and its length recorded in rdata->tailsz).
 * Pages past the server's (probable) EOF are zero-filled and released;
 * any remaining pages are simply released.  Released pages are removed
 * from rdata->pages and nr_pages is decremented accordingly.
 * Returns the number of bytes read, or the socket error if nothing was
 * read.
 */
8d5ce4d2 3074static int
8321fec4
JL
 3075cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 3076			       struct cifs_readdata *rdata, unsigned int len)
8d5ce4d2 3077{
8321fec4 3078	int total_read = 0, result = 0;
c5fab6f4 3079	unsigned int i;
8d5ce4d2
JL
 3080	u64 eof;
 3081	pgoff_t eof_index;
c5fab6f4 3082	unsigned int nr_pages = rdata->nr_pages;
8321fec4 3083	struct kvec iov;
8d5ce4d2
JL
 3084
 3085	/* determine the eof that the server (probably) has */
 3086	eof = CIFS_I(rdata->mapping->host)->server_eof;
 3087	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
 3088	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
 3089
8321fec4 3090	rdata->tailsz = PAGE_CACHE_SIZE;
c5fab6f4
JL
 3091	for (i = 0; i < nr_pages; i++) {
 3092		struct page *page = rdata->pages[i];
 3093
8321fec4 3094		if (len >= PAGE_CACHE_SIZE) {
8d5ce4d2 3095			/* enough data to fill the page */
8321fec4
JL
 3096			iov.iov_base = kmap(page);
 3097			iov.iov_len = PAGE_CACHE_SIZE;
8d5ce4d2 3098			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
8321fec4
JL
 3099				i, page->index, iov.iov_base, iov.iov_len);
 3100			len -= PAGE_CACHE_SIZE;
 3101		} else if (len > 0) {
8d5ce4d2 3102			/* enough for partial page, fill and zero the rest */
8321fec4
JL
 3103			iov.iov_base = kmap(page);
 3104			iov.iov_len = len;
8d5ce4d2 3105			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
8321fec4
JL
 3106				i, page->index, iov.iov_base, iov.iov_len);
 3107			memset(iov.iov_base + len,
 3108				'\0', PAGE_CACHE_SIZE - len);
 3109			rdata->tailsz = len;
 3110			len = 0;
8d5ce4d2
JL
 3111		} else if (page->index > eof_index) {
 3112			/*
 3113			 * The VFS will not try to do readahead past the
 3114			 * i_size, but it's possible that we have outstanding
 3115			 * writes with gaps in the middle and the i_size hasn't
 3116			 * caught up yet. Populate those with zeroed out pages
 3117			 * to prevent the VFS from repeatedly attempting to
 3118			 * fill them until the writes are flushed.
 3119			 */
 3120			zero_user(page, 0, PAGE_CACHE_SIZE);
8d5ce4d2
JL
 3121			lru_cache_add_file(page);
 3122			flush_dcache_page(page);
 3123			SetPageUptodate(page);
 3124			unlock_page(page);
 3125			page_cache_release(page);
c5fab6f4
JL
 3126			rdata->pages[i] = NULL;
 3127			rdata->nr_pages--;
8321fec4 3128			continue;
8d5ce4d2
JL
 3129		} else {
 3130			/* no need to hold page hostage */
8d5ce4d2
JL
 3131			lru_cache_add_file(page);
 3132			unlock_page(page);
 3133			page_cache_release(page);
c5fab6f4
JL
 3134			rdata->pages[i] = NULL;
 3135			rdata->nr_pages--;
8321fec4 3136			continue;
8d5ce4d2 3137		}
8321fec4
JL
 3138
		/* page is still kmapped here; unmap after the socket read */
 3139		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
 3140		kunmap(page);
 3141		if (result < 0)
 3142			break;
 3143
 3144		total_read += result;
8d5ce4d2
JL
 3145	}
 3146
8321fec4 3147	return total_read > 0 ? total_read : result;
8d5ce4d2
JL
 3148}
3149
1da177e4
LT
/*
 * ->readpages: readahead entry point.  Pages arriving on @page_list are
 * in order of declining index.  Try fscache first; otherwise batch
 * contiguous pages (up to rsize bytes) into cifs_readdata requests and
 * issue them asynchronously.  On any per-batch failure the batch's
 * pages are unlocked and released and the loop stops; a non-zero return
 * simply means the remaining pages were not read (the VFS copes).
 */
 3150static int cifs_readpages(struct file *file, struct address_space *mapping,
 3151	struct list_head *page_list, unsigned num_pages)
 3152{
690c5e31
JL
 3153	int rc;
 3154	struct list_head tmplist;
 3155	struct cifsFileInfo *open_file = file->private_data;
 3156	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 3157	unsigned int rsize = cifs_sb->rsize;
 3158	pid_t pid;
1da177e4 3159
690c5e31
JL
 3160	/*
 3161	 * Give up immediately if rsize is too small to read an entire page.
 3162	 * The VFS will fall back to readpage. We should never reach this
 3163	 * point however since we set ra_pages to 0 when the rsize is smaller
 3164	 * than a cache page.
 3165	 */
 3166	if (unlikely(rsize < PAGE_CACHE_SIZE))
 3167		return 0;
bfa0d75a 3168
56698236
SJ
 3169	/*
 3170	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
 3171	 * immediately if the cookie is negative
 3172	 */
 3173	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
 3174					 &num_pages);
 3175	if (rc == 0)
690c5e31 3176		return rc;
56698236 3177
d4ffff1f
PS
	/* forward the originating pid when mounted with rwpidforward */
 3178	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 3179		pid = open_file->pid;
 3180	else
 3181		pid = current->tgid;
 3182
690c5e31
JL
 3183	rc = 0;
 3184	INIT_LIST_HEAD(&tmplist);
1da177e4 3185
690c5e31
JL
 3186	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
 3187		mapping, num_pages);
 3188
 3189	/*
 3190	 * Start with the page at end of list and move it to private
 3191	 * list. Do the same with any following pages until we hit
 3192	 * the rsize limit, hit an index discontinuity, or run out of
 3193	 * pages. Issue the async read and then start the loop again
 3194	 * until the list is empty.
 3195	 *
 3196	 * Note that list order is important. The page_list is in
 3197	 * the order of declining indexes. When we put the pages in
 3198	 * the rdata->pages, then we want them in increasing order.
 3199	 */
 3200	while (!list_empty(page_list)) {
c5fab6f4 3201		unsigned int i;
690c5e31
JL
 3202		unsigned int bytes = PAGE_CACHE_SIZE;
 3203		unsigned int expected_index;
 3204		unsigned int nr_pages = 1;
 3205		loff_t offset;
 3206		struct page *page, *tpage;
 3207		struct cifs_readdata *rdata;
1da177e4
LT
 3208
 3209		page = list_entry(page_list->prev, struct page, lru);
690c5e31
JL
 3210
 3211		/*
 3212		 * Lock the page and put it in the cache. Since no one else
 3213		 * should have access to this page, we're safe to simply set
 3214		 * PG_locked without checking it first.
 3215		 */
 3216		__set_page_locked(page);
 3217		rc = add_to_page_cache_locked(page, mapping,
 3218					      page->index, GFP_KERNEL);
 3219
 3220		/* give up if we can't stick it in the cache */
 3221		if (rc) {
 3222			__clear_page_locked(page);
 3223			break;
 3224		}
 3225
 3226		/* move first page to the tmplist */
1da177e4 3227		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
690c5e31 3228		list_move_tail(&page->lru, &tmplist);
1da177e4 3229
690c5e31
JL
 3230		/* now try and add more pages onto the request */
 3231		expected_index = page->index + 1;
 3232		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
 3233			/* discontinuity ? */
 3234			if (page->index != expected_index)
fb8c4b14 3235				break;
690c5e31
JL
 3236
 3237			/* would this page push the read over the rsize? */
 3238			if (bytes + PAGE_CACHE_SIZE > rsize)
 3239				break;
 3240
 3241			__set_page_locked(page);
 3242			if (add_to_page_cache_locked(page, mapping,
 3243						page->index, GFP_KERNEL)) {
 3244				__clear_page_locked(page);
 3245				break;
 3246			}
 3247			list_move_tail(&page->lru, &tmplist);
 3248			bytes += PAGE_CACHE_SIZE;
 3249			expected_index++;
 3250			nr_pages++;
1da177e4 3251		}
690c5e31 3252
0471ca3f 3253		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
690c5e31
JL
 3254		if (!rdata) {
 3255			/* best to give up if we're out of mem */
 3256			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
 3257				list_del(&page->lru);
 3258				lru_cache_add_file(page);
 3259				unlock_page(page);
 3260				page_cache_release(page);
 3261			}
 3262			rc = -ENOMEM;
 3263			break;
 3264		}
 3265
6993f74a 3266		rdata->cfile = cifsFileInfo_get(open_file);
690c5e31
JL
 3267		rdata->mapping = mapping;
 3268		rdata->offset = offset;
 3269		rdata->bytes = bytes;
 3270		rdata->pid = pid;
8321fec4
JL
 3271		rdata->pagesz = PAGE_CACHE_SIZE;
 3272		rdata->read_into_pages = cifs_readpages_read_into_pages;
c5fab6f4
JL
 3273
		/* tmplist is in increasing index order; keep that order */
 3274		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
 3275			list_del(&page->lru);
 3276			rdata->pages[rdata->nr_pages++] = page;
 3277		}
690c5e31 3278
2a1bb138 3279		rc = cifs_retry_async_readv(rdata);
690c5e31 3280		if (rc != 0) {
c5fab6f4
JL
 3281			for (i = 0; i < rdata->nr_pages; i++) {
 3282				page = rdata->pages[i];
690c5e31
JL
 3283				lru_cache_add_file(page);
 3284				unlock_page(page);
 3285				page_cache_release(page);
1da177e4 3286			}
6993f74a 3287			kref_put(&rdata->refcount, cifs_readdata_release);
1da177e4
LT
 3288			break;
 3289		}
6993f74a
JL
 3290
		/* on success, completion work owns the remaining reference */
 3291		kref_put(&rdata->refcount, cifs_readdata_release);
1da177e4
LT
 3292	}
 3293
1da177e4
LT
 3294	return rc;
 3295}
3296
/*
 * Fill one locked page at *@poffset: from fscache if possible, else via
 * a synchronous cifs_read().  Zero-pads a short read, marks the page
 * uptodate and pushes it to fscache.  The page is left LOCKED in all
 * cases — the caller (cifs_readpage / cifs_write_begin) unlocks it.
 * Takes and drops its own page reference around the kmap.
 */
 3297static int cifs_readpage_worker(struct file *file, struct page *page,
 3298	loff_t *poffset)
 3299{
 3300	char *read_data;
 3301	int rc;
 3302
56698236
SJ
 3303	/* Is the page cached? */
 3304	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
 3305	if (rc == 0)
 3306		goto read_complete;
 3307
1da177e4
LT
 3308	page_cache_get(page);
 3309	read_data = kmap(page);
 3310	/* for reads over a certain size could initiate async read ahead */
fb8c4b14 3311
1da177e4 3312	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 3313
1da177e4
LT
 3314	if (rc < 0)
 3315		goto io_error;
 3316	else
b6b38f70 3317		cFYI(1, "Bytes read %d", rc);
fb8c4b14 3318
e6a00296
JJS
 3319	file->f_path.dentry->d_inode->i_atime =
 3320		current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 3321
	/* zero the tail of the page on a short read */
1da177e4
LT
 3322	if (PAGE_CACHE_SIZE > rc)
 3323		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
 3324
 3325	flush_dcache_page(page);
 3326	SetPageUptodate(page);
9dc06558
SJ
 3327
 3328	/* send this page to the cache */
 3329	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
 3330
1da177e4 3331	rc = 0;
fb8c4b14 3332
1da177e4 3333io_error:
fb8c4b14 3334	kunmap(page);
1da177e4 3335	page_cache_release(page);
56698236
SJ
 3336
 3337read_complete:
1da177e4
LT
 3338	return rc;
 3339}
3340
3341static int cifs_readpage(struct file *file, struct page *page)
3342{
3343 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3344 int rc = -EACCES;
6d5786a3 3345 unsigned int xid;
1da177e4 3346
6d5786a3 3347 xid = get_xid();
1da177e4
LT
3348
3349 if (file->private_data == NULL) {
0f3bc09e 3350 rc = -EBADF;
6d5786a3 3351 free_xid(xid);
0f3bc09e 3352 return rc;
1da177e4
LT
3353 }
3354
ac3aa2f8 3355 cFYI(1, "readpage %p at offset %d 0x%x",
b6b38f70 3356 page, (int)offset, (int)offset);
1da177e4
LT
3357
3358 rc = cifs_readpage_worker(file, page, &offset);
3359
3360 unlock_page(page);
3361
6d5786a3 3362 free_xid(xid);
1da177e4
LT
3363 return rc;
3364}
3365
a403a0a3
SF
3366static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3367{
3368 struct cifsFileInfo *open_file;
3369
4477288a 3370 spin_lock(&cifs_file_list_lock);
a403a0a3 3371 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 3372 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 3373 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3374 return 1;
3375 }
3376 }
4477288a 3377 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3378 return 0;
3379}
3380
1da177e4
LT
3381/* We do not want to update the file size from server for inodes
3382 open for write - to avoid races with writepage extending
3383 the file - in the future we could consider allowing
fb8c4b14 3384 refreshing the inode only on increases in the file size
1da177e4
LT
3385 but this is tricky to do without racing with writebehind
3386 page caching in the current Linux kernel design */
4b18f2a9 3387bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 3388{
a403a0a3 3389 if (!cifsInode)
4b18f2a9 3390 return true;
50c2f753 3391
a403a0a3
SF
3392 if (is_inode_writable(cifsInode)) {
3393 /* This inode is open for write at least once */
c32a0b68
SF
3394 struct cifs_sb_info *cifs_sb;
3395
c32a0b68 3396 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 3397 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3398 /* since no page cache to corrupt on directio
c32a0b68 3399 we can change size safely */
4b18f2a9 3400 return true;
c32a0b68
SF
3401 }
3402
fb8c4b14 3403 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 3404 return true;
7ba52631 3405
4b18f2a9 3406 return false;
23e7dd7d 3407 } else
4b18f2a9 3408 return true;
1da177e4
LT
3409}
3410
d9414774
NP
/*
 * ->write_begin: return (in *pagep) a locked pagecache page covering
 * [pos, pos+len).  The page is brought uptodate only when that is cheap
 * or necessary; otherwise cifs_write_end falls back to a sync write for
 * short copies.  Returns 0 or -ENOMEM.
 */
 3411static int cifs_write_begin(struct file *file, struct address_space *mapping,
 3412			loff_t pos, unsigned len, unsigned flags,
 3413			struct page **pagep, void **fsdata)
1da177e4 3414{
d9414774
NP
 3415	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 3416	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
 3417	loff_t page_start = pos & PAGE_MASK;
 3418	loff_t i_size;
 3419	struct page *page;
 3420	int rc = 0;
d9414774 3421
b6b38f70 3422	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 3423
54566b2c 3424	page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
 3425	if (!page) {
 3426		rc = -ENOMEM;
 3427		goto out;
 3428	}
8a236264 3429
a98ee8c1
JL
 3430	if (PageUptodate(page))
 3431		goto out;
8a236264 3432
a98ee8c1
JL
 3433	/*
 3434	 * If we write a full page it will be up to date, no need to read from
 3435	 * the server. If the write is short, we'll end up doing a sync write
 3436	 * instead.
 3437	 */
 3438	if (len == PAGE_CACHE_SIZE)
 3439		goto out;
8a236264 3440
a98ee8c1
JL
 3441	/*
 3442	 * optimize away the read when we have an oplock, and we're not
 3443	 * expecting to use any of the data we'd be reading in. That
 3444	 * is, when the page lies beyond the EOF, or straddles the EOF
 3445	 * and the write will cover all of the existing data.
 3446	 */
 3447	if (CIFS_I(mapping->host)->clientCanCacheRead) {
 3448		i_size = i_size_read(mapping->host);
 3449		if (page_start >= i_size ||
 3450		    (offset == 0 && (pos + len) >= i_size)) {
 3451			zero_user_segments(page, 0, offset,
 3452					   offset + len,
 3453					   PAGE_CACHE_SIZE);
 3454			/*
 3455			 * PageChecked means that the parts of the page
 3456			 * to which we're not writing are considered up
 3457			 * to date. Once the data is copied to the
 3458			 * page, it can be set uptodate.
 3459			 */
 3460			SetPageChecked(page);
 3461			goto out;
 3462		}
 3463	}
d9414774 3464
a98ee8c1
JL
 3465	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
 3466		/*
 3467		 * might as well read a page, it is fast enough. If we get
 3468		 * an error, we don't need to return it. cifs_write_end will
 3469		 * do a sync write instead since PG_uptodate isn't set.
 3470		 */
 3471		cifs_readpage_worker(file, page, &page_start);
8a236264
SF
 3472	} else {
 3473		/* we could try using another file handle if there is one -
 3474		   but how would we lock it to prevent close of that handle
 3475		   racing with this read? In any case
d9414774 3476		   this will be written out by write_end so is fine */
1da177e4 3477	}
a98ee8c1
JL
 3478out:
	/* page may be NULL here only on the -ENOMEM path */
 3479	*pagep = page;
 3480	return rc;
1da177e4
LT
 3481}
3482
85f2d6b4
SJ
3483static int cifs_release_page(struct page *page, gfp_t gfp)
3484{
3485 if (PagePrivate(page))
3486 return 0;
3487
3488 return cifs_fscache_release_page(page, gfp);
3489}
3490
3491static void cifs_invalidate_page(struct page *page, unsigned long offset)
3492{
3493 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3494
3495 if (offset == 0)
3496 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3497}
3498
9ad1506b
PS
3499static int cifs_launder_page(struct page *page)
3500{
3501 int rc = 0;
3502 loff_t range_start = page_offset(page);
3503 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3504 struct writeback_control wbc = {
3505 .sync_mode = WB_SYNC_ALL,
3506 .nr_to_write = 0,
3507 .range_start = range_start,
3508 .range_end = range_end,
3509 };
3510
3511 cFYI(1, "Launder page: %p", page);
3512
3513 if (clear_page_dirty_for_io(page))
3514 rc = cifs_writepage_locked(page, &wbc);
3515
3516 cifs_fscache_invalidate_page(page, page->mapping->host);
3517 return rc;
3518}
3519
/*
 * Deferred work run when the server breaks our oplock: break any local
 * lease to match, flush (and, when losing the read cache, wait for and
 * invalidate) cached data, re-push byte-range locks, and finally
 * acknowledge the break to the server unless it was cancelled.
 */
9b646972 3520void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
 3521{
 3522	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
 3523						  oplock_break);
a5e18bc3 3524	struct inode *inode = cfile->dentry->d_inode;
3bc303c2 3525	struct cifsInodeInfo *cinode = CIFS_I(inode);
95a3f2f3 3526	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
eb4b756b 3527	int rc = 0;
3bc303c2
JL
 3528
 3529	if (inode && S_ISREG(inode->i_mode)) {
		/* mirror the oplock downgrade onto any local lease holders */
d54ff732 3530		if (cinode->clientCanCacheRead)
8737c930 3531			break_lease(inode, O_RDONLY);
d54ff732 3532		else
8737c930 3533			break_lease(inode, O_WRONLY);
3bc303c2
JL
 3534		rc = filemap_fdatawrite(inode->i_mapping);
 3535		if (cinode->clientCanCacheRead == 0) {
eb4b756b
JL
 3536			rc = filemap_fdatawait(inode->i_mapping);
 3537			mapping_set_error(inode->i_mapping, rc);
3bc303c2
JL
 3538			invalidate_remote_inode(inode);
 3539		}
b6b38f70 3540		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
 3541	}
 3542
85160e03
PS
 3543	rc = cifs_push_locks(cfile);
 3544	if (rc)
 3545		cERROR(1, "Push locks rc = %d", rc);
 3546
3bc303c2
JL
 3547	/*
 3548	 * releasing stale oplock after recent reconnect of smb session using
 3549	 * a now incorrect file handle is not a data integrity issue but do
 3550	 * not bother sending an oplock release if session to server still is
 3551	 * disconnected since oplock already released by the server
 3552	 */
cdff08e7 3553	if (!cfile->oplock_break_cancelled) {
95a3f2f3
PS
 3554		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
 3555							     cinode);
b6b38f70 3556		cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 3557	}
3bc303c2
JL
 3558}
3559
/* address_space operations used when the negotiated buffer can hold at
 * least a full page of data, so batched ->readpages readahead is usable */
f5e54d6e 3560const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
 3561	.readpage = cifs_readpage,
 3562	.readpages = cifs_readpages,
 3563	.writepage = cifs_writepage,
37c0eb46 3564	.writepages = cifs_writepages,
d9414774
NP
 3565	.write_begin = cifs_write_begin,
 3566	.write_end = cifs_write_end,
1da177e4 3567	.set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
 3568	.releasepage = cifs_release_page,
 3569	.invalidatepage = cifs_invalidate_page,
9ad1506b 3570	.launder_page = cifs_launder_page,
1da177e4 3571};
273d81d6
DK
 3572
 3573/*
 3574 * cifs_readpages requires the server to support a buffer large enough to
 3575 * contain the header plus one complete page of data. Otherwise, we need
 3576 * to leave cifs_readpages out of the address space operations.
 3577 */
/* same table as cifs_addr_ops above, minus the ->readpages entry */
f5e54d6e 3578const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
 3579	.readpage = cifs_readpage,
 3580	.writepage = cifs_writepage,
 3581	.writepages = cifs_writepages,
d9414774
NP
 3582	.write_begin = cifs_write_begin,
 3583	.write_end = cifs_write_end,
273d81d6 3584	.set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
 3585	.releasepage = cifs_release_page,
 3586	.invalidatepage = cifs_invalidate_page,
9ad1506b 3587	.launder_page = cifs_launder_page,
273d81d6 3588};
This page took 0.962516 seconds and 5 git commands to generate.