CIFS: Request SMB2.1 leases
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
108int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
6d5786a3 110 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
608712fe
JL
111{
112 int rc;
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
96daf2b0 118 struct cifs_tcon *tcon;
608712fe
JL
119
120 cFYI(1, "posix open %s", full_path);
121
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
124 return -ENOMEM;
125
126 tlink = cifs_sb_tlink(cifs_sb);
127 if (IS_ERR(tlink)) {
128 rc = PTR_ERR(tlink);
129 goto posix_open_ret;
130 }
131
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
134
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
141
142 if (rc)
143 goto posix_open_ret;
144
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
147
148 if (!pinode)
149 goto posix_open_ret; /* caller does not need info */
150
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
152
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
157 if (!*pinode) {
158 rc = -ENOMEM;
159 goto posix_open_ret;
160 }
161 } else {
162 cifs_fattr_to_inode(*pinode, &fattr);
163 }
164
165posix_open_ret:
166 kfree(presp_data);
167 return rc;
168}
169
eeb910a6
PS
170static int
171cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
fb1214e4
PS
172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
173 struct cifs_fid *fid, unsigned int xid)
eeb910a6
PS
174{
175 int rc;
fb1214e4 176 int desired_access;
eeb910a6 177 int disposition;
3d3ea8e6 178 int create_options = CREATE_NOT_DIR;
eeb910a6 179 FILE_ALL_INFO *buf;
b8c32dbb 180 struct TCP_Server_Info *server = tcon->ses->server;
eeb910a6 181
b8c32dbb 182 if (!server->ops->open)
fb1214e4
PS
183 return -ENOSYS;
184
185 desired_access = cifs_convert_flags(f_flags);
eeb910a6
PS
186
187/*********************************************************************
188 * open flag mapping table:
189 *
190 * POSIX Flag CIFS Disposition
191 * ---------- ----------------
192 * O_CREAT FILE_OPEN_IF
193 * O_CREAT | O_EXCL FILE_CREATE
194 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
195 * O_TRUNC FILE_OVERWRITE
196 * none of the above FILE_OPEN
197 *
198 * Note that there is not a direct match between disposition
199 * FILE_SUPERSEDE (ie create whether or not file exists although
200 * O_CREAT | O_TRUNC is similar but truncates the existing
201 * file rather than creating a new file as FILE_SUPERSEDE does
202 * (which uses the attributes / metadata passed in on open call)
203 *?
204 *? O_SYNC is a reasonable match to CIFS writethrough flag
205 *? and the read write flags match reasonably. O_LARGEFILE
206 *? is irrelevant because largefile support is always used
207 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
208 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
209 *********************************************************************/
210
211 disposition = cifs_get_disposition(f_flags);
212
213 /* BB pass O_SYNC flag through on file attributes .. BB */
214
215 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
216 if (!buf)
217 return -ENOMEM;
218
3d3ea8e6
SP
219 if (backup_cred(cifs_sb))
220 create_options |= CREATE_OPEN_BACKUP_INTENT;
221
b8c32dbb
PS
222 rc = server->ops->open(xid, tcon, full_path, disposition,
223 desired_access, create_options, fid, oplock, buf,
224 cifs_sb);
eeb910a6
PS
225
226 if (rc)
227 goto out;
228
229 if (tcon->unix_ext)
230 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
231 xid);
232 else
233 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
fb1214e4 234 xid, &fid->netfid);
eeb910a6
PS
235
236out:
237 kfree(buf);
238 return rc;
239}
240
15ecb436 241struct cifsFileInfo *
fb1214e4 242cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
15ecb436
JL
243 struct tcon_link *tlink, __u32 oplock)
244{
245 struct dentry *dentry = file->f_path.dentry;
246 struct inode *inode = dentry->d_inode;
4b4de76e
PS
247 struct cifsInodeInfo *cinode = CIFS_I(inode);
248 struct cifsFileInfo *cfile;
f45d3416 249 struct cifs_fid_locks *fdlocks;
4b4de76e
PS
250
251 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
252 if (cfile == NULL)
253 return cfile;
254
f45d3416
PS
255 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
256 if (!fdlocks) {
257 kfree(cfile);
258 return NULL;
259 }
260
261 INIT_LIST_HEAD(&fdlocks->locks);
262 fdlocks->cfile = cfile;
263 cfile->llist = fdlocks;
1b4b55a1 264 down_write(&cinode->lock_sem);
f45d3416 265 list_add(&fdlocks->llist, &cinode->llist);
1b4b55a1 266 up_write(&cinode->lock_sem);
f45d3416 267
4b4de76e 268 cfile->count = 1;
4b4de76e
PS
269 cfile->pid = current->tgid;
270 cfile->uid = current_fsuid();
271 cfile->dentry = dget(dentry);
272 cfile->f_flags = file->f_flags;
273 cfile->invalidHandle = false;
274 cfile->tlink = cifs_get_tlink(tlink);
4b4de76e 275 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
f45d3416 276 mutex_init(&cfile->fh_mutex);
fb1214e4 277 tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);
15ecb436 278
4477288a 279 spin_lock(&cifs_file_list_lock);
4b4de76e 280 list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
15ecb436
JL
281 /* if readable file instance put first in list*/
282 if (file->f_mode & FMODE_READ)
4b4de76e 283 list_add(&cfile->flist, &cinode->openFileList);
15ecb436 284 else
4b4de76e 285 list_add_tail(&cfile->flist, &cinode->openFileList);
4477288a 286 spin_unlock(&cifs_file_list_lock);
15ecb436 287
4b4de76e
PS
288 file->private_data = cfile;
289 return cfile;
15ecb436
JL
290}
291
764a1b1a
JL
292struct cifsFileInfo *
293cifsFileInfo_get(struct cifsFileInfo *cifs_file)
294{
295 spin_lock(&cifs_file_list_lock);
296 cifsFileInfo_get_locked(cifs_file);
297 spin_unlock(&cifs_file_list_lock);
298 return cifs_file;
299}
300
cdff08e7
SF
301/*
302 * Release a reference on the file private data. This may involve closing
5f6dbc9e
JL
303 * the filehandle out on the server. Must be called without holding
304 * cifs_file_list_lock.
cdff08e7 305 */
b33879aa
JL
306void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
307{
e66673e3 308 struct inode *inode = cifs_file->dentry->d_inode;
96daf2b0 309 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
e66673e3 310 struct cifsInodeInfo *cifsi = CIFS_I(inode);
4f8ba8a0 311 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
cdff08e7
SF
312 struct cifsLockInfo *li, *tmp;
313
314 spin_lock(&cifs_file_list_lock);
5f6dbc9e 315 if (--cifs_file->count > 0) {
cdff08e7
SF
316 spin_unlock(&cifs_file_list_lock);
317 return;
318 }
319
320 /* remove it from the lists */
321 list_del(&cifs_file->flist);
322 list_del(&cifs_file->tlist);
323
324 if (list_empty(&cifsi->openFileList)) {
325 cFYI(1, "closing last open instance for inode %p",
326 cifs_file->dentry->d_inode);
25364138
PS
327 /*
328 * In strict cache mode we need invalidate mapping on the last
329 * close because it may cause a error when we open this file
330 * again and get at least level II oplock.
331 */
4f8ba8a0
PS
332 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
333 CIFS_I(inode)->invalid_mapping = true;
c6723628 334 cifs_set_oplock_level(cifsi, 0);
cdff08e7
SF
335 }
336 spin_unlock(&cifs_file_list_lock);
337
ad635942
JL
338 cancel_work_sync(&cifs_file->oplock_break);
339
cdff08e7 340 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
0ff78a22 341 struct TCP_Server_Info *server = tcon->ses->server;
6d5786a3 342 unsigned int xid;
0ff78a22
PS
343 int rc = -ENOSYS;
344
6d5786a3 345 xid = get_xid();
0ff78a22
PS
346 if (server->ops->close)
347 rc = server->ops->close(xid, tcon, &cifs_file->fid);
6d5786a3 348 free_xid(xid);
cdff08e7
SF
349 }
350
f45d3416
PS
351 /*
352 * Delete any outstanding lock records. We'll lose them when the file
cdff08e7
SF
353 * is closed anyway.
354 */
1b4b55a1 355 down_write(&cifsi->lock_sem);
f45d3416 356 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
cdff08e7 357 list_del(&li->llist);
85160e03 358 cifs_del_lock_waiters(li);
cdff08e7 359 kfree(li);
b33879aa 360 }
f45d3416
PS
361 list_del(&cifs_file->llist->llist);
362 kfree(cifs_file->llist);
1b4b55a1 363 up_write(&cifsi->lock_sem);
cdff08e7
SF
364
365 cifs_put_tlink(cifs_file->tlink);
366 dput(cifs_file->dentry);
367 kfree(cifs_file);
b33879aa
JL
368}
369
1da177e4
LT
370int cifs_open(struct inode *inode, struct file *file)
371{
372 int rc = -EACCES;
6d5786a3 373 unsigned int xid;
590a3fe0 374 __u32 oplock;
1da177e4 375 struct cifs_sb_info *cifs_sb;
b8c32dbb 376 struct TCP_Server_Info *server;
96daf2b0 377 struct cifs_tcon *tcon;
7ffec372 378 struct tcon_link *tlink;
fb1214e4 379 struct cifsFileInfo *cfile = NULL;
1da177e4 380 char *full_path = NULL;
7e12eddb 381 bool posix_open_ok = false;
fb1214e4 382 struct cifs_fid fid;
1da177e4 383
6d5786a3 384 xid = get_xid();
1da177e4
LT
385
386 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
387 tlink = cifs_sb_tlink(cifs_sb);
388 if (IS_ERR(tlink)) {
6d5786a3 389 free_xid(xid);
7ffec372
JL
390 return PTR_ERR(tlink);
391 }
392 tcon = tlink_tcon(tlink);
b8c32dbb 393 server = tcon->ses->server;
1da177e4 394
e6a00296 395 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 396 if (full_path == NULL) {
0f3bc09e 397 rc = -ENOMEM;
232341ba 398 goto out;
1da177e4
LT
399 }
400
b6b38f70
JP
401 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
402 inode, file->f_flags, full_path);
276a74a4 403
10b9b98e 404 if (tcon->ses->server->oplocks)
276a74a4
SF
405 oplock = REQ_OPLOCK;
406 else
407 oplock = 0;
408
64cc2c63 409 if (!tcon->broken_posix_open && tcon->unix_ext &&
29e20f9c
PS
410 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
411 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 412 /* can not refresh inode info since size could be stale */
2422f676 413 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 414 cifs_sb->mnt_file_mode /* ignored */,
fb1214e4 415 file->f_flags, &oplock, &fid.netfid, xid);
276a74a4 416 if (rc == 0) {
b6b38f70 417 cFYI(1, "posix open succeeded");
7e12eddb 418 posix_open_ok = true;
64cc2c63
SF
419 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
420 if (tcon->ses->serverNOS)
b6b38f70 421 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
422 " unexpected error on SMB posix open"
423 ", disabling posix open support."
424 " Check if server update available.",
425 tcon->ses->serverName,
b6b38f70 426 tcon->ses->serverNOS);
64cc2c63 427 tcon->broken_posix_open = true;
276a74a4
SF
428 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
429 (rc != -EOPNOTSUPP)) /* path not found or net err */
430 goto out;
fb1214e4
PS
431 /*
432 * Else fallthrough to retry open the old way on network i/o
433 * or DFS errors.
434 */
276a74a4
SF
435 }
436
7e12eddb 437 if (!posix_open_ok) {
b8c32dbb
PS
438 if (server->ops->get_lease_key)
439 server->ops->get_lease_key(inode, &fid);
440
7e12eddb 441 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
fb1214e4 442 file->f_flags, &oplock, &fid, xid);
7e12eddb
PS
443 if (rc)
444 goto out;
445 }
47c78b7f 446
fb1214e4
PS
447 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
448 if (cfile == NULL) {
b8c32dbb
PS
449 if (server->ops->close)
450 server->ops->close(xid, tcon, &fid);
1da177e4
LT
451 rc = -ENOMEM;
452 goto out;
453 }
1da177e4 454
9451a9a5
SJ
455 cifs_fscache_set_inode_cookie(inode, file);
456
7e12eddb 457 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
fb1214e4
PS
458 /*
459 * Time to set mode which we can not set earlier due to
460 * problems creating new read-only files.
461 */
7e12eddb
PS
462 struct cifs_unix_set_info_args args = {
463 .mode = inode->i_mode,
464 .uid = NO_CHANGE_64,
465 .gid = NO_CHANGE_64,
466 .ctime = NO_CHANGE_64,
467 .atime = NO_CHANGE_64,
468 .mtime = NO_CHANGE_64,
469 .device = 0,
470 };
fb1214e4
PS
471 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
472 cfile->pid);
1da177e4
LT
473 }
474
475out:
1da177e4 476 kfree(full_path);
6d5786a3 477 free_xid(xid);
7ffec372 478 cifs_put_tlink(tlink);
1da177e4
LT
479 return rc;
480}
481
2ae78ba8
PS
482/*
483 * Try to reacquire byte range locks that were released when session
484 * to server was lost
485 */
1da177e4
LT
/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.  Currently a stub that always reports success.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
494
2ae78ba8
PS
495static int
496cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1da177e4
LT
497{
498 int rc = -EACCES;
6d5786a3 499 unsigned int xid;
590a3fe0 500 __u32 oplock;
1da177e4 501 struct cifs_sb_info *cifs_sb;
96daf2b0 502 struct cifs_tcon *tcon;
2ae78ba8
PS
503 struct TCP_Server_Info *server;
504 struct cifsInodeInfo *cinode;
fb8c4b14 505 struct inode *inode;
1da177e4 506 char *full_path = NULL;
2ae78ba8 507 int desired_access;
1da177e4 508 int disposition = FILE_OPEN;
3d3ea8e6 509 int create_options = CREATE_NOT_DIR;
2ae78ba8 510 struct cifs_fid fid;
1da177e4 511
6d5786a3 512 xid = get_xid();
2ae78ba8
PS
513 mutex_lock(&cfile->fh_mutex);
514 if (!cfile->invalidHandle) {
515 mutex_unlock(&cfile->fh_mutex);
0f3bc09e 516 rc = 0;
6d5786a3 517 free_xid(xid);
0f3bc09e 518 return rc;
1da177e4
LT
519 }
520
2ae78ba8 521 inode = cfile->dentry->d_inode;
1da177e4 522 cifs_sb = CIFS_SB(inode->i_sb);
2ae78ba8
PS
523 tcon = tlink_tcon(cfile->tlink);
524 server = tcon->ses->server;
525
526 /*
527 * Can not grab rename sem here because various ops, including those
528 * that already have the rename sem can end up causing writepage to get
529 * called and if the server was down that means we end up here, and we
530 * can never tell if the caller already has the rename_sem.
531 */
532 full_path = build_path_from_dentry(cfile->dentry);
1da177e4 533 if (full_path == NULL) {
3a9f462f 534 rc = -ENOMEM;
2ae78ba8 535 mutex_unlock(&cfile->fh_mutex);
6d5786a3 536 free_xid(xid);
3a9f462f 537 return rc;
1da177e4
LT
538 }
539
2ae78ba8
PS
540 cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
541 full_path);
1da177e4 542
10b9b98e 543 if (tcon->ses->server->oplocks)
1da177e4
LT
544 oplock = REQ_OPLOCK;
545 else
4b18f2a9 546 oplock = 0;
1da177e4 547
29e20f9c 548 if (tcon->unix_ext && cap_unix(tcon->ses) &&
7fc8f4e9 549 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
29e20f9c 550 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
551 /*
552 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
553 * original open. Must mask them off for a reopen.
554 */
2ae78ba8 555 unsigned int oflags = cfile->f_flags &
15886177 556 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 557
2422f676 558 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
2ae78ba8
PS
559 cifs_sb->mnt_file_mode /* ignored */,
560 oflags, &oplock, &fid.netfid, xid);
7fc8f4e9 561 if (rc == 0) {
b6b38f70 562 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
563 goto reopen_success;
564 }
2ae78ba8
PS
565 /*
566 * fallthrough to retry open the old way on errors, especially
567 * in the reconnect path it is important to retry hard
568 */
7fc8f4e9
SF
569 }
570
2ae78ba8 571 desired_access = cifs_convert_flags(cfile->f_flags);
7fc8f4e9 572
3d3ea8e6
SP
573 if (backup_cred(cifs_sb))
574 create_options |= CREATE_OPEN_BACKUP_INTENT;
575
b8c32dbb
PS
576 if (server->ops->get_lease_key)
577 server->ops->get_lease_key(inode, &fid);
578
2ae78ba8
PS
579 /*
580 * Can not refresh inode by passing in file_info buf to be returned by
581 * CIFSSMBOpen and then calling get_inode_info with returned buf since
582 * file might have write behind data that needs to be flushed and server
583 * version of file size can be stale. If we knew for sure that inode was
584 * not dirty locally we could do this.
585 */
586 rc = server->ops->open(xid, tcon, full_path, disposition,
587 desired_access, create_options, &fid, &oplock,
588 NULL, cifs_sb);
1da177e4 589 if (rc) {
2ae78ba8
PS
590 mutex_unlock(&cfile->fh_mutex);
591 cFYI(1, "cifs_reopen returned 0x%x", rc);
b6b38f70 592 cFYI(1, "oplock: %d", oplock);
15886177
JL
593 goto reopen_error_exit;
594 }
595
7fc8f4e9 596reopen_success:
2ae78ba8
PS
597 cfile->invalidHandle = false;
598 mutex_unlock(&cfile->fh_mutex);
599 cinode = CIFS_I(inode);
15886177
JL
600
601 if (can_flush) {
602 rc = filemap_write_and_wait(inode->i_mapping);
eb4b756b 603 mapping_set_error(inode->i_mapping, rc);
15886177 604
15886177 605 if (tcon->unix_ext)
2ae78ba8
PS
606 rc = cifs_get_inode_info_unix(&inode, full_path,
607 inode->i_sb, xid);
15886177 608 else
2ae78ba8
PS
609 rc = cifs_get_inode_info(&inode, full_path, NULL,
610 inode->i_sb, xid, NULL);
611 }
612 /*
613 * Else we are writing out data to server already and could deadlock if
614 * we tried to flush data, and since we do not know if we have data that
615 * would invalidate the current end of file on the server we can not go
616 * to the server to get the new inode info.
617 */
618
619 server->ops->set_fid(cfile, &fid, oplock);
620 cifs_relock_file(cfile);
15886177
JL
621
622reopen_error_exit:
1da177e4 623 kfree(full_path);
6d5786a3 624 free_xid(xid);
1da177e4
LT
625 return rc;
626}
627
628int cifs_close(struct inode *inode, struct file *file)
629{
77970693
JL
630 if (file->private_data != NULL) {
631 cifsFileInfo_put(file->private_data);
632 file->private_data = NULL;
633 }
7ee1af76 634
cdff08e7
SF
635 /* return code from the ->release op is always ignored */
636 return 0;
1da177e4
LT
637}
638
639int cifs_closedir(struct inode *inode, struct file *file)
640{
641 int rc = 0;
6d5786a3 642 unsigned int xid;
4b4de76e 643 struct cifsFileInfo *cfile = file->private_data;
92fc65a7
PS
644 struct cifs_tcon *tcon;
645 struct TCP_Server_Info *server;
646 char *buf;
1da177e4 647
b6b38f70 648 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4 649
92fc65a7
PS
650 if (cfile == NULL)
651 return rc;
652
6d5786a3 653 xid = get_xid();
92fc65a7
PS
654 tcon = tlink_tcon(cfile->tlink);
655 server = tcon->ses->server;
1da177e4 656
92fc65a7
PS
657 cFYI(1, "Freeing private data in close dir");
658 spin_lock(&cifs_file_list_lock);
659 if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
660 cfile->invalidHandle = true;
661 spin_unlock(&cifs_file_list_lock);
662 if (server->ops->close_dir)
663 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
664 else
665 rc = -ENOSYS;
666 cFYI(1, "Closing uncompleted readdir with rc %d", rc);
667 /* not much we can do if it fails anyway, ignore rc */
668 rc = 0;
669 } else
670 spin_unlock(&cifs_file_list_lock);
671
672 buf = cfile->srch_inf.ntwrk_buf_start;
673 if (buf) {
674 cFYI(1, "closedir free smb buf in srch struct");
675 cfile->srch_inf.ntwrk_buf_start = NULL;
676 if (cfile->srch_inf.smallBuf)
677 cifs_small_buf_release(buf);
678 else
679 cifs_buf_release(buf);
1da177e4 680 }
92fc65a7
PS
681
682 cifs_put_tlink(cfile->tlink);
683 kfree(file->private_data);
684 file->private_data = NULL;
1da177e4 685 /* BB can we lock the filestruct while this is going on? */
6d5786a3 686 free_xid(xid);
1da177e4
LT
687 return rc;
688}
689
85160e03 690static struct cifsLockInfo *
fbd35aca 691cifs_lock_init(__u64 offset, __u64 length, __u8 type)
7ee1af76 692{
a88b4707 693 struct cifsLockInfo *lock =
fb8c4b14 694 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
695 if (!lock)
696 return lock;
697 lock->offset = offset;
698 lock->length = length;
699 lock->type = type;
a88b4707
PS
700 lock->pid = current->tgid;
701 INIT_LIST_HEAD(&lock->blist);
702 init_waitqueue_head(&lock->block_q);
703 return lock;
85160e03
PS
704}
705
f7ba7fe6 706void
85160e03
PS
707cifs_del_lock_waiters(struct cifsLockInfo *lock)
708{
709 struct cifsLockInfo *li, *tmp;
710 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
711 list_del_init(&li->blist);
712 wake_up(&li->block_q);
713 }
714}
715
716static bool
f45d3416
PS
717cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
718 __u64 length, __u8 type, struct cifsFileInfo *cfile,
579f9053 719 struct cifsLockInfo **conf_lock, bool rw_check)
85160e03 720{
fbd35aca 721 struct cifsLockInfo *li;
f45d3416 722 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
106dc538 723 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03 724
f45d3416 725 list_for_each_entry(li, &fdlocks->locks, llist) {
85160e03
PS
726 if (offset + length <= li->offset ||
727 offset >= li->offset + li->length)
728 continue;
579f9053
PS
729 if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
730 current->tgid == li->pid)
731 continue;
f45d3416
PS
732 if ((type & server->vals->shared_lock_type) &&
733 ((server->ops->compare_fids(cfile, cur_cfile) &&
734 current->tgid == li->pid) || type == li->type))
85160e03 735 continue;
579f9053
PS
736 if (conf_lock)
737 *conf_lock = li;
f45d3416 738 return true;
85160e03
PS
739 }
740 return false;
741}
742
579f9053 743bool
55157dfb 744cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
579f9053
PS
745 __u8 type, struct cifsLockInfo **conf_lock,
746 bool rw_check)
161ebf9f 747{
fbd35aca 748 bool rc = false;
f45d3416 749 struct cifs_fid_locks *cur;
55157dfb 750 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
fbd35aca 751
f45d3416
PS
752 list_for_each_entry(cur, &cinode->llist, llist) {
753 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
579f9053 754 cfile, conf_lock, rw_check);
fbd35aca
PS
755 if (rc)
756 break;
757 }
fbd35aca
PS
758
759 return rc;
161ebf9f
PS
760}
761
9a5101c8
PS
762/*
763 * Check if there is another lock that prevents us to set the lock (mandatory
764 * style). If such a lock exists, update the flock structure with its
765 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
766 * or leave it the same if we can't. Returns 0 if we don't need to request to
767 * the server or 1 otherwise.
768 */
85160e03 769static int
fbd35aca
PS
770cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
771 __u8 type, struct file_lock *flock)
85160e03
PS
772{
773 int rc = 0;
774 struct cifsLockInfo *conf_lock;
fbd35aca 775 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
106dc538 776 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03
PS
777 bool exist;
778
1b4b55a1 779 down_read(&cinode->lock_sem);
85160e03 780
55157dfb 781 exist = cifs_find_lock_conflict(cfile, offset, length, type,
579f9053 782 &conf_lock, false);
85160e03
PS
783 if (exist) {
784 flock->fl_start = conf_lock->offset;
785 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
786 flock->fl_pid = conf_lock->pid;
106dc538 787 if (conf_lock->type & server->vals->shared_lock_type)
85160e03
PS
788 flock->fl_type = F_RDLCK;
789 else
790 flock->fl_type = F_WRLCK;
791 } else if (!cinode->can_cache_brlcks)
792 rc = 1;
793 else
794 flock->fl_type = F_UNLCK;
795
1b4b55a1 796 up_read(&cinode->lock_sem);
85160e03
PS
797 return rc;
798}
799
161ebf9f 800static void
fbd35aca 801cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 802{
fbd35aca 803 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1b4b55a1 804 down_write(&cinode->lock_sem);
f45d3416 805 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 806 up_write(&cinode->lock_sem);
7ee1af76
JA
807}
808
9a5101c8
PS
809/*
810 * Set the byte-range lock (mandatory style). Returns:
811 * 1) 0, if we set the lock and don't need to request to the server;
812 * 2) 1, if no locks prevent us but we need to request to the server;
813 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
814 */
85160e03 815static int
fbd35aca 816cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
161ebf9f 817 bool wait)
85160e03 818{
161ebf9f 819 struct cifsLockInfo *conf_lock;
fbd35aca 820 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
85160e03
PS
821 bool exist;
822 int rc = 0;
823
85160e03
PS
824try_again:
825 exist = false;
1b4b55a1 826 down_write(&cinode->lock_sem);
85160e03 827
55157dfb 828 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
579f9053 829 lock->type, &conf_lock, false);
85160e03 830 if (!exist && cinode->can_cache_brlcks) {
f45d3416 831 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 832 up_write(&cinode->lock_sem);
85160e03
PS
833 return rc;
834 }
835
836 if (!exist)
837 rc = 1;
838 else if (!wait)
839 rc = -EACCES;
840 else {
841 list_add_tail(&lock->blist, &conf_lock->blist);
1b4b55a1 842 up_write(&cinode->lock_sem);
85160e03
PS
843 rc = wait_event_interruptible(lock->block_q,
844 (lock->blist.prev == &lock->blist) &&
845 (lock->blist.next == &lock->blist));
846 if (!rc)
847 goto try_again;
1b4b55a1 848 down_write(&cinode->lock_sem);
a88b4707 849 list_del_init(&lock->blist);
85160e03
PS
850 }
851
1b4b55a1 852 up_write(&cinode->lock_sem);
85160e03
PS
853 return rc;
854}
855
9a5101c8
PS
856/*
857 * Check if there is another lock that prevents us to set the lock (posix
858 * style). If such a lock exists, update the flock structure with its
859 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
860 * or leave it the same if we can't. Returns 0 if we don't need to request to
861 * the server or 1 otherwise.
862 */
85160e03 863static int
4f6bcec9
PS
864cifs_posix_lock_test(struct file *file, struct file_lock *flock)
865{
866 int rc = 0;
867 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
868 unsigned char saved_type = flock->fl_type;
869
50792760
PS
870 if ((flock->fl_flags & FL_POSIX) == 0)
871 return 1;
872
1b4b55a1 873 down_read(&cinode->lock_sem);
4f6bcec9
PS
874 posix_test_lock(file, flock);
875
876 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
877 flock->fl_type = saved_type;
878 rc = 1;
879 }
880
1b4b55a1 881 up_read(&cinode->lock_sem);
4f6bcec9
PS
882 return rc;
883}
884
9a5101c8
PS
885/*
886 * Set the byte-range lock (posix style). Returns:
887 * 1) 0, if we set the lock and don't need to request to the server;
888 * 2) 1, if we need to request to the server;
889 * 3) <0, if the error occurs while setting the lock.
890 */
4f6bcec9
PS
891static int
892cifs_posix_lock_set(struct file *file, struct file_lock *flock)
893{
894 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
50792760
PS
895 int rc = 1;
896
897 if ((flock->fl_flags & FL_POSIX) == 0)
898 return rc;
4f6bcec9 899
66189be7 900try_again:
1b4b55a1 901 down_write(&cinode->lock_sem);
4f6bcec9 902 if (!cinode->can_cache_brlcks) {
1b4b55a1 903 up_write(&cinode->lock_sem);
50792760 904 return rc;
4f6bcec9 905 }
66189be7
PS
906
907 rc = posix_lock_file(file, flock, NULL);
1b4b55a1 908 up_write(&cinode->lock_sem);
66189be7
PS
909 if (rc == FILE_LOCK_DEFERRED) {
910 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
911 if (!rc)
912 goto try_again;
913 locks_delete_block(flock);
914 }
9ebb389d 915 return rc;
4f6bcec9
PS
916}
917
d39a4f71 918int
4f6bcec9 919cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
85160e03 920{
6d5786a3
PS
921 unsigned int xid;
922 int rc = 0, stored_rc;
85160e03
PS
923 struct cifsLockInfo *li, *tmp;
924 struct cifs_tcon *tcon;
925 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
0013fb4c 926 unsigned int num, max_num, max_buf;
32b9aaf1
PS
927 LOCKING_ANDX_RANGE *buf, *cur;
928 int types[] = {LOCKING_ANDX_LARGE_FILES,
929 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
930 int i;
85160e03 931
6d5786a3 932 xid = get_xid();
85160e03
PS
933 tcon = tlink_tcon(cfile->tlink);
934
1b4b55a1
PS
935 /* we are going to update can_cache_brlcks here - need a write access */
936 down_write(&cinode->lock_sem);
85160e03 937 if (!cinode->can_cache_brlcks) {
1b4b55a1 938 up_write(&cinode->lock_sem);
6d5786a3 939 free_xid(xid);
85160e03
PS
940 return rc;
941 }
942
0013fb4c
PS
943 /*
944 * Accessing maxBuf is racy with cifs_reconnect - need to store value
945 * and check it for zero before using.
946 */
947 max_buf = tcon->ses->server->maxBuf;
948 if (!max_buf) {
1b4b55a1 949 up_write(&cinode->lock_sem);
6d5786a3 950 free_xid(xid);
0013fb4c
PS
951 return -EINVAL;
952 }
953
954 max_num = (max_buf - sizeof(struct smb_hdr)) /
955 sizeof(LOCKING_ANDX_RANGE);
32b9aaf1
PS
956 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
957 if (!buf) {
1b4b55a1 958 up_write(&cinode->lock_sem);
6d5786a3 959 free_xid(xid);
e2f2886a 960 return -ENOMEM;
32b9aaf1
PS
961 }
962
963 for (i = 0; i < 2; i++) {
964 cur = buf;
965 num = 0;
f45d3416 966 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
32b9aaf1
PS
967 if (li->type != types[i])
968 continue;
969 cur->Pid = cpu_to_le16(li->pid);
970 cur->LengthLow = cpu_to_le32((u32)li->length);
971 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
972 cur->OffsetLow = cpu_to_le32((u32)li->offset);
973 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
974 if (++num == max_num) {
4b4de76e
PS
975 stored_rc = cifs_lockv(xid, tcon,
976 cfile->fid.netfid,
04a6aa8a
PS
977 (__u8)li->type, 0, num,
978 buf);
32b9aaf1
PS
979 if (stored_rc)
980 rc = stored_rc;
981 cur = buf;
982 num = 0;
983 } else
984 cur++;
985 }
986
987 if (num) {
4b4de76e 988 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
04a6aa8a 989 (__u8)types[i], 0, num, buf);
32b9aaf1
PS
990 if (stored_rc)
991 rc = stored_rc;
992 }
85160e03
PS
993 }
994
995 cinode->can_cache_brlcks = false;
1b4b55a1 996 up_write(&cinode->lock_sem);
85160e03 997
32b9aaf1 998 kfree(buf);
6d5786a3 999 free_xid(xid);
85160e03
PS
1000 return rc;
1001}
1002
4f6bcec9
PS
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

/*
 * Snapshot of one POSIX lock, captured while holding lock_flocks() so
 * the actual server requests can be issued after the spinlock is
 * dropped (see cifs_push_posix_locks()).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the temporary locks_to_send list */
	__u64 offset;		/* start of the locked byte range */
	__u64 length;		/* length of the locked byte range */
	__u32 pid;		/* lock owner pid sent to the server */
	__u16 netfid;		/* handle of the file the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1016
4f6bcec9
PS
/*
 * Push all cached POSIX byte-range locks on the inode to the server.
 * Works in two passes: first count the FL_POSIX locks under
 * lock_flocks() and preallocate one lock_to_push per lock, then walk
 * the list again copying each lock into the preallocated entries, and
 * finally send them via CIFSSMBPosixLock() after dropping the spinlock.
 * Clears can_cache_brlcks before returning.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/* pass 1: count the POSIX locks we will have to push */
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* pass 2: copy each lock into the next preallocated entry */
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	/* spinlock dropped - now the snapshots can be sent to the server */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way: free what was queued, then fall
	   through to the normal exit path via out */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1116
1117static int
1118cifs_push_locks(struct cifsFileInfo *cfile)
1119{
1120 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1121 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1122
29e20f9c 1123 if (cap_unix(tcon->ses) &&
4f6bcec9
PS
1124 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1125 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1126 return cifs_push_posix_locks(cfile);
1127
d39a4f71 1128 return tcon->ses->server->ops->push_mand_locks(cfile);
4f6bcec9
PS
1129}
1130
03776f45 1131static void
04a6aa8a 1132cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
106dc538 1133 bool *wait_flag, struct TCP_Server_Info *server)
1da177e4 1134{
03776f45 1135 if (flock->fl_flags & FL_POSIX)
b6b38f70 1136 cFYI(1, "Posix");
03776f45 1137 if (flock->fl_flags & FL_FLOCK)
b6b38f70 1138 cFYI(1, "Flock");
03776f45 1139 if (flock->fl_flags & FL_SLEEP) {
b6b38f70 1140 cFYI(1, "Blocking lock");
03776f45 1141 *wait_flag = true;
1da177e4 1142 }
03776f45 1143 if (flock->fl_flags & FL_ACCESS)
b6b38f70 1144 cFYI(1, "Process suspended by mandatory locking - "
03776f45
PS
1145 "not implemented yet");
1146 if (flock->fl_flags & FL_LEASE)
b6b38f70 1147 cFYI(1, "Lease on file - not implemented yet");
03776f45 1148 if (flock->fl_flags &
1da177e4 1149 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
03776f45 1150 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1da177e4 1151
106dc538 1152 *type = server->vals->large_lock_type;
03776f45 1153 if (flock->fl_type == F_WRLCK) {
b6b38f70 1154 cFYI(1, "F_WRLCK ");
106dc538 1155 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1156 *lock = 1;
1157 } else if (flock->fl_type == F_UNLCK) {
b6b38f70 1158 cFYI(1, "F_UNLCK");
106dc538 1159 *type |= server->vals->unlock_lock_type;
03776f45
PS
1160 *unlock = 1;
1161 /* Check if unlock includes more than one lock range */
1162 } else if (flock->fl_type == F_RDLCK) {
b6b38f70 1163 cFYI(1, "F_RDLCK");
106dc538 1164 *type |= server->vals->shared_lock_type;
03776f45
PS
1165 *lock = 1;
1166 } else if (flock->fl_type == F_EXLCK) {
b6b38f70 1167 cFYI(1, "F_EXLCK");
106dc538 1168 *type |= server->vals->exclusive_lock_type;
03776f45
PS
1169 *lock = 1;
1170 } else if (flock->fl_type == F_SHLCK) {
b6b38f70 1171 cFYI(1, "F_SHLCK");
106dc538 1172 *type |= server->vals->shared_lock_type;
03776f45 1173 *lock = 1;
1da177e4 1174 } else
b6b38f70 1175 cFYI(1, "Unknown type of lock");
03776f45 1176}
1da177e4 1177
/*
 * Handle an F_GETLK request: report whether the given range could be
 * locked.  For mandatory locks there is no server-side "test" command,
 * so we probe by actually taking a non-blocking lock and immediately
 * unlocking it, downgrading to a shared probe when the exclusive one
 * fails.  flock->fl_type is updated in place with the answer.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* check the local cache first; a hit answers the query */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* mandatory locks: consult the locally cached lock list first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe with the requested (exclusive) type ... */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* ... it succeeded, so no conflict - undo the probe lock */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* a shared probe failed - something holds it exclusively */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive probe failed - retry with a shared probe to find out
	   whether the conflicting lock is shared or exclusive */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1245
f7ba7fe6 1246void
9ee305b7
PS
1247cifs_move_llist(struct list_head *source, struct list_head *dest)
1248{
1249 struct list_head *li, *tmp;
1250 list_for_each_safe(li, tmp, source)
1251 list_move(li, dest);
1252}
1253
f7ba7fe6 1254void
9ee305b7
PS
1255cifs_free_llist(struct list_head *llist)
1256{
1257 struct cifsLockInfo *li, *tmp;
1258 list_for_each_entry_safe(li, tmp, llist, llist) {
1259 cifs_del_lock_waiters(li);
1260 list_del(&li->llist);
1261 kfree(li);
1262 }
1263}
1264
d39a4f71 1265int
6d5786a3
PS
1266cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1267 unsigned int xid)
9ee305b7
PS
1268{
1269 int rc = 0, stored_rc;
1270 int types[] = {LOCKING_ANDX_LARGE_FILES,
1271 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1272 unsigned int i;
0013fb4c 1273 unsigned int max_num, num, max_buf;
9ee305b7
PS
1274 LOCKING_ANDX_RANGE *buf, *cur;
1275 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1276 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1277 struct cifsLockInfo *li, *tmp;
1278 __u64 length = 1 + flock->fl_end - flock->fl_start;
1279 struct list_head tmp_llist;
1280
1281 INIT_LIST_HEAD(&tmp_llist);
1282
0013fb4c
PS
1283 /*
1284 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1285 * and check it for zero before using.
1286 */
1287 max_buf = tcon->ses->server->maxBuf;
1288 if (!max_buf)
1289 return -EINVAL;
1290
1291 max_num = (max_buf - sizeof(struct smb_hdr)) /
1292 sizeof(LOCKING_ANDX_RANGE);
9ee305b7
PS
1293 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1294 if (!buf)
1295 return -ENOMEM;
1296
1b4b55a1 1297 down_write(&cinode->lock_sem);
9ee305b7
PS
1298 for (i = 0; i < 2; i++) {
1299 cur = buf;
1300 num = 0;
f45d3416 1301 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
9ee305b7
PS
1302 if (flock->fl_start > li->offset ||
1303 (flock->fl_start + length) <
1304 (li->offset + li->length))
1305 continue;
1306 if (current->tgid != li->pid)
1307 continue;
9ee305b7
PS
1308 if (types[i] != li->type)
1309 continue;
ea319d57 1310 if (cinode->can_cache_brlcks) {
9ee305b7
PS
1311 /*
1312 * We can cache brlock requests - simply remove
fbd35aca 1313 * a lock from the file's list.
9ee305b7
PS
1314 */
1315 list_del(&li->llist);
1316 cifs_del_lock_waiters(li);
1317 kfree(li);
ea319d57 1318 continue;
9ee305b7 1319 }
ea319d57
PS
1320 cur->Pid = cpu_to_le16(li->pid);
1321 cur->LengthLow = cpu_to_le32((u32)li->length);
1322 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1323 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1324 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1325 /*
1326 * We need to save a lock here to let us add it again to
1327 * the file's list if the unlock range request fails on
1328 * the server.
1329 */
1330 list_move(&li->llist, &tmp_llist);
1331 if (++num == max_num) {
4b4de76e
PS
1332 stored_rc = cifs_lockv(xid, tcon,
1333 cfile->fid.netfid,
ea319d57
PS
1334 li->type, num, 0, buf);
1335 if (stored_rc) {
1336 /*
1337 * We failed on the unlock range
1338 * request - add all locks from the tmp
1339 * list to the head of the file's list.
1340 */
1341 cifs_move_llist(&tmp_llist,
f45d3416 1342 &cfile->llist->locks);
ea319d57
PS
1343 rc = stored_rc;
1344 } else
1345 /*
1346 * The unlock range request succeed -
1347 * free the tmp list.
1348 */
1349 cifs_free_llist(&tmp_llist);
1350 cur = buf;
1351 num = 0;
1352 } else
1353 cur++;
9ee305b7
PS
1354 }
1355 if (num) {
4b4de76e 1356 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
9ee305b7
PS
1357 types[i], num, 0, buf);
1358 if (stored_rc) {
f45d3416
PS
1359 cifs_move_llist(&tmp_llist,
1360 &cfile->llist->locks);
9ee305b7
PS
1361 rc = stored_rc;
1362 } else
1363 cifs_free_llist(&tmp_llist);
1364 }
1365 }
1366
1b4b55a1 1367 up_write(&cinode->lock_sem);
9ee305b7
PS
1368 kfree(buf);
1369 return rc;
1370}
1371
/*
 * Handle an F_SETLK/F_SETLKW request.  POSIX-capable mounts go through
 * the local posix lock cache and CIFSSMBPosixLock; otherwise the lock
 * is recorded locally (or sent to the server when caching is off) via
 * the protocol's mandatory lock operations.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (posix_lck) {
		int posix_lock_type;

		/* try to satisfy the request from the local cache; rc == 1
		   means the server must be asked, rc < 0 is an error */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/* rc < 0: error; rc == 0: conflicting lock, nothing added;
		   rc > 0: no conflict, proceed to ask the server */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			/* server refused - don't keep a stale local entry */
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	/* keep the VFS posix lock state in sync with what we did */
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
1433
/*
 * VFS ->lock entry point for cifs.  Decodes the request with
 * cifs_read_flock(), decides whether the POSIX (Unix extensions) or
 * mandatory path applies, then dispatches to cifs_getlk()/cifs_setlk().
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	/* POSIX path needs Unix extensions with FCNTL locks and no
	   "noposixbrl" mount override */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1492
597b027f
JL
1493/*
1494 * update the file size (if needed) after a write. Should be called with
1495 * the inode->i_lock held
1496 */
72432ffc 1497void
fbec9ab9
JL
1498cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1499 unsigned int bytes_written)
1500{
1501 loff_t end_of_write = offset + bytes_written;
1502
1503 if (end_of_write > cifsi->server_eof)
1504 cifsi->server_eof = end_of_write;
1505}
1506
ba9ad725
PS
1507static ssize_t
1508cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1509 size_t write_size, loff_t *offset)
1da177e4
LT
1510{
1511 int rc = 0;
1512 unsigned int bytes_written = 0;
1513 unsigned int total_written;
1514 struct cifs_sb_info *cifs_sb;
ba9ad725
PS
1515 struct cifs_tcon *tcon;
1516 struct TCP_Server_Info *server;
6d5786a3 1517 unsigned int xid;
7da4b49a
JL
1518 struct dentry *dentry = open_file->dentry;
1519 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
fa2989f4 1520 struct cifs_io_parms io_parms;
1da177e4 1521
7da4b49a 1522 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 1523
b6b38f70 1524 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
ba9ad725 1525 *offset, dentry->d_name.name);
1da177e4 1526
ba9ad725
PS
1527 tcon = tlink_tcon(open_file->tlink);
1528 server = tcon->ses->server;
1529
1530 if (!server->ops->sync_write)
1531 return -ENOSYS;
50c2f753 1532
6d5786a3 1533 xid = get_xid();
1da177e4 1534
1da177e4
LT
1535 for (total_written = 0; write_size > total_written;
1536 total_written += bytes_written) {
1537 rc = -EAGAIN;
1538 while (rc == -EAGAIN) {
ca83ce3d
JL
1539 struct kvec iov[2];
1540 unsigned int len;
1541
1da177e4 1542 if (open_file->invalidHandle) {
1da177e4
LT
1543 /* we could deadlock if we called
1544 filemap_fdatawait from here so tell
fb8c4b14 1545 reopen_file not to flush data to
1da177e4 1546 server now */
15886177 1547 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
1548 if (rc != 0)
1549 break;
1550 }
ca83ce3d
JL
1551
1552 len = min((size_t)cifs_sb->wsize,
1553 write_size - total_written);
1554 /* iov[0] is reserved for smb header */
1555 iov[1].iov_base = (char *)write_data + total_written;
1556 iov[1].iov_len = len;
fa2989f4 1557 io_parms.pid = pid;
ba9ad725
PS
1558 io_parms.tcon = tcon;
1559 io_parms.offset = *offset;
fa2989f4 1560 io_parms.length = len;
ba9ad725
PS
1561 rc = server->ops->sync_write(xid, open_file, &io_parms,
1562 &bytes_written, iov, 1);
1da177e4
LT
1563 }
1564 if (rc || (bytes_written == 0)) {
1565 if (total_written)
1566 break;
1567 else {
6d5786a3 1568 free_xid(xid);
1da177e4
LT
1569 return rc;
1570 }
fbec9ab9 1571 } else {
597b027f 1572 spin_lock(&dentry->d_inode->i_lock);
ba9ad725 1573 cifs_update_eof(cifsi, *offset, bytes_written);
597b027f 1574 spin_unlock(&dentry->d_inode->i_lock);
ba9ad725 1575 *offset += bytes_written;
fbec9ab9 1576 }
1da177e4
LT
1577 }
1578
ba9ad725 1579 cifs_stats_bytes_written(tcon, total_written);
1da177e4 1580
7da4b49a
JL
1581 if (total_written > 0) {
1582 spin_lock(&dentry->d_inode->i_lock);
ba9ad725
PS
1583 if (*offset > dentry->d_inode->i_size)
1584 i_size_write(dentry->d_inode, *offset);
7da4b49a 1585 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 1586 }
7da4b49a 1587 mark_inode_dirty_sync(dentry->d_inode);
6d5786a3 1588 free_xid(xid);
1da177e4
LT
1589 return total_written;
1590}
1591
6508d904
JL
/*
 * Find an open, valid handle on this inode that was opened with read
 * access, taking a reference on it so it cannot be closed underneath
 * the caller.  With @fsuid_only on a multiuser mount, only handles
 * owned by the current fsuid are considered.  Returns NULL when no
 * usable handle exists; the caller must cifsFileInfo_put() the result.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
630f3f0c 1625
6508d904
JL
/*
 * Find an open handle on this inode with write access, preferring a
 * valid handle owned by the current thread group, then any valid
 * handle, and finally attempting to reopen an invalidated one (up to
 * MAX_REOPEN_ATT tries).  Takes a reference on the returned handle;
 * the caller must cifsFileInfo_put() it.  Returns NULL on failure.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass restricts to the current thread group */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				/* remember one stale handle as a fallback */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		/* reopen outside the spinlock - it may sleep; on failure,
		   demote this handle to the list tail and retry the scan */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
1705
1da177e4
LT
/*
 * Write the byte range [from, to) of @page back to the server using any
 * writable handle on the inode.  Used by the writepage path for partial
 * page updates.  Returns 0 on success (or when racing with truncate
 * made the write moot), negative error otherwise.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1759
1da177e4 1760static int cifs_writepages(struct address_space *mapping,
37c0eb46 1761 struct writeback_control *wbc)
1da177e4 1762{
c3d17b63
JL
1763 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1764 bool done = false, scanned = false, range_whole = false;
1765 pgoff_t end, index;
1766 struct cifs_writedata *wdata;
c9de5c80 1767 struct TCP_Server_Info *server;
37c0eb46 1768 struct page *page;
37c0eb46 1769 int rc = 0;
eddb079d 1770 loff_t isize = i_size_read(mapping->host);
50c2f753 1771
37c0eb46 1772 /*
c3d17b63 1773 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1774 * one page at a time via cifs_writepage
1775 */
1776 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1777 return generic_writepages(mapping, wbc);
1778
111ebb6e 1779 if (wbc->range_cyclic) {
37c0eb46 1780 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1781 end = -1;
1782 } else {
1783 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1784 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1785 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1786 range_whole = true;
1787 scanned = true;
37c0eb46
SF
1788 }
1789retry:
c3d17b63
JL
1790 while (!done && index <= end) {
1791 unsigned int i, nr_pages, found_pages;
1792 pgoff_t next = 0, tofind;
1793 struct page **pages;
1794
1795 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1796 end - index) + 1;
1797
c2e87640
JL
1798 wdata = cifs_writedata_alloc((unsigned int)tofind,
1799 cifs_writev_complete);
c3d17b63
JL
1800 if (!wdata) {
1801 rc = -ENOMEM;
1802 break;
1803 }
1804
1805 /*
1806 * find_get_pages_tag seems to return a max of 256 on each
1807 * iteration, so we must call it several times in order to
1808 * fill the array or the wsize is effectively limited to
1809 * 256 * PAGE_CACHE_SIZE.
1810 */
1811 found_pages = 0;
1812 pages = wdata->pages;
1813 do {
1814 nr_pages = find_get_pages_tag(mapping, &index,
1815 PAGECACHE_TAG_DIRTY,
1816 tofind, pages);
1817 found_pages += nr_pages;
1818 tofind -= nr_pages;
1819 pages += nr_pages;
1820 } while (nr_pages && tofind && index <= end);
1821
1822 if (found_pages == 0) {
1823 kref_put(&wdata->refcount, cifs_writedata_release);
1824 break;
1825 }
1826
1827 nr_pages = 0;
1828 for (i = 0; i < found_pages; i++) {
1829 page = wdata->pages[i];
37c0eb46
SF
1830 /*
1831 * At this point we hold neither mapping->tree_lock nor
1832 * lock on the page itself: the page may be truncated or
1833 * invalidated (changing page->mapping to NULL), or even
1834 * swizzled back from swapper_space to tmpfs file
1835 * mapping
1836 */
1837
c3d17b63 1838 if (nr_pages == 0)
37c0eb46 1839 lock_page(page);
529ae9aa 1840 else if (!trylock_page(page))
37c0eb46
SF
1841 break;
1842
1843 if (unlikely(page->mapping != mapping)) {
1844 unlock_page(page);
1845 break;
1846 }
1847
111ebb6e 1848 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1849 done = true;
37c0eb46
SF
1850 unlock_page(page);
1851 break;
1852 }
1853
1854 if (next && (page->index != next)) {
1855 /* Not next consecutive page */
1856 unlock_page(page);
1857 break;
1858 }
1859
1860 if (wbc->sync_mode != WB_SYNC_NONE)
1861 wait_on_page_writeback(page);
1862
1863 if (PageWriteback(page) ||
cb876f45 1864 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1865 unlock_page(page);
1866 break;
1867 }
84d2f07e 1868
cb876f45
LT
1869 /*
1870 * This actually clears the dirty bit in the radix tree.
1871 * See cifs_writepage() for more commentary.
1872 */
1873 set_page_writeback(page);
1874
eddb079d 1875 if (page_offset(page) >= isize) {
c3d17b63 1876 done = true;
84d2f07e 1877 unlock_page(page);
cb876f45 1878 end_page_writeback(page);
84d2f07e
SF
1879 break;
1880 }
1881
c3d17b63
JL
1882 wdata->pages[i] = page;
1883 next = page->index + 1;
1884 ++nr_pages;
1885 }
37c0eb46 1886
c3d17b63
JL
1887 /* reset index to refind any pages skipped */
1888 if (nr_pages == 0)
1889 index = wdata->pages[0]->index + 1;
84d2f07e 1890
c3d17b63
JL
1891 /* put any pages we aren't going to use */
1892 for (i = nr_pages; i < found_pages; i++) {
1893 page_cache_release(wdata->pages[i]);
1894 wdata->pages[i] = NULL;
1895 }
37c0eb46 1896
c3d17b63
JL
1897 /* nothing to write? */
1898 if (nr_pages == 0) {
1899 kref_put(&wdata->refcount, cifs_writedata_release);
1900 continue;
37c0eb46 1901 }
fbec9ab9 1902
c3d17b63
JL
1903 wdata->sync_mode = wbc->sync_mode;
1904 wdata->nr_pages = nr_pages;
1905 wdata->offset = page_offset(wdata->pages[0]);
eddb079d
JL
1906 wdata->pagesz = PAGE_CACHE_SIZE;
1907 wdata->tailsz =
1908 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1909 (loff_t)PAGE_CACHE_SIZE);
1910 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1911 wdata->tailsz;
941b853d 1912
c3d17b63
JL
1913 do {
1914 if (wdata->cfile != NULL)
1915 cifsFileInfo_put(wdata->cfile);
1916 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1917 false);
1918 if (!wdata->cfile) {
1919 cERROR(1, "No writable handles for inode");
1920 rc = -EBADF;
1921 break;
941b853d 1922 }
fe5f5d2e 1923 wdata->pid = wdata->cfile->pid;
c9de5c80
PS
1924 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1925 rc = server->ops->async_writev(wdata);
c3d17b63 1926 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1927
c3d17b63
JL
1928 for (i = 0; i < nr_pages; ++i)
1929 unlock_page(wdata->pages[i]);
f3983c21 1930
c3d17b63
JL
1931 /* send failure -- clean up the mess */
1932 if (rc != 0) {
1933 for (i = 0; i < nr_pages; ++i) {
941b853d 1934 if (rc == -EAGAIN)
c3d17b63
JL
1935 redirty_page_for_writepage(wbc,
1936 wdata->pages[i]);
1937 else
1938 SetPageError(wdata->pages[i]);
1939 end_page_writeback(wdata->pages[i]);
1940 page_cache_release(wdata->pages[i]);
37c0eb46 1941 }
941b853d
JL
1942 if (rc != -EAGAIN)
1943 mapping_set_error(mapping, rc);
c3d17b63
JL
1944 }
1945 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1946
c3d17b63
JL
1947 wbc->nr_to_write -= nr_pages;
1948 if (wbc->nr_to_write <= 0)
1949 done = true;
b066a48c 1950
c3d17b63 1951 index = next;
37c0eb46 1952 }
c3d17b63 1953
37c0eb46
SF
1954 if (!scanned && !done) {
1955 /*
1956 * We hit the last page and there is more work to be done: wrap
1957 * back to the start of the file
1958 */
c3d17b63 1959 scanned = true;
37c0eb46
SF
1960 index = 0;
1961 goto retry;
1962 }
c3d17b63 1963
111ebb6e 1964 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1965 mapping->writeback_index = index;
1966
1da177e4
LT
1967 return rc;
1968}
1da177e4 1969
/*
 * Write out one already-locked page via cifs_partialpagewrite(), retrying
 * forever on -EAGAIN when the caller asked for data-integrity writeback
 * (WB_SYNC_ALL). The caller keeps the page lock; we only take a page ref.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	/* WB_SYNC_ALL must not lose data: spin on transient -EAGAIN */
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

/*
 * address_space_operations ->writepage entry point: write the page out,
 * then release the page lock that the VM handed to us.
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return rc;
}

/*
 * ->write_end for CIFS: commit @copied bytes previously prepared into @page.
 * If the page is not (or cannot be marked) uptodate, push the bytes to the
 * server synchronously via cifs_write(); otherwise just dirty the page.
 * Extends i_size under i_lock when the write grew the file. Always drops the
 * page lock and the page reference taken by write_begin.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the originating pid to the server when mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		/* write_begin deferred the uptodate decision to us */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* writes past EOF extend the file size */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

/*
 * fsync for strict-cache mounts: flush dirty pages in [start, end], then, if
 * we do not hold a read oplock, invalidate the page cache (failure here is
 * only logged, not returned), and finally ask the server to flush the handle
 * unless the mount disabled server-side sync (nosssync).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * Regular (non-strict) fsync: flush dirty pages in [start, end] and ask the
 * server to flush the open handle, unless the mount disabled server-side
 * sync (nosssync). Unlike cifs_strict_fsync() this never invalidates the
 * page cache.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	/* only files opened for writing can have write-behind data to flush */
	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

/*
 * Allocate @num_pages highmem-capable pages into @pages[]. On allocation
 * failure, release every page allocated so far and return -ENOMEM (note
 * num_pages is shrunk to the count actually allocated before the cleanup
 * loop, so only valid entries are put).
 */
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

2197static inline
2198size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2199{
2200 size_t num_pages;
2201 size_t clen;
2202
2203 clen = min_t(const size_t, len, wsize);
a7103b99 2204 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2205
2206 if (cur_len)
2207 *cur_len = clen;
2208
2209 return num_pages;
2210}
2211
/*
 * Workqueue completion for an uncached async write: update the cached server
 * EOF (and i_size if it grew) under i_lock, wake any waiter on wdata->done,
 * release the data pages unless the request will be retried (-EAGAIN), and
 * drop our reference on the writedata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/* on -EAGAIN the pages are kept for the resend */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		/* reopen a handle invalidated by reconnect before resending */
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

/*
 * Core of the uncached write path: chop the user iovec into wsize-sized
 * chunks, copy each chunk into freshly allocated pages, and send them as
 * async writes. Then wait for the replies in offset order, resending any
 * chunk that comes back -EAGAIN. Returns bytes written (updating *poffset)
 * or a negative error if nothing was written.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		/* copy user data into the pages; cur_len ends up as the
		   number of bytes actually copied for this chunk */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

/*
 * Uncached write entry point (aio). Delegates to cifs_iovec_write() and, on
 * success, invalidates the inode's cached pages (the data bypassed the page
 * cache) and advances the kiocb position.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

/*
 * Cached write used by the strict-cache path when we hold an exclusive
 * oplock. Takes lock_sem shared to ensure no conflicting mandatory brlock is
 * added while the generic write runs; returns -EACCES if a conflicting lock
 * already covers the range.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     true)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		/* honor O_SYNC/O_DSYNC semantics for the bytes just written */
		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}

/*
 * Strict-cache write dispatcher: without a write (exclusive) oplock go
 * uncached; with POSIX byte-range lock support use the generic cached path;
 * otherwise use cifs_writev() which checks mandatory brlocks itself.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	/*
	 * In strict cache mode we need to write the data to the server exactly
	 * from the pos to pos+len-1 rather than flush all affected pages
	 * because it may cause a error with mandatory locks on these pages but
	 * not on the region from pos to ppos+len-1.
	 */

	if (!cinode->clientCanCacheAll)
		return cifs_user_writev(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	return cifs_writev(iocb, iov, nr_segs, pos);
}

/*
 * Allocate a zeroed cifs_readdata with room for @nr_pages page pointers in
 * its trailing array, refcount initialized to 1 and @complete set as the
 * workqueue callback. Returns NULL on allocation failure.
 */
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

/*
 * kref release for a cifs_readdata: drop the file handle reference (if one
 * was attached) and free the structure. Does NOT release the data pages —
 * see cifs_uncached_readdata_release() for that variant.
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

1c892549 2511static int
c5fab6f4 2512cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
1c892549
JL
2513{
2514 int rc = 0;
c5fab6f4 2515 struct page *page;
1c892549
JL
2516 unsigned int i;
2517
c5fab6f4 2518 for (i = 0; i < nr_pages; i++) {
1c892549
JL
2519 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2520 if (!page) {
2521 rc = -ENOMEM;
2522 break;
2523 }
c5fab6f4 2524 rdata->pages[i] = page;
1c892549
JL
2525 }
2526
2527 if (rc) {
c5fab6f4
JL
2528 for (i = 0; i < nr_pages; i++) {
2529 put_page(rdata->pages[i]);
2530 rdata->pages[i] = NULL;
1c892549
JL
2531 }
2532 }
2533 return rc;
2534}
2535
/*
 * kref release for an uncached read: put every data page still attached to
 * the readdata, then fall through to the common release to drop the file
 * handle reference and free the structure.
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

/*
 * Send an async read to the server, retrying as long as it returns -EAGAIN.
 * If the handle was invalidated by a reconnect, reopen it first; a reopen
 * failure other than -EAGAIN terminates the loop with that error.
 */
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 * Returns 0 or a negative error from memcpy_toiovecend(); *copied counts
 * only bytes transferred before any error.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	/* where this response's data starts relative to the iovec */
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

/*
 * Workqueue completion for an uncached async read: wake the waiter in
 * cifs_iovec_read() and drop the work's reference on the readdata.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

/*
 * Receive up to @len bytes from the server socket into rdata's pages, one
 * page per recv. A short final page is zero-padded and recorded in
 * rdata->tailsz; pages beyond the received length are released and dropped
 * from rdata. Returns total bytes read, or the socket error if nothing was
 * read.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

/*
 * Core of the uncached read path: split the request into rsize-sized chunks,
 * allocate pages for each and issue async reads, then collect the replies in
 * offset order, copying the received data into the user's iovec and
 * resending any chunk that fails with -EAGAIN. Returns bytes read (updating
 * *poffset) or a negative error if nothing was read.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			rc = -ENOMEM;
			goto error;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

0b81c1c4 2802ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2803 unsigned long nr_segs, loff_t pos)
2804{
2805 ssize_t read;
2806
2807 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2808 if (read > 0)
2809 iocb->ki_pos = pos;
2810
2811 return read;
2812}
2813
/*
 * Strict-cache read dispatcher: without a read oplock go uncached; with
 * POSIX byte-range lock support use the generic cached path; otherwise take
 * lock_sem shared and only read through the cache when no conflicting
 * mandatory brlock covers the range (else -EACCES).
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, true))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Synchronous read into a kernel buffer: loop issuing sync_read requests of
 * at most rsize bytes, reopening the handle and retrying on -EAGAIN, until
 * @read_size bytes are read or the server returns an error/EOF. Advances
 * *offset by the bytes read and returns the total (or the error if nothing
 * was read).
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): this passes the cumulative total_read
			 * every iteration, so multi-chunk reads look inflated
			 * in the stats — bytes_read seems intended; confirm
			 * before changing.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

ca83ce3d
JL
2944/*
2945 * If the page is mmap'ed into a process' page tables, then we need to make
2946 * sure that it doesn't change while being written back.
2947 */
2948static int
2949cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2950{
2951 struct page *page = vmf->page;
2952
2953 lock_page(page);
2954 return VM_FAULT_LOCKED;
2955}
2956
2957static struct vm_operations_struct cifs_file_vm_ops = {
2958 .fault = filemap_fault,
2959 .page_mkwrite = cifs_page_mkwrite,
2960};
2961
7a6a19b1
PS
2962int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2963{
2964 int rc, xid;
2965 struct inode *inode = file->f_path.dentry->d_inode;
2966
6d5786a3 2967 xid = get_xid();
7a6a19b1 2968
6feb9891
PS
2969 if (!CIFS_I(inode)->clientCanCacheRead) {
2970 rc = cifs_invalidate_mapping(inode);
2971 if (rc)
2972 return rc;
2973 }
7a6a19b1
PS
2974
2975 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2976 if (rc == 0)
2977 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 2978 free_xid(xid);
7a6a19b1
PS
2979 return rc;
2980}
2981
1da177e4
LT
2982int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2983{
1da177e4
LT
2984 int rc, xid;
2985
6d5786a3 2986 xid = get_xid();
abab095d 2987 rc = cifs_revalidate_file(file);
1da177e4 2988 if (rc) {
b6b38f70 2989 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
6d5786a3 2990 free_xid(xid);
1da177e4
LT
2991 return rc;
2992 }
2993 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2994 if (rc == 0)
2995 vma->vm_ops = &cifs_file_vm_ops;
6d5786a3 2996 free_xid(xid);
1da177e4
LT
2997 return rc;
2998}
2999
0471ca3f
JL
3000static void
3001cifs_readv_complete(struct work_struct *work)
3002{
c5fab6f4 3003 unsigned int i;
0471ca3f
JL
3004 struct cifs_readdata *rdata = container_of(work,
3005 struct cifs_readdata, work);
0471ca3f 3006
c5fab6f4
JL
3007 for (i = 0; i < rdata->nr_pages; i++) {
3008 struct page *page = rdata->pages[i];
3009
0471ca3f
JL
3010 lru_cache_add_file(page);
3011
3012 if (rdata->result == 0) {
0471ca3f
JL
3013 flush_dcache_page(page);
3014 SetPageUptodate(page);
3015 }
3016
3017 unlock_page(page);
3018
3019 if (rdata->result == 0)
3020 cifs_readpage_to_fscache(rdata->mapping->host, page);
3021
3022 page_cache_release(page);
c5fab6f4 3023 rdata->pages[i] = NULL;
0471ca3f 3024 }
6993f74a 3025 kref_put(&rdata->refcount, cifs_readdata_release);
0471ca3f
JL
3026}
3027
/*
 * Receive the payload of an async read directly from the socket into the
 * pages of @rdata. @len is the number of payload bytes the server says it
 * sent; pages past that (or past the server's EOF as we last saw it) are
 * released early rather than held for data that will never arrive.
 *
 * Returns the total number of bytes read, or the last socket error if
 * nothing was read.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
				'\0', PAGE_CACHE_SIZE - len);
			/* remember the short tail for the transport layer */
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		/* pull this page's worth of payload off the wire */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
3103
/*
 * Readahead entry point. Batches contiguous pages from @page_list (which
 * arrives in order of declining index) into cifs_readdata requests bounded
 * by the negotiated rsize, and dispatches each batch as an async read.
 * Completion is handled by cifs_readv_complete().
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* with pid forwarding, server-side I/O is attributed to the opener */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		/* transfer the batch (now in increasing index order) */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			/* dispatch failed: put the pages back on the LRU */
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; completion work holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}
3250
/*
 * Fill one locked page with data at *poffset, trying fscache first and
 * falling back to a synchronous cifs_read(). On success the page is
 * marked uptodate and pushed to fscache. The caller retains the page
 * lock; this function balances its own page ref and kmap.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	/* a read counts as an access; refresh atime locally */
	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* short read: zero-fill the remainder of the page */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3294
3295static int cifs_readpage(struct file *file, struct page *page)
3296{
3297 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3298 int rc = -EACCES;
6d5786a3 3299 unsigned int xid;
1da177e4 3300
6d5786a3 3301 xid = get_xid();
1da177e4
LT
3302
3303 if (file->private_data == NULL) {
0f3bc09e 3304 rc = -EBADF;
6d5786a3 3305 free_xid(xid);
0f3bc09e 3306 return rc;
1da177e4
LT
3307 }
3308
ac3aa2f8 3309 cFYI(1, "readpage %p at offset %d 0x%x",
b6b38f70 3310 page, (int)offset, (int)offset);
1da177e4
LT
3311
3312 rc = cifs_readpage_worker(file, page, &offset);
3313
3314 unlock_page(page);
3315
6d5786a3 3316 free_xid(xid);
1da177e4
LT
3317 return rc;
3318}
3319
a403a0a3
SF
3320static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3321{
3322 struct cifsFileInfo *open_file;
3323
4477288a 3324 spin_lock(&cifs_file_list_lock);
a403a0a3 3325 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 3326 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 3327 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3328 return 1;
3329 }
3330 }
4477288a 3331 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
3332 return 0;
3333}
3334
1da177e4
LT
3335/* We do not want to update the file size from server for inodes
3336 open for write - to avoid races with writepage extending
3337 the file - in the future we could consider allowing
fb8c4b14 3338 refreshing the inode only on increases in the file size
1da177e4
LT
3339 but this is tricky to do without racing with writebehind
3340 page caching in the current Linux kernel design */
4b18f2a9 3341bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 3342{
a403a0a3 3343 if (!cifsInode)
4b18f2a9 3344 return true;
50c2f753 3345
a403a0a3
SF
3346 if (is_inode_writable(cifsInode)) {
3347 /* This inode is open for write at least once */
c32a0b68
SF
3348 struct cifs_sb_info *cifs_sb;
3349
c32a0b68 3350 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 3351 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3352 /* since no page cache to corrupt on directio
c32a0b68 3353 we can change size safely */
4b18f2a9 3354 return true;
c32a0b68
SF
3355 }
3356
fb8c4b14 3357 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 3358 return true;
7ba52631 3359
4b18f2a9 3360 return false;
23e7dd7d 3361 } else
4b18f2a9 3362 return true;
1da177e4
LT
3363}
3364
/*
 * ->write_begin: locate/lock the target page and decide whether its
 * existing contents must be read in first. Several short-circuits avoid
 * the read: the page is already uptodate, the write covers a whole page,
 * or (with a read oplock) the write lies at/over EOF so the surrounding
 * bytes can simply be zeroed. Always returns the page in *pagep, even
 * on rc == 0 with a not-uptodate page (write_end then does a sync write).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3436
85f2d6b4
SJ
3437static int cifs_release_page(struct page *page, gfp_t gfp)
3438{
3439 if (PagePrivate(page))
3440 return 0;
3441
3442 return cifs_fscache_release_page(page, gfp);
3443}
3444
3445static void cifs_invalidate_page(struct page *page, unsigned long offset)
3446{
3447 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3448
3449 if (offset == 0)
3450 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3451}
3452
9ad1506b
PS
3453static int cifs_launder_page(struct page *page)
3454{
3455 int rc = 0;
3456 loff_t range_start = page_offset(page);
3457 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3458 struct writeback_control wbc = {
3459 .sync_mode = WB_SYNC_ALL,
3460 .nr_to_write = 0,
3461 .range_start = range_start,
3462 .range_end = range_end,
3463 };
3464
3465 cFYI(1, "Launder page: %p", page);
3466
3467 if (clear_page_dirty_for_io(page))
3468 rc = cifs_writepage_locked(page, &wbc);
3469
3470 cifs_fscache_invalidate_page(page, page->mapping->host);
3471 return rc;
3472}
3473
/*
 * Workqueue handler run when the server breaks our oplock/lease: break
 * any local lease, flush (and, if read caching is lost, wait for and
 * invalidate) cached data, push cached byte-range locks to the server,
 * then acknowledge the break — in that order, since the server holds
 * further opens until the acknowledgement.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* propagate the break to any local leases on the inode */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* read caching lost: wait out writes, drop cache */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
3513
/*
 * Address space operations used when the server's negotiated buffer is
 * large enough for multi-page (readpages) reads.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations, so the VFS
 * falls back to single-page cifs_readpage() reads.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};