cifs: add cifs_async_readv
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
1da177e4
LT
35#include <asm/div64.h>
36#include "cifsfs.h"
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_unicode.h"
41#include "cifs_debug.h"
42#include "cifs_fs_sb.h"
9451a9a5 43#include "fscache.h"
1da177e4 44
1da177e4
LT
45static inline int cifs_convert_flags(unsigned int flags)
46{
47 if ((flags & O_ACCMODE) == O_RDONLY)
48 return GENERIC_READ;
49 else if ((flags & O_ACCMODE) == O_WRONLY)
50 return GENERIC_WRITE;
51 else if ((flags & O_ACCMODE) == O_RDWR) {
52 /* GENERIC_ALL is too much permission to request
53 can cause unnecessary access denied on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ | GENERIC_WRITE);
56 }
57
e10f7b55
JL
58 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60 FILE_READ_DATA);
7fc8f4e9 61}
e10f7b55 62
608712fe 63static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 64{
608712fe 65 u32 posix_flags = 0;
e10f7b55 66
7fc8f4e9 67 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 68 posix_flags = SMB_O_RDONLY;
7fc8f4e9 69 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
70 posix_flags = SMB_O_WRONLY;
71 else if ((flags & O_ACCMODE) == O_RDWR)
72 posix_flags = SMB_O_RDWR;
73
74 if (flags & O_CREAT)
75 posix_flags |= SMB_O_CREAT;
76 if (flags & O_EXCL)
77 posix_flags |= SMB_O_EXCL;
78 if (flags & O_TRUNC)
79 posix_flags |= SMB_O_TRUNC;
80 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 81 if (flags & O_DSYNC)
608712fe 82 posix_flags |= SMB_O_SYNC;
7fc8f4e9 83 if (flags & O_DIRECTORY)
608712fe 84 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 85 if (flags & O_NOFOLLOW)
608712fe 86 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 87 if (flags & O_DIRECT)
608712fe 88 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
89
90 return posix_flags;
1da177e4
LT
91}
92
93static inline int cifs_get_disposition(unsigned int flags)
94{
95 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96 return FILE_CREATE;
97 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98 return FILE_OVERWRITE_IF;
99 else if ((flags & O_CREAT) == O_CREAT)
100 return FILE_OPEN_IF;
55aa2e09
SF
101 else if ((flags & O_TRUNC) == O_TRUNC)
102 return FILE_OVERWRITE;
1da177e4
LT
103 else
104 return FILE_OPEN;
105}
106
608712fe
JL
107int cifs_posix_open(char *full_path, struct inode **pinode,
108 struct super_block *sb, int mode, unsigned int f_flags,
109 __u32 *poplock, __u16 *pnetfid, int xid)
110{
111 int rc;
112 FILE_UNIX_BASIC_INFO *presp_data;
113 __u32 posix_flags = 0;
114 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
115 struct cifs_fattr fattr;
116 struct tcon_link *tlink;
96daf2b0 117 struct cifs_tcon *tcon;
608712fe
JL
118
119 cFYI(1, "posix open %s", full_path);
120
121 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
122 if (presp_data == NULL)
123 return -ENOMEM;
124
125 tlink = cifs_sb_tlink(cifs_sb);
126 if (IS_ERR(tlink)) {
127 rc = PTR_ERR(tlink);
128 goto posix_open_ret;
129 }
130
131 tcon = tlink_tcon(tlink);
132 mode &= ~current_umask();
133
134 posix_flags = cifs_posix_convert_flags(f_flags);
135 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
136 poplock, full_path, cifs_sb->local_nls,
137 cifs_sb->mnt_cifs_flags &
138 CIFS_MOUNT_MAP_SPECIAL_CHR);
139 cifs_put_tlink(tlink);
140
141 if (rc)
142 goto posix_open_ret;
143
144 if (presp_data->Type == cpu_to_le32(-1))
145 goto posix_open_ret; /* open ok, caller does qpathinfo */
146
147 if (!pinode)
148 goto posix_open_ret; /* caller does not need info */
149
150 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
151
152 /* get new inode and set it up */
153 if (*pinode == NULL) {
154 cifs_fill_uniqueid(sb, &fattr);
155 *pinode = cifs_iget(sb, &fattr);
156 if (!*pinode) {
157 rc = -ENOMEM;
158 goto posix_open_ret;
159 }
160 } else {
161 cifs_fattr_to_inode(*pinode, &fattr);
162 }
163
164posix_open_ret:
165 kfree(presp_data);
166 return rc;
167}
168
eeb910a6
PS
169static int
170cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
96daf2b0 171 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
eeb910a6
PS
172 __u16 *pnetfid, int xid)
173{
174 int rc;
175 int desiredAccess;
176 int disposition;
3d3ea8e6 177 int create_options = CREATE_NOT_DIR;
eeb910a6
PS
178 FILE_ALL_INFO *buf;
179
180 desiredAccess = cifs_convert_flags(f_flags);
181
182/*********************************************************************
183 * open flag mapping table:
184 *
185 * POSIX Flag CIFS Disposition
186 * ---------- ----------------
187 * O_CREAT FILE_OPEN_IF
188 * O_CREAT | O_EXCL FILE_CREATE
189 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
190 * O_TRUNC FILE_OVERWRITE
191 * none of the above FILE_OPEN
192 *
193 * Note that there is not a direct match between disposition
194 * FILE_SUPERSEDE (ie create whether or not file exists although
195 * O_CREAT | O_TRUNC is similar but truncates the existing
196 * file rather than creating a new file as FILE_SUPERSEDE does
197 * (which uses the attributes / metadata passed in on open call)
198 *?
199 *? O_SYNC is a reasonable match to CIFS writethrough flag
200 *? and the read write flags match reasonably. O_LARGEFILE
201 *? is irrelevant because largefile support is always used
202 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
203 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
204 *********************************************************************/
205
206 disposition = cifs_get_disposition(f_flags);
207
208 /* BB pass O_SYNC flag through on file attributes .. BB */
209
210 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
211 if (!buf)
212 return -ENOMEM;
213
3d3ea8e6
SP
214 if (backup_cred(cifs_sb))
215 create_options |= CREATE_OPEN_BACKUP_INTENT;
216
eeb910a6
PS
217 if (tcon->ses->capabilities & CAP_NT_SMBS)
218 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
3d3ea8e6 219 desiredAccess, create_options, pnetfid, poplock, buf,
eeb910a6
PS
220 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
221 & CIFS_MOUNT_MAP_SPECIAL_CHR);
222 else
223 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
224 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
225 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
226 & CIFS_MOUNT_MAP_SPECIAL_CHR);
227
228 if (rc)
229 goto out;
230
231 if (tcon->unix_ext)
232 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
233 xid);
234 else
235 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
236 xid, pnetfid);
237
238out:
239 kfree(buf);
240 return rc;
241}
242
15ecb436
JL
243struct cifsFileInfo *
244cifs_new_fileinfo(__u16 fileHandle, struct file *file,
245 struct tcon_link *tlink, __u32 oplock)
246{
247 struct dentry *dentry = file->f_path.dentry;
248 struct inode *inode = dentry->d_inode;
249 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
250 struct cifsFileInfo *pCifsFile;
251
252 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
253 if (pCifsFile == NULL)
254 return pCifsFile;
255
5f6dbc9e 256 pCifsFile->count = 1;
15ecb436
JL
257 pCifsFile->netfid = fileHandle;
258 pCifsFile->pid = current->tgid;
259 pCifsFile->uid = current_fsuid();
260 pCifsFile->dentry = dget(dentry);
261 pCifsFile->f_flags = file->f_flags;
262 pCifsFile->invalidHandle = false;
15ecb436
JL
263 pCifsFile->tlink = cifs_get_tlink(tlink);
264 mutex_init(&pCifsFile->fh_mutex);
15ecb436
JL
265 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
266
4477288a 267 spin_lock(&cifs_file_list_lock);
15ecb436
JL
268 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
269 /* if readable file instance put first in list*/
270 if (file->f_mode & FMODE_READ)
271 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
272 else
273 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
4477288a 274 spin_unlock(&cifs_file_list_lock);
15ecb436 275
c6723628 276 cifs_set_oplock_level(pCifsInode, oplock);
15ecb436
JL
277
278 file->private_data = pCifsFile;
279 return pCifsFile;
280}
281
cdff08e7
SF
282/*
283 * Release a reference on the file private data. This may involve closing
5f6dbc9e
JL
284 * the filehandle out on the server. Must be called without holding
285 * cifs_file_list_lock.
cdff08e7 286 */
b33879aa
JL
287void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
288{
e66673e3 289 struct inode *inode = cifs_file->dentry->d_inode;
96daf2b0 290 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
e66673e3 291 struct cifsInodeInfo *cifsi = CIFS_I(inode);
4f8ba8a0 292 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
cdff08e7
SF
293 struct cifsLockInfo *li, *tmp;
294
295 spin_lock(&cifs_file_list_lock);
5f6dbc9e 296 if (--cifs_file->count > 0) {
cdff08e7
SF
297 spin_unlock(&cifs_file_list_lock);
298 return;
299 }
300
301 /* remove it from the lists */
302 list_del(&cifs_file->flist);
303 list_del(&cifs_file->tlist);
304
305 if (list_empty(&cifsi->openFileList)) {
306 cFYI(1, "closing last open instance for inode %p",
307 cifs_file->dentry->d_inode);
4f8ba8a0
PS
308
309 /* in strict cache mode we need invalidate mapping on the last
310 close because it may cause a error when we open this file
311 again and get at least level II oplock */
312 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
313 CIFS_I(inode)->invalid_mapping = true;
314
c6723628 315 cifs_set_oplock_level(cifsi, 0);
cdff08e7
SF
316 }
317 spin_unlock(&cifs_file_list_lock);
318
ad635942
JL
319 cancel_work_sync(&cifs_file->oplock_break);
320
cdff08e7
SF
321 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
322 int xid, rc;
323
324 xid = GetXid();
325 rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
326 FreeXid(xid);
327 }
328
329 /* Delete any outstanding lock records. We'll lose them when the file
330 * is closed anyway.
331 */
d59dad2b
PS
332 mutex_lock(&cifsi->lock_mutex);
333 list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
334 if (li->netfid != cifs_file->netfid)
335 continue;
cdff08e7
SF
336 list_del(&li->llist);
337 kfree(li);
b33879aa 338 }
d59dad2b 339 mutex_unlock(&cifsi->lock_mutex);
cdff08e7
SF
340
341 cifs_put_tlink(cifs_file->tlink);
342 dput(cifs_file->dentry);
343 kfree(cifs_file);
b33879aa
JL
344}
345
1da177e4
LT
346int cifs_open(struct inode *inode, struct file *file)
347{
348 int rc = -EACCES;
590a3fe0
JL
349 int xid;
350 __u32 oplock;
1da177e4 351 struct cifs_sb_info *cifs_sb;
96daf2b0 352 struct cifs_tcon *tcon;
7ffec372 353 struct tcon_link *tlink;
6ca9f3ba 354 struct cifsFileInfo *pCifsFile = NULL;
1da177e4 355 char *full_path = NULL;
7e12eddb 356 bool posix_open_ok = false;
1da177e4 357 __u16 netfid;
1da177e4
LT
358
359 xid = GetXid();
360
361 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
362 tlink = cifs_sb_tlink(cifs_sb);
363 if (IS_ERR(tlink)) {
364 FreeXid(xid);
365 return PTR_ERR(tlink);
366 }
367 tcon = tlink_tcon(tlink);
1da177e4 368
e6a00296 369 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 370 if (full_path == NULL) {
0f3bc09e 371 rc = -ENOMEM;
232341ba 372 goto out;
1da177e4
LT
373 }
374
b6b38f70
JP
375 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
376 inode, file->f_flags, full_path);
276a74a4 377
e7504734 378 if (enable_oplocks)
276a74a4
SF
379 oplock = REQ_OPLOCK;
380 else
381 oplock = 0;
382
64cc2c63
SF
383 if (!tcon->broken_posix_open && tcon->unix_ext &&
384 (tcon->ses->capabilities & CAP_UNIX) &&
276a74a4
SF
385 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
386 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 387 /* can not refresh inode info since size could be stale */
2422f676 388 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c 389 cifs_sb->mnt_file_mode /* ignored */,
608712fe 390 file->f_flags, &oplock, &netfid, xid);
276a74a4 391 if (rc == 0) {
b6b38f70 392 cFYI(1, "posix open succeeded");
7e12eddb 393 posix_open_ok = true;
64cc2c63
SF
394 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
395 if (tcon->ses->serverNOS)
b6b38f70 396 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
397 " unexpected error on SMB posix open"
398 ", disabling posix open support."
399 " Check if server update available.",
400 tcon->ses->serverName,
b6b38f70 401 tcon->ses->serverNOS);
64cc2c63 402 tcon->broken_posix_open = true;
276a74a4
SF
403 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
404 (rc != -EOPNOTSUPP)) /* path not found or net err */
405 goto out;
64cc2c63
SF
406 /* else fallthrough to retry open the old way on network i/o
407 or DFS errors */
276a74a4
SF
408 }
409
7e12eddb
PS
410 if (!posix_open_ok) {
411 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
412 file->f_flags, &oplock, &netfid, xid);
413 if (rc)
414 goto out;
415 }
47c78b7f 416
abfe1eed 417 pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
6ca9f3ba 418 if (pCifsFile == NULL) {
7e12eddb 419 CIFSSMBClose(xid, tcon, netfid);
1da177e4
LT
420 rc = -ENOMEM;
421 goto out;
422 }
1da177e4 423
9451a9a5
SJ
424 cifs_fscache_set_inode_cookie(inode, file);
425
7e12eddb 426 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
1da177e4
LT
427 /* time to set mode which we can not set earlier due to
428 problems creating new read-only files */
7e12eddb
PS
429 struct cifs_unix_set_info_args args = {
430 .mode = inode->i_mode,
431 .uid = NO_CHANGE_64,
432 .gid = NO_CHANGE_64,
433 .ctime = NO_CHANGE_64,
434 .atime = NO_CHANGE_64,
435 .mtime = NO_CHANGE_64,
436 .device = 0,
437 };
d44a9fe2
JL
438 CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
439 pCifsFile->pid);
1da177e4
LT
440 }
441
442out:
1da177e4
LT
443 kfree(full_path);
444 FreeXid(xid);
7ffec372 445 cifs_put_tlink(tlink);
1da177e4
LT
446 return rc;
447}
448
0418726b 449/* Try to reacquire byte range locks that were released when session */
1da177e4
LT
450/* to server was lost */
451static int cifs_relock_file(struct cifsFileInfo *cifsFile)
452{
453 int rc = 0;
454
455/* BB list all locks open on this file and relock */
456
457 return rc;
458}
459
15886177 460static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
1da177e4
LT
461{
462 int rc = -EACCES;
590a3fe0
JL
463 int xid;
464 __u32 oplock;
1da177e4 465 struct cifs_sb_info *cifs_sb;
96daf2b0 466 struct cifs_tcon *tcon;
1da177e4 467 struct cifsInodeInfo *pCifsInode;
fb8c4b14 468 struct inode *inode;
1da177e4
LT
469 char *full_path = NULL;
470 int desiredAccess;
471 int disposition = FILE_OPEN;
3d3ea8e6 472 int create_options = CREATE_NOT_DIR;
1da177e4
LT
473 __u16 netfid;
474
1da177e4 475 xid = GetXid();
f0a71eb8 476 mutex_lock(&pCifsFile->fh_mutex);
4b18f2a9 477 if (!pCifsFile->invalidHandle) {
f0a71eb8 478 mutex_unlock(&pCifsFile->fh_mutex);
0f3bc09e 479 rc = 0;
1da177e4 480 FreeXid(xid);
0f3bc09e 481 return rc;
1da177e4
LT
482 }
483
15886177 484 inode = pCifsFile->dentry->d_inode;
1da177e4 485 cifs_sb = CIFS_SB(inode->i_sb);
13cfb733 486 tcon = tlink_tcon(pCifsFile->tlink);
3a9f462f 487
1da177e4
LT
488/* can not grab rename sem here because various ops, including
489 those that already have the rename sem can end up causing writepage
490 to get called and if the server was down that means we end up here,
491 and we can never tell if the caller already has the rename_sem */
15886177 492 full_path = build_path_from_dentry(pCifsFile->dentry);
1da177e4 493 if (full_path == NULL) {
3a9f462f 494 rc = -ENOMEM;
f0a71eb8 495 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4 496 FreeXid(xid);
3a9f462f 497 return rc;
1da177e4
LT
498 }
499
b6b38f70 500 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
15886177 501 inode, pCifsFile->f_flags, full_path);
1da177e4 502
e7504734 503 if (enable_oplocks)
1da177e4
LT
504 oplock = REQ_OPLOCK;
505 else
4b18f2a9 506 oplock = 0;
1da177e4 507
7fc8f4e9
SF
508 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
509 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
510 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
511
512 /*
513 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
514 * original open. Must mask them off for a reopen.
515 */
15886177
JL
516 unsigned int oflags = pCifsFile->f_flags &
517 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 518
2422f676 519 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
fa588e0c
SF
520 cifs_sb->mnt_file_mode /* ignored */,
521 oflags, &oplock, &netfid, xid);
7fc8f4e9 522 if (rc == 0) {
b6b38f70 523 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
524 goto reopen_success;
525 }
526 /* fallthrough to retry open the old way on errors, especially
527 in the reconnect path it is important to retry hard */
528 }
529
15886177 530 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
7fc8f4e9 531
3d3ea8e6
SP
532 if (backup_cred(cifs_sb))
533 create_options |= CREATE_OPEN_BACKUP_INTENT;
534
1da177e4 535 /* Can not refresh inode by passing in file_info buf to be returned
fb8c4b14
SF
536 by SMBOpen and then calling get_inode_info with returned buf
537 since file might have write behind data that needs to be flushed
1da177e4
LT
538 and server version of file size can be stale. If we knew for sure
539 that inode was not dirty locally we could do this */
540
7fc8f4e9 541 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
3d3ea8e6 542 create_options, &netfid, &oplock, NULL,
fb8c4b14 543 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
737b758c 544 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4 545 if (rc) {
f0a71eb8 546 mutex_unlock(&pCifsFile->fh_mutex);
b6b38f70
JP
547 cFYI(1, "cifs_open returned 0x%x", rc);
548 cFYI(1, "oplock: %d", oplock);
15886177
JL
549 goto reopen_error_exit;
550 }
551
7fc8f4e9 552reopen_success:
15886177
JL
553 pCifsFile->netfid = netfid;
554 pCifsFile->invalidHandle = false;
555 mutex_unlock(&pCifsFile->fh_mutex);
556 pCifsInode = CIFS_I(inode);
557
558 if (can_flush) {
559 rc = filemap_write_and_wait(inode->i_mapping);
eb4b756b 560 mapping_set_error(inode->i_mapping, rc);
15886177 561
15886177
JL
562 if (tcon->unix_ext)
563 rc = cifs_get_inode_info_unix(&inode,
564 full_path, inode->i_sb, xid);
565 else
566 rc = cifs_get_inode_info(&inode,
567 full_path, NULL, inode->i_sb,
568 xid, NULL);
569 } /* else we are writing out data to server already
570 and could deadlock if we tried to flush data, and
571 since we do not know if we have data that would
572 invalidate the current end of file on the server
573 we can not go to the server to get the new inod
574 info */
e66673e3 575
c6723628 576 cifs_set_oplock_level(pCifsInode, oplock);
e66673e3 577
15886177
JL
578 cifs_relock_file(pCifsFile);
579
580reopen_error_exit:
1da177e4
LT
581 kfree(full_path);
582 FreeXid(xid);
583 return rc;
584}
585
586int cifs_close(struct inode *inode, struct file *file)
587{
77970693
JL
588 if (file->private_data != NULL) {
589 cifsFileInfo_put(file->private_data);
590 file->private_data = NULL;
591 }
7ee1af76 592
cdff08e7
SF
593 /* return code from the ->release op is always ignored */
594 return 0;
1da177e4
LT
595}
596
597int cifs_closedir(struct inode *inode, struct file *file)
598{
599 int rc = 0;
600 int xid;
c21dfb69 601 struct cifsFileInfo *pCFileStruct = file->private_data;
1da177e4
LT
602 char *ptmp;
603
b6b38f70 604 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4
LT
605
606 xid = GetXid();
607
608 if (pCFileStruct) {
96daf2b0 609 struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
1da177e4 610
b6b38f70 611 cFYI(1, "Freeing private data in close dir");
4477288a 612 spin_lock(&cifs_file_list_lock);
4b18f2a9
SF
613 if (!pCFileStruct->srch_inf.endOfSearch &&
614 !pCFileStruct->invalidHandle) {
615 pCFileStruct->invalidHandle = true;
4477288a 616 spin_unlock(&cifs_file_list_lock);
1da177e4 617 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
b6b38f70
JP
618 cFYI(1, "Closing uncompleted readdir with rc %d",
619 rc);
1da177e4
LT
620 /* not much we can do if it fails anyway, ignore rc */
621 rc = 0;
ddb4cbfc 622 } else
4477288a 623 spin_unlock(&cifs_file_list_lock);
1da177e4
LT
624 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
625 if (ptmp) {
b6b38f70 626 cFYI(1, "closedir free smb buf in srch struct");
1da177e4 627 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
fb8c4b14 628 if (pCFileStruct->srch_inf.smallBuf)
d47d7c1a
SF
629 cifs_small_buf_release(ptmp);
630 else
631 cifs_buf_release(ptmp);
1da177e4 632 }
13cfb733 633 cifs_put_tlink(pCFileStruct->tlink);
1da177e4
LT
634 kfree(file->private_data);
635 file->private_data = NULL;
636 }
637 /* BB can we lock the filestruct while this is going on? */
638 FreeXid(xid);
639 return rc;
640}
641
d59dad2b 642static int store_file_lock(struct cifsInodeInfo *cinode, __u64 len,
03776f45 643 __u64 offset, __u8 type, __u16 netfid)
7ee1af76 644{
fb8c4b14
SF
645 struct cifsLockInfo *li =
646 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
647 if (li == NULL)
648 return -ENOMEM;
d59dad2b 649 li->netfid = netfid;
7ee1af76
JA
650 li->offset = offset;
651 li->length = len;
03776f45
PS
652 li->type = type;
653 li->pid = current->tgid;
d59dad2b
PS
654 mutex_lock(&cinode->lock_mutex);
655 list_add_tail(&li->llist, &cinode->llist);
656 mutex_unlock(&cinode->lock_mutex);
7ee1af76
JA
657 return 0;
658}
659
03776f45
PS
660static void
661cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
662 bool *wait_flag)
1da177e4 663{
03776f45 664 if (flock->fl_flags & FL_POSIX)
b6b38f70 665 cFYI(1, "Posix");
03776f45 666 if (flock->fl_flags & FL_FLOCK)
b6b38f70 667 cFYI(1, "Flock");
03776f45 668 if (flock->fl_flags & FL_SLEEP) {
b6b38f70 669 cFYI(1, "Blocking lock");
03776f45 670 *wait_flag = true;
1da177e4 671 }
03776f45 672 if (flock->fl_flags & FL_ACCESS)
b6b38f70 673 cFYI(1, "Process suspended by mandatory locking - "
03776f45
PS
674 "not implemented yet");
675 if (flock->fl_flags & FL_LEASE)
b6b38f70 676 cFYI(1, "Lease on file - not implemented yet");
03776f45 677 if (flock->fl_flags &
1da177e4 678 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
03776f45 679 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1da177e4 680
03776f45
PS
681 *type = LOCKING_ANDX_LARGE_FILES;
682 if (flock->fl_type == F_WRLCK) {
b6b38f70 683 cFYI(1, "F_WRLCK ");
03776f45
PS
684 *lock = 1;
685 } else if (flock->fl_type == F_UNLCK) {
b6b38f70 686 cFYI(1, "F_UNLCK");
03776f45
PS
687 *unlock = 1;
688 /* Check if unlock includes more than one lock range */
689 } else if (flock->fl_type == F_RDLCK) {
b6b38f70 690 cFYI(1, "F_RDLCK");
03776f45
PS
691 *type |= LOCKING_ANDX_SHARED_LOCK;
692 *lock = 1;
693 } else if (flock->fl_type == F_EXLCK) {
b6b38f70 694 cFYI(1, "F_EXLCK");
03776f45
PS
695 *lock = 1;
696 } else if (flock->fl_type == F_SHLCK) {
b6b38f70 697 cFYI(1, "F_SHLCK");
03776f45
PS
698 *type |= LOCKING_ANDX_SHARED_LOCK;
699 *lock = 1;
1da177e4 700 } else
b6b38f70 701 cFYI(1, "Unknown type of lock");
03776f45 702}
1da177e4 703
03776f45
PS
704static int
705cifs_getlk(struct cifsFileInfo *cfile, struct file_lock *flock, __u8 type,
706 bool wait_flag, bool posix_lck, int xid)
707{
708 int rc = 0;
709 __u64 length = 1 + flock->fl_end - flock->fl_start;
710 __u16 netfid = cfile->netfid;
711 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
f05337c6 712
03776f45
PS
713 if (posix_lck) {
714 int posix_lock_type;
715 if (type & LOCKING_ANDX_SHARED_LOCK)
716 posix_lock_type = CIFS_RDLCK;
717 else
718 posix_lock_type = CIFS_WRLCK;
719 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
720 length, flock, posix_lock_type,
721 wait_flag);
722 return rc;
723 }
1da177e4 724
03776f45
PS
725 /* BB we could chain these into one lock request BB */
726 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
727 flock->fl_start, 0, 1, type, 0, 0);
728 if (rc == 0) {
729 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
730 length, flock->fl_start, 1, 0,
731 type, 0, 0);
732 flock->fl_type = F_UNLCK;
733 if (rc != 0)
734 cERROR(1, "Error unlocking previously locked "
735 "range %d during test of lock", rc);
736 rc = 0;
1da177e4
LT
737 return rc;
738 }
7ee1af76 739
03776f45
PS
740 if (type & LOCKING_ANDX_SHARED_LOCK) {
741 flock->fl_type = F_WRLCK;
742 rc = 0;
743 return rc;
7ee1af76
JA
744 }
745
03776f45
PS
746 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
747 flock->fl_start, 0, 1,
748 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
749 if (rc == 0) {
750 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
751 length, flock->fl_start, 1, 0,
752 type | LOCKING_ANDX_SHARED_LOCK,
753 0, 0);
754 flock->fl_type = F_RDLCK;
755 if (rc != 0)
756 cERROR(1, "Error unlocking previously locked "
757 "range %d during test of lock", rc);
758 } else
759 flock->fl_type = F_WRLCK;
760
761 rc = 0;
762 return rc;
763}
764
765static int
766cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
767 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
768{
769 int rc = 0;
770 __u64 length = 1 + flock->fl_end - flock->fl_start;
771 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
772 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
d59dad2b 773 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
03776f45
PS
774 __u16 netfid = cfile->netfid;
775
776 if (posix_lck) {
08547b03 777 int posix_lock_type;
03776f45 778 if (type & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
779 posix_lock_type = CIFS_RDLCK;
780 else
781 posix_lock_type = CIFS_WRLCK;
50c2f753 782
03776f45 783 if (unlock == 1)
beb84dc8 784 posix_lock_type = CIFS_UNLCK;
7ee1af76 785
03776f45
PS
786 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, length,
787 flock, posix_lock_type, wait_flag);
788 goto out;
789 }
7ee1af76 790
03776f45
PS
791 if (lock) {
792 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
793 flock->fl_start, 0, lock, type, wait_flag, 0);
794 if (rc == 0) {
795 /* For Windows locks we must store them. */
d59dad2b 796 rc = store_file_lock(cinode, length, flock->fl_start,
03776f45
PS
797 type, netfid);
798 }
799 } else if (unlock) {
800 /*
801 * For each stored lock that this unlock overlaps completely,
802 * unlock it.
803 */
804 int stored_rc = 0;
805 struct cifsLockInfo *li, *tmp;
806
d59dad2b
PS
807 mutex_lock(&cinode->lock_mutex);
808 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
03776f45
PS
809 if (flock->fl_start > li->offset ||
810 (flock->fl_start + length) <
811 (li->offset + li->length))
812 continue;
813 if (current->tgid != li->pid)
814 continue;
d59dad2b
PS
815 if (cfile->netfid != li->netfid)
816 continue;
03776f45
PS
817
818 stored_rc = CIFSSMBLock(xid, tcon, netfid,
819 current->tgid, li->length,
820 li->offset, 1, 0, li->type,
821 0, 0);
822 if (stored_rc)
823 rc = stored_rc;
824 else {
825 list_del(&li->llist);
826 kfree(li);
7ee1af76 827 }
7ee1af76 828 }
d59dad2b 829 mutex_unlock(&cinode->lock_mutex);
03776f45
PS
830 }
831out:
832 if (flock->fl_flags & FL_POSIX)
833 posix_lock_file_wait(file, flock);
834 return rc;
835}
836
837int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
838{
839 int rc, xid;
840 int lock = 0, unlock = 0;
841 bool wait_flag = false;
842 bool posix_lck = false;
843 struct cifs_sb_info *cifs_sb;
844 struct cifs_tcon *tcon;
845 struct cifsInodeInfo *cinode;
846 struct cifsFileInfo *cfile;
847 __u16 netfid;
848 __u8 type;
849
850 rc = -EACCES;
851 xid = GetXid();
852
853 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
854 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
855 flock->fl_start, flock->fl_end);
856
857 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
858
859 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
860 cfile = (struct cifsFileInfo *)file->private_data;
861 tcon = tlink_tcon(cfile->tlink);
862 netfid = cfile->netfid;
863 cinode = CIFS_I(file->f_path.dentry->d_inode);
864
865 if ((tcon->ses->capabilities & CAP_UNIX) &&
866 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
867 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
868 posix_lck = true;
869 /*
870 * BB add code here to normalize offset and length to account for
871 * negative length which we can not accept over the wire.
872 */
873 if (IS_GETLK(cmd)) {
874 rc = cifs_getlk(cfile, flock, type, wait_flag, posix_lck, xid);
875 FreeXid(xid);
876 return rc;
877 }
878
879 if (!lock && !unlock) {
880 /*
881 * if no lock or unlock then nothing to do since we do not
882 * know what it is
883 */
884 FreeXid(xid);
885 return -EOPNOTSUPP;
7ee1af76
JA
886 }
887
03776f45
PS
888 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
889 xid);
1da177e4
LT
890 FreeXid(xid);
891 return rc;
892}
893
fbec9ab9 894/* update the file size (if needed) after a write */
72432ffc 895void
fbec9ab9
JL
896cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
897 unsigned int bytes_written)
898{
899 loff_t end_of_write = offset + bytes_written;
900
901 if (end_of_write > cifsi->server_eof)
902 cifsi->server_eof = end_of_write;
903}
904
fa2989f4 905static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
7da4b49a
JL
906 const char *write_data, size_t write_size,
907 loff_t *poffset)
1da177e4
LT
908{
909 int rc = 0;
910 unsigned int bytes_written = 0;
911 unsigned int total_written;
912 struct cifs_sb_info *cifs_sb;
96daf2b0 913 struct cifs_tcon *pTcon;
7749981e 914 int xid;
7da4b49a
JL
915 struct dentry *dentry = open_file->dentry;
916 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
fa2989f4 917 struct cifs_io_parms io_parms;
1da177e4 918
7da4b49a 919 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 920
b6b38f70 921 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
7da4b49a 922 *poffset, dentry->d_name.name);
1da177e4 923
13cfb733 924 pTcon = tlink_tcon(open_file->tlink);
50c2f753 925
1da177e4 926 xid = GetXid();
1da177e4 927
1da177e4
LT
928 for (total_written = 0; write_size > total_written;
929 total_written += bytes_written) {
930 rc = -EAGAIN;
931 while (rc == -EAGAIN) {
ca83ce3d
JL
932 struct kvec iov[2];
933 unsigned int len;
934
1da177e4 935 if (open_file->invalidHandle) {
1da177e4
LT
936 /* we could deadlock if we called
937 filemap_fdatawait from here so tell
fb8c4b14 938 reopen_file not to flush data to
1da177e4 939 server now */
15886177 940 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
941 if (rc != 0)
942 break;
943 }
ca83ce3d
JL
944
945 len = min((size_t)cifs_sb->wsize,
946 write_size - total_written);
947 /* iov[0] is reserved for smb header */
948 iov[1].iov_base = (char *)write_data + total_written;
949 iov[1].iov_len = len;
fa2989f4
PS
950 io_parms.netfid = open_file->netfid;
951 io_parms.pid = pid;
952 io_parms.tcon = pTcon;
953 io_parms.offset = *poffset;
954 io_parms.length = len;
955 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
956 1, 0);
1da177e4
LT
957 }
958 if (rc || (bytes_written == 0)) {
959 if (total_written)
960 break;
961 else {
962 FreeXid(xid);
963 return rc;
964 }
fbec9ab9
JL
965 } else {
966 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 967 *poffset += bytes_written;
fbec9ab9 968 }
1da177e4
LT
969 }
970
a4544347 971 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 972
7da4b49a
JL
973 if (total_written > 0) {
974 spin_lock(&dentry->d_inode->i_lock);
975 if (*poffset > dentry->d_inode->i_size)
976 i_size_write(dentry->d_inode, *poffset);
977 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 978 }
7da4b49a 979 mark_inode_dirty_sync(dentry->d_inode);
1da177e4
LT
980 FreeXid(xid);
981 return total_written;
982}
983
6508d904
JL
984struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
985 bool fsuid_only)
630f3f0c
SF
986{
987 struct cifsFileInfo *open_file = NULL;
6508d904
JL
988 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
989
990 /* only filter by fsuid on multiuser mounts */
991 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
992 fsuid_only = false;
630f3f0c 993
4477288a 994 spin_lock(&cifs_file_list_lock);
630f3f0c
SF
995 /* we could simply get the first_list_entry since write-only entries
996 are always at the end of the list but since the first entry might
997 have a close pending, we go through the whole list */
998 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
999 if (fsuid_only && open_file->uid != current_fsuid())
1000 continue;
2e396b83 1001 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1002 if (!open_file->invalidHandle) {
1003 /* found a good file */
1004 /* lock it so it will not be closed on us */
6ab409b5 1005 cifsFileInfo_get(open_file);
4477288a 1006 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1007 return open_file;
1008 } /* else might as well continue, and look for
1009 another, or simply have the caller reopen it
1010 again rather than trying to fix this handle */
1011 } else /* write only file */
1012 break; /* write only files are last so must be done */
1013 }
4477288a 1014 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1015 return NULL;
1016}
630f3f0c 1017
6508d904
JL
1018struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1019 bool fsuid_only)
6148a742
SF
1020{
1021 struct cifsFileInfo *open_file;
d3892294 1022 struct cifs_sb_info *cifs_sb;
2846d386 1023 bool any_available = false;
dd99cd80 1024 int rc;
6148a742 1025
60808233
SF
1026 /* Having a null inode here (because mapping->host was set to zero by
1027 the VFS or MM) should not happen but we had reports of on oops (due to
1028 it being zero) during stress testcases so we need to check for it */
1029
fb8c4b14 1030 if (cifs_inode == NULL) {
b6b38f70 1031 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1032 dump_stack();
1033 return NULL;
1034 }
1035
d3892294
JL
1036 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1037
6508d904
JL
1038 /* only filter by fsuid on multiuser mounts */
1039 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1040 fsuid_only = false;
1041
4477288a 1042 spin_lock(&cifs_file_list_lock);
9b22b0b7 1043refind_writable:
6148a742 1044 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1045 if (!any_available && open_file->pid != current->tgid)
1046 continue;
1047 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1048 continue;
2e396b83 1049 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 1050 cifsFileInfo_get(open_file);
9b22b0b7
SF
1051
1052 if (!open_file->invalidHandle) {
1053 /* found a good writable file */
4477288a 1054 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1055 return open_file;
1056 }
8840dee9 1057
4477288a 1058 spin_unlock(&cifs_file_list_lock);
cdff08e7 1059
9b22b0b7 1060 /* Had to unlock since following call can block */
15886177 1061 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1062 if (!rc)
1063 return open_file;
9b22b0b7 1064
cdff08e7 1065 /* if it fails, try another handle if possible */
b6b38f70 1066 cFYI(1, "wp failed on reopen file");
6ab409b5 1067 cifsFileInfo_put(open_file);
8840dee9 1068
cdff08e7
SF
1069 spin_lock(&cifs_file_list_lock);
1070
9b22b0b7
SF
1071 /* else we simply continue to the next entry. Thus
1072 we do not loop on reopen errors. If we
1073 can not reopen the file, for example if we
1074 reconnected to a server with another client
1075 racing to delete or lock the file we would not
1076 make progress if we restarted before the beginning
1077 of the loop here. */
6148a742
SF
1078 }
1079 }
2846d386
JL
1080 /* couldn't find useable FH with same pid, try any available */
1081 if (!any_available) {
1082 any_available = true;
1083 goto refind_writable;
1084 }
4477288a 1085 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1086 return NULL;
1087}
1088
1da177e4
LT
1089static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1090{
1091 struct address_space *mapping = page->mapping;
1092 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1093 char *write_data;
1094 int rc = -EFAULT;
1095 int bytes_written = 0;
1da177e4 1096 struct inode *inode;
6148a742 1097 struct cifsFileInfo *open_file;
1da177e4
LT
1098
1099 if (!mapping || !mapping->host)
1100 return -EFAULT;
1101
1102 inode = page->mapping->host;
1da177e4
LT
1103
1104 offset += (loff_t)from;
1105 write_data = kmap(page);
1106 write_data += from;
1107
1108 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1109 kunmap(page);
1110 return -EIO;
1111 }
1112
1113 /* racing with truncate? */
1114 if (offset > mapping->host->i_size) {
1115 kunmap(page);
1116 return 0; /* don't care */
1117 }
1118
1119 /* check to make sure that we are not extending the file */
1120 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1121 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1122
6508d904 1123 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1124 if (open_file) {
fa2989f4
PS
1125 bytes_written = cifs_write(open_file, open_file->pid,
1126 write_data, to - from, &offset);
6ab409b5 1127 cifsFileInfo_put(open_file);
1da177e4 1128 /* Does mm or vfs already set times? */
6148a742 1129 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1130 if ((bytes_written > 0) && (offset))
6148a742 1131 rc = 0;
bb5a9a04
SF
1132 else if (bytes_written < 0)
1133 rc = bytes_written;
6148a742 1134 } else {
b6b38f70 1135 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1136 rc = -EIO;
1137 }
1138
1139 kunmap(page);
1140 return rc;
1141}
1142
1da177e4 1143static int cifs_writepages(struct address_space *mapping,
37c0eb46 1144 struct writeback_control *wbc)
1da177e4 1145{
c3d17b63
JL
1146 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1147 bool done = false, scanned = false, range_whole = false;
1148 pgoff_t end, index;
1149 struct cifs_writedata *wdata;
37c0eb46 1150 struct page *page;
37c0eb46 1151 int rc = 0;
50c2f753 1152
37c0eb46 1153 /*
c3d17b63 1154 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1155 * one page at a time via cifs_writepage
1156 */
1157 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1158 return generic_writepages(mapping, wbc);
1159
111ebb6e 1160 if (wbc->range_cyclic) {
37c0eb46 1161 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1162 end = -1;
1163 } else {
1164 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1165 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1166 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1167 range_whole = true;
1168 scanned = true;
37c0eb46
SF
1169 }
1170retry:
c3d17b63
JL
1171 while (!done && index <= end) {
1172 unsigned int i, nr_pages, found_pages;
1173 pgoff_t next = 0, tofind;
1174 struct page **pages;
1175
1176 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1177 end - index) + 1;
1178
1179 wdata = cifs_writedata_alloc((unsigned int)tofind);
1180 if (!wdata) {
1181 rc = -ENOMEM;
1182 break;
1183 }
1184
1185 /*
1186 * find_get_pages_tag seems to return a max of 256 on each
1187 * iteration, so we must call it several times in order to
1188 * fill the array or the wsize is effectively limited to
1189 * 256 * PAGE_CACHE_SIZE.
1190 */
1191 found_pages = 0;
1192 pages = wdata->pages;
1193 do {
1194 nr_pages = find_get_pages_tag(mapping, &index,
1195 PAGECACHE_TAG_DIRTY,
1196 tofind, pages);
1197 found_pages += nr_pages;
1198 tofind -= nr_pages;
1199 pages += nr_pages;
1200 } while (nr_pages && tofind && index <= end);
1201
1202 if (found_pages == 0) {
1203 kref_put(&wdata->refcount, cifs_writedata_release);
1204 break;
1205 }
1206
1207 nr_pages = 0;
1208 for (i = 0; i < found_pages; i++) {
1209 page = wdata->pages[i];
37c0eb46
SF
1210 /*
1211 * At this point we hold neither mapping->tree_lock nor
1212 * lock on the page itself: the page may be truncated or
1213 * invalidated (changing page->mapping to NULL), or even
1214 * swizzled back from swapper_space to tmpfs file
1215 * mapping
1216 */
1217
c3d17b63 1218 if (nr_pages == 0)
37c0eb46 1219 lock_page(page);
529ae9aa 1220 else if (!trylock_page(page))
37c0eb46
SF
1221 break;
1222
1223 if (unlikely(page->mapping != mapping)) {
1224 unlock_page(page);
1225 break;
1226 }
1227
111ebb6e 1228 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1229 done = true;
37c0eb46
SF
1230 unlock_page(page);
1231 break;
1232 }
1233
1234 if (next && (page->index != next)) {
1235 /* Not next consecutive page */
1236 unlock_page(page);
1237 break;
1238 }
1239
1240 if (wbc->sync_mode != WB_SYNC_NONE)
1241 wait_on_page_writeback(page);
1242
1243 if (PageWriteback(page) ||
cb876f45 1244 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1245 unlock_page(page);
1246 break;
1247 }
84d2f07e 1248
cb876f45
LT
1249 /*
1250 * This actually clears the dirty bit in the radix tree.
1251 * See cifs_writepage() for more commentary.
1252 */
1253 set_page_writeback(page);
1254
84d2f07e 1255 if (page_offset(page) >= mapping->host->i_size) {
c3d17b63 1256 done = true;
84d2f07e 1257 unlock_page(page);
cb876f45 1258 end_page_writeback(page);
84d2f07e
SF
1259 break;
1260 }
1261
c3d17b63
JL
1262 wdata->pages[i] = page;
1263 next = page->index + 1;
1264 ++nr_pages;
1265 }
37c0eb46 1266
c3d17b63
JL
1267 /* reset index to refind any pages skipped */
1268 if (nr_pages == 0)
1269 index = wdata->pages[0]->index + 1;
84d2f07e 1270
c3d17b63
JL
1271 /* put any pages we aren't going to use */
1272 for (i = nr_pages; i < found_pages; i++) {
1273 page_cache_release(wdata->pages[i]);
1274 wdata->pages[i] = NULL;
1275 }
37c0eb46 1276
c3d17b63
JL
1277 /* nothing to write? */
1278 if (nr_pages == 0) {
1279 kref_put(&wdata->refcount, cifs_writedata_release);
1280 continue;
37c0eb46 1281 }
fbec9ab9 1282
c3d17b63
JL
1283 wdata->sync_mode = wbc->sync_mode;
1284 wdata->nr_pages = nr_pages;
1285 wdata->offset = page_offset(wdata->pages[0]);
941b853d 1286
c3d17b63
JL
1287 do {
1288 if (wdata->cfile != NULL)
1289 cifsFileInfo_put(wdata->cfile);
1290 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1291 false);
1292 if (!wdata->cfile) {
1293 cERROR(1, "No writable handles for inode");
1294 rc = -EBADF;
1295 break;
941b853d 1296 }
c3d17b63
JL
1297 rc = cifs_async_writev(wdata);
1298 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1299
c3d17b63
JL
1300 for (i = 0; i < nr_pages; ++i)
1301 unlock_page(wdata->pages[i]);
f3983c21 1302
c3d17b63
JL
1303 /* send failure -- clean up the mess */
1304 if (rc != 0) {
1305 for (i = 0; i < nr_pages; ++i) {
941b853d 1306 if (rc == -EAGAIN)
c3d17b63
JL
1307 redirty_page_for_writepage(wbc,
1308 wdata->pages[i]);
1309 else
1310 SetPageError(wdata->pages[i]);
1311 end_page_writeback(wdata->pages[i]);
1312 page_cache_release(wdata->pages[i]);
37c0eb46 1313 }
941b853d
JL
1314 if (rc != -EAGAIN)
1315 mapping_set_error(mapping, rc);
c3d17b63
JL
1316 }
1317 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1318
c3d17b63
JL
1319 wbc->nr_to_write -= nr_pages;
1320 if (wbc->nr_to_write <= 0)
1321 done = true;
b066a48c 1322
c3d17b63 1323 index = next;
37c0eb46 1324 }
c3d17b63 1325
37c0eb46
SF
1326 if (!scanned && !done) {
1327 /*
1328 * We hit the last page and there is more work to be done: wrap
1329 * back to the start of the file
1330 */
c3d17b63 1331 scanned = true;
37c0eb46
SF
1332 index = 0;
1333 goto retry;
1334 }
c3d17b63 1335
111ebb6e 1336 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1337 mapping->writeback_index = index;
1338
1da177e4
LT
1339 return rc;
1340}
1da177e4 1341
9ad1506b
PS
1342static int
1343cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1344{
9ad1506b 1345 int rc;
1da177e4
LT
1346 int xid;
1347
1348 xid = GetXid();
1349/* BB add check for wbc flags */
1350 page_cache_get(page);
ad7a2926 1351 if (!PageUptodate(page))
b6b38f70 1352 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1353
1354 /*
1355 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1356 *
1357 * A writepage() implementation always needs to do either this,
1358 * or re-dirty the page with "redirty_page_for_writepage()" in
1359 * the case of a failure.
1360 *
1361 * Just unlocking the page will cause the radix tree tag-bits
1362 * to fail to update with the state of the page correctly.
1363 */
fb8c4b14 1364 set_page_writeback(page);
9ad1506b 1365retry_write:
1da177e4 1366 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
1367 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1368 goto retry_write;
1369 else if (rc == -EAGAIN)
1370 redirty_page_for_writepage(wbc, page);
1371 else if (rc != 0)
1372 SetPageError(page);
1373 else
1374 SetPageUptodate(page);
cb876f45
LT
1375 end_page_writeback(page);
1376 page_cache_release(page);
1da177e4
LT
1377 FreeXid(xid);
1378 return rc;
1379}
1380
/* ->writepage: write the page out, then drop the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
1387
d9414774
NP
1388static int cifs_write_end(struct file *file, struct address_space *mapping,
1389 loff_t pos, unsigned len, unsigned copied,
1390 struct page *page, void *fsdata)
1da177e4 1391{
d9414774
NP
1392 int rc;
1393 struct inode *inode = mapping->host;
d4ffff1f
PS
1394 struct cifsFileInfo *cfile = file->private_data;
1395 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1396 __u32 pid;
1397
1398 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1399 pid = cfile->pid;
1400 else
1401 pid = current->tgid;
1da177e4 1402
b6b38f70
JP
1403 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1404 page, pos, copied);
d9414774 1405
a98ee8c1
JL
1406 if (PageChecked(page)) {
1407 if (copied == len)
1408 SetPageUptodate(page);
1409 ClearPageChecked(page);
1410 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1411 SetPageUptodate(page);
ad7a2926 1412
1da177e4 1413 if (!PageUptodate(page)) {
d9414774
NP
1414 char *page_data;
1415 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1416 int xid;
1417
1418 xid = GetXid();
1da177e4
LT
1419 /* this is probably better than directly calling
1420 partialpage_write since in this function the file handle is
1421 known which we might as well leverage */
1422 /* BB check if anything else missing out of ppw
1423 such as updating last write time */
1424 page_data = kmap(page);
d4ffff1f 1425 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
d9414774 1426 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1427 kunmap(page);
d9414774
NP
1428
1429 FreeXid(xid);
fb8c4b14 1430 } else {
d9414774
NP
1431 rc = copied;
1432 pos += copied;
1da177e4
LT
1433 set_page_dirty(page);
1434 }
1435
d9414774
NP
1436 if (rc > 0) {
1437 spin_lock(&inode->i_lock);
1438 if (pos > inode->i_size)
1439 i_size_write(inode, pos);
1440 spin_unlock(&inode->i_lock);
1441 }
1442
1443 unlock_page(page);
1444 page_cache_release(page);
1445
1da177e4
LT
1446 return rc;
1447}
1448
02c24a82
JB
1449int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1450 int datasync)
1da177e4
LT
1451{
1452 int xid;
1453 int rc = 0;
96daf2b0 1454 struct cifs_tcon *tcon;
c21dfb69 1455 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1456 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 1457 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4 1458
02c24a82
JB
1459 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1460 if (rc)
1461 return rc;
1462 mutex_lock(&inode->i_mutex);
1463
1da177e4
LT
1464 xid = GetXid();
1465
b6b38f70 1466 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1467 file->f_path.dentry->d_name.name, datasync);
50c2f753 1468
6feb9891
PS
1469 if (!CIFS_I(inode)->clientCanCacheRead) {
1470 rc = cifs_invalidate_mapping(inode);
1471 if (rc) {
1472 cFYI(1, "rc: %d during invalidate phase", rc);
1473 rc = 0; /* don't care about it in fsync */
1474 }
1475 }
eb4b756b 1476
8be7e6ba
PS
1477 tcon = tlink_tcon(smbfile->tlink);
1478 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1479 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1480
1481 FreeXid(xid);
02c24a82 1482 mutex_unlock(&inode->i_mutex);
8be7e6ba
PS
1483 return rc;
1484}
1485
02c24a82 1486int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba
PS
1487{
1488 int xid;
1489 int rc = 0;
96daf2b0 1490 struct cifs_tcon *tcon;
8be7e6ba
PS
1491 struct cifsFileInfo *smbfile = file->private_data;
1492 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
02c24a82
JB
1493 struct inode *inode = file->f_mapping->host;
1494
1495 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1496 if (rc)
1497 return rc;
1498 mutex_lock(&inode->i_mutex);
8be7e6ba
PS
1499
1500 xid = GetXid();
1501
1502 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1503 file->f_path.dentry->d_name.name, datasync);
1504
1505 tcon = tlink_tcon(smbfile->tlink);
1506 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1507 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 1508
1da177e4 1509 FreeXid(xid);
02c24a82 1510 mutex_unlock(&inode->i_mutex);
1da177e4
LT
1511 return rc;
1512}
1513
1da177e4
LT
1514/*
1515 * As file closes, flush all cached write data for this inode checking
1516 * for write behind errors.
1517 */
75e1fcc0 1518int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1519{
fb8c4b14 1520 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1521 int rc = 0;
1522
eb4b756b 1523 if (file->f_mode & FMODE_WRITE)
d3f1322a 1524 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1525
b6b38f70 1526 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1527
1528 return rc;
1529}
1530
72432ffc
PS
1531static int
1532cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1533{
1534 int rc = 0;
1535 unsigned long i;
1536
1537 for (i = 0; i < num_pages; i++) {
1538 pages[i] = alloc_page(__GFP_HIGHMEM);
1539 if (!pages[i]) {
1540 /*
1541 * save number of pages we have already allocated and
1542 * return with ENOMEM error
1543 */
1544 num_pages = i;
1545 rc = -ENOMEM;
1546 goto error;
1547 }
1548 }
1549
1550 return rc;
1551
1552error:
1553 for (i = 0; i < num_pages; i++)
1554 put_page(pages[i]);
1555 return rc;
1556}
1557
1558static inline
1559size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1560{
1561 size_t num_pages;
1562 size_t clen;
1563
1564 clen = min_t(const size_t, len, wsize);
1565 num_pages = clen / PAGE_CACHE_SIZE;
1566 if (clen % PAGE_CACHE_SIZE)
1567 num_pages++;
1568
1569 if (cur_len)
1570 *cur_len = clen;
1571
1572 return num_pages;
1573}
1574
1575static ssize_t
1576cifs_iovec_write(struct file *file, const struct iovec *iov,
1577 unsigned long nr_segs, loff_t *poffset)
1578{
76429c14
PS
1579 unsigned int written;
1580 unsigned long num_pages, npages, i;
1581 size_t copied, len, cur_len;
1582 ssize_t total_written = 0;
72432ffc
PS
1583 struct kvec *to_send;
1584 struct page **pages;
1585 struct iov_iter it;
1586 struct inode *inode;
1587 struct cifsFileInfo *open_file;
96daf2b0 1588 struct cifs_tcon *pTcon;
72432ffc 1589 struct cifs_sb_info *cifs_sb;
fa2989f4 1590 struct cifs_io_parms io_parms;
72432ffc 1591 int xid, rc;
d4ffff1f 1592 __u32 pid;
72432ffc
PS
1593
1594 len = iov_length(iov, nr_segs);
1595 if (!len)
1596 return 0;
1597
1598 rc = generic_write_checks(file, poffset, &len, 0);
1599 if (rc)
1600 return rc;
1601
1602 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1603 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1604
1605 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1606 if (!pages)
1607 return -ENOMEM;
1608
1609 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1610 if (!to_send) {
1611 kfree(pages);
1612 return -ENOMEM;
1613 }
1614
1615 rc = cifs_write_allocate_pages(pages, num_pages);
1616 if (rc) {
1617 kfree(pages);
1618 kfree(to_send);
1619 return rc;
1620 }
1621
1622 xid = GetXid();
1623 open_file = file->private_data;
d4ffff1f
PS
1624
1625 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1626 pid = open_file->pid;
1627 else
1628 pid = current->tgid;
1629
72432ffc
PS
1630 pTcon = tlink_tcon(open_file->tlink);
1631 inode = file->f_path.dentry->d_inode;
1632
1633 iov_iter_init(&it, iov, nr_segs, len, 0);
1634 npages = num_pages;
1635
1636 do {
1637 size_t save_len = cur_len;
1638 for (i = 0; i < npages; i++) {
1639 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1640 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1641 copied);
1642 cur_len -= copied;
1643 iov_iter_advance(&it, copied);
1644 to_send[i+1].iov_base = kmap(pages[i]);
1645 to_send[i+1].iov_len = copied;
1646 }
1647
1648 cur_len = save_len - cur_len;
1649
1650 do {
1651 if (open_file->invalidHandle) {
1652 rc = cifs_reopen_file(open_file, false);
1653 if (rc != 0)
1654 break;
1655 }
fa2989f4 1656 io_parms.netfid = open_file->netfid;
d4ffff1f 1657 io_parms.pid = pid;
fa2989f4
PS
1658 io_parms.tcon = pTcon;
1659 io_parms.offset = *poffset;
1660 io_parms.length = cur_len;
1661 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1662 npages, 0);
72432ffc
PS
1663 } while (rc == -EAGAIN);
1664
1665 for (i = 0; i < npages; i++)
1666 kunmap(pages[i]);
1667
1668 if (written) {
1669 len -= written;
1670 total_written += written;
1671 cifs_update_eof(CIFS_I(inode), *poffset, written);
1672 *poffset += written;
1673 } else if (rc < 0) {
1674 if (!total_written)
1675 total_written = rc;
1676 break;
1677 }
1678
1679 /* get length and number of kvecs of the next write */
1680 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1681 } while (len > 0);
1682
1683 if (total_written > 0) {
1684 spin_lock(&inode->i_lock);
1685 if (*poffset > inode->i_size)
1686 i_size_write(inode, *poffset);
1687 spin_unlock(&inode->i_lock);
1688 }
1689
1690 cifs_stats_bytes_written(pTcon, total_written);
1691 mark_inode_dirty_sync(inode);
1692
1693 for (i = 0; i < num_pages; i++)
1694 put_page(pages[i]);
1695 kfree(to_send);
1696 kfree(pages);
1697 FreeXid(xid);
1698 return total_written;
1699}
1700
0b81c1c4 1701ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
1702 unsigned long nr_segs, loff_t pos)
1703{
1704 ssize_t written;
1705 struct inode *inode;
1706
1707 inode = iocb->ki_filp->f_path.dentry->d_inode;
1708
1709 /*
1710 * BB - optimize the way when signing is disabled. We can drop this
1711 * extra memory-to-memory copying and use iovec buffers for constructing
1712 * write request.
1713 */
1714
1715 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1716 if (written > 0) {
1717 CIFS_I(inode)->invalid_mapping = true;
1718 iocb->ki_pos = pos;
1719 }
1720
1721 return written;
1722}
1723
1724ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1725 unsigned long nr_segs, loff_t pos)
1726{
1727 struct inode *inode;
1728
1729 inode = iocb->ki_filp->f_path.dentry->d_inode;
1730
1731 if (CIFS_I(inode)->clientCanCacheAll)
1732 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1733
1734 /*
1735 * In strict cache mode we need to write the data to the server exactly
1736 * from the pos to pos+len-1 rather than flush all affected pages
1737 * because it may cause a error with mandatory locks on these pages but
1738 * not on the region from pos to ppos+len-1.
1739 */
1740
1741 return cifs_user_writev(iocb, iov, nr_segs, pos);
1742}
1743
a70307ee
PS
1744static ssize_t
1745cifs_iovec_read(struct file *file, const struct iovec *iov,
1746 unsigned long nr_segs, loff_t *poffset)
1da177e4 1747{
a70307ee
PS
1748 int rc;
1749 int xid;
76429c14
PS
1750 ssize_t total_read;
1751 unsigned int bytes_read = 0;
a70307ee
PS
1752 size_t len, cur_len;
1753 int iov_offset = 0;
1da177e4 1754 struct cifs_sb_info *cifs_sb;
96daf2b0 1755 struct cifs_tcon *pTcon;
1da177e4 1756 struct cifsFileInfo *open_file;
1da177e4 1757 struct smb_com_read_rsp *pSMBr;
d4ffff1f 1758 struct cifs_io_parms io_parms;
a70307ee 1759 char *read_data;
d4ffff1f 1760 __u32 pid;
a70307ee
PS
1761
1762 if (!nr_segs)
1763 return 0;
1764
1765 len = iov_length(iov, nr_segs);
1766 if (!len)
1767 return 0;
1da177e4
LT
1768
1769 xid = GetXid();
e6a00296 1770 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1771
c21dfb69 1772 open_file = file->private_data;
13cfb733 1773 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1774
d4ffff1f
PS
1775 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1776 pid = open_file->pid;
1777 else
1778 pid = current->tgid;
1779
ad7a2926 1780 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1781 cFYI(1, "attempting read on write only file instance");
ad7a2926 1782
a70307ee
PS
1783 for (total_read = 0; total_read < len; total_read += bytes_read) {
1784 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
1da177e4 1785 rc = -EAGAIN;
a70307ee
PS
1786 read_data = NULL;
1787
1da177e4 1788 while (rc == -EAGAIN) {
ec637e3f 1789 int buf_type = CIFS_NO_BUFFER;
cdff08e7 1790 if (open_file->invalidHandle) {
15886177 1791 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1792 if (rc != 0)
1793 break;
1794 }
d4ffff1f
PS
1795 io_parms.netfid = open_file->netfid;
1796 io_parms.pid = pid;
1797 io_parms.tcon = pTcon;
1798 io_parms.offset = *poffset;
2cebaa58 1799 io_parms.length = cur_len;
d4ffff1f 1800 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
a70307ee
PS
1801 &read_data, &buf_type);
1802 pSMBr = (struct smb_com_read_rsp *)read_data;
1803 if (read_data) {
1804 char *data_offset = read_data + 4 +
1805 le16_to_cpu(pSMBr->DataOffset);
1806 if (memcpy_toiovecend(iov, data_offset,
1807 iov_offset, bytes_read))
93544cc6 1808 rc = -EFAULT;
fb8c4b14 1809 if (buf_type == CIFS_SMALL_BUFFER)
a70307ee 1810 cifs_small_buf_release(read_data);
fb8c4b14 1811 else if (buf_type == CIFS_LARGE_BUFFER)
a70307ee
PS
1812 cifs_buf_release(read_data);
1813 read_data = NULL;
1814 iov_offset += bytes_read;
1da177e4
LT
1815 }
1816 }
a70307ee 1817
1da177e4
LT
1818 if (rc || (bytes_read == 0)) {
1819 if (total_read) {
1820 break;
1821 } else {
1822 FreeXid(xid);
1823 return rc;
1824 }
1825 } else {
a4544347 1826 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1827 *poffset += bytes_read;
1828 }
1829 }
a70307ee 1830
1da177e4
LT
1831 FreeXid(xid);
1832 return total_read;
1833}
1834
0b81c1c4 1835ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
1836 unsigned long nr_segs, loff_t pos)
1837{
1838 ssize_t read;
1839
1840 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1841 if (read > 0)
1842 iocb->ki_pos = pos;
1843
1844 return read;
1845}
1846
1847ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1848 unsigned long nr_segs, loff_t pos)
1849{
1850 struct inode *inode;
1851
1852 inode = iocb->ki_filp->f_path.dentry->d_inode;
1853
1854 if (CIFS_I(inode)->clientCanCacheRead)
1855 return generic_file_aio_read(iocb, iov, nr_segs, pos);
1856
1857 /*
1858 * In strict cache mode we need to read from the server all the time
1859 * if we don't have level II oplock because the server can delay mtime
1860 * change - so we can't make a decision about inode invalidating.
1861 * And we can also fail with pagereading if there are mandatory locks
1862 * on pages affected by this read but not on the region from pos to
1863 * pos+len-1.
1864 */
1865
1866 return cifs_user_readv(iocb, iov, nr_segs, pos);
1867}
1da177e4
LT
1868
1869static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 1870 loff_t *poffset)
1da177e4
LT
1871{
1872 int rc = -EACCES;
1873 unsigned int bytes_read = 0;
1874 unsigned int total_read;
1875 unsigned int current_read_size;
1876 struct cifs_sb_info *cifs_sb;
96daf2b0 1877 struct cifs_tcon *pTcon;
1da177e4
LT
1878 int xid;
1879 char *current_offset;
1880 struct cifsFileInfo *open_file;
d4ffff1f 1881 struct cifs_io_parms io_parms;
ec637e3f 1882 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 1883 __u32 pid;
1da177e4
LT
1884
1885 xid = GetXid();
e6a00296 1886 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1887
1888 if (file->private_data == NULL) {
0f3bc09e 1889 rc = -EBADF;
1da177e4 1890 FreeXid(xid);
0f3bc09e 1891 return rc;
1da177e4 1892 }
c21dfb69 1893 open_file = file->private_data;
13cfb733 1894 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1895
d4ffff1f
PS
1896 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1897 pid = open_file->pid;
1898 else
1899 pid = current->tgid;
1900
1da177e4 1901 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1902 cFYI(1, "attempting read on write only file instance");
1da177e4 1903
fb8c4b14 1904 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1905 read_size > total_read;
1906 total_read += bytes_read, current_offset += bytes_read) {
7748dd6e 1907 current_read_size = min_t(uint, read_size - total_read,
1da177e4 1908 cifs_sb->rsize);
f9f5c817
SF
1909 /* For windows me and 9x we do not want to request more
1910 than it negotiated since it will refuse the read then */
fb8c4b14 1911 if ((pTcon->ses) &&
f9f5c817 1912 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
7748dd6e 1913 current_read_size = min_t(uint, current_read_size,
c974befa 1914 CIFSMaxBufSize);
f9f5c817 1915 }
1da177e4
LT
1916 rc = -EAGAIN;
1917 while (rc == -EAGAIN) {
cdff08e7 1918 if (open_file->invalidHandle) {
15886177 1919 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1920 if (rc != 0)
1921 break;
1922 }
d4ffff1f
PS
1923 io_parms.netfid = open_file->netfid;
1924 io_parms.pid = pid;
1925 io_parms.tcon = pTcon;
1926 io_parms.offset = *poffset;
1927 io_parms.length = current_read_size;
1928 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1929 &current_offset, &buf_type);
1da177e4
LT
1930 }
1931 if (rc || (bytes_read == 0)) {
1932 if (total_read) {
1933 break;
1934 } else {
1935 FreeXid(xid);
1936 return rc;
1937 }
1938 } else {
a4544347 1939 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1940 *poffset += bytes_read;
1941 }
1942 }
1943 FreeXid(xid);
1944 return total_read;
1945}
1946
ca83ce3d
JL
1947/*
1948 * If the page is mmap'ed into a process' page tables, then we need to make
1949 * sure that it doesn't change while being written back.
1950 */
1951static int
1952cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1953{
1954 struct page *page = vmf->page;
1955
1956 lock_page(page);
1957 return VM_FAULT_LOCKED;
1958}
1959
1960static struct vm_operations_struct cifs_file_vm_ops = {
1961 .fault = filemap_fault,
1962 .page_mkwrite = cifs_page_mkwrite,
1963};
1964
7a6a19b1
PS
1965int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1966{
1967 int rc, xid;
1968 struct inode *inode = file->f_path.dentry->d_inode;
1969
1970 xid = GetXid();
1971
6feb9891
PS
1972 if (!CIFS_I(inode)->clientCanCacheRead) {
1973 rc = cifs_invalidate_mapping(inode);
1974 if (rc)
1975 return rc;
1976 }
7a6a19b1
PS
1977
1978 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
1979 if (rc == 0)
1980 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
1981 FreeXid(xid);
1982 return rc;
1983}
1984
1da177e4
LT
1985int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1986{
1da177e4
LT
1987 int rc, xid;
1988
1989 xid = GetXid();
abab095d 1990 rc = cifs_revalidate_file(file);
1da177e4 1991 if (rc) {
b6b38f70 1992 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1993 FreeXid(xid);
1994 return rc;
1995 }
1996 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
1997 if (rc == 0)
1998 vma->vm_ops = &cifs_file_vm_ops;
1da177e4
LT
1999 FreeXid(xid);
2000 return rc;
2001}
2002
2003
fb8c4b14 2004static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 2005 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
2006{
2007 struct page *page;
2008 char *target;
2009
2010 while (bytes_read > 0) {
2011 if (list_empty(pages))
2012 break;
2013
2014 page = list_entry(pages->prev, struct page, lru);
2015 list_del(&page->lru);
2016
315e995c 2017 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
2018 GFP_KERNEL)) {
2019 page_cache_release(page);
b6b38f70 2020 cFYI(1, "Add page cache failed");
3079ca62
SF
2021 data += PAGE_CACHE_SIZE;
2022 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
2023 continue;
2024 }
06b43672 2025 page_cache_release(page);
1da177e4 2026
fb8c4b14 2027 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
2028
2029 if (PAGE_CACHE_SIZE > bytes_read) {
2030 memcpy(target, data, bytes_read);
2031 /* zero the tail end of this partial page */
fb8c4b14 2032 memset(target + bytes_read, 0,
1da177e4
LT
2033 PAGE_CACHE_SIZE - bytes_read);
2034 bytes_read = 0;
2035 } else {
2036 memcpy(target, data, PAGE_CACHE_SIZE);
2037 bytes_read -= PAGE_CACHE_SIZE;
2038 }
2039 kunmap_atomic(target, KM_USER0);
2040
2041 flush_dcache_page(page);
2042 SetPageUptodate(page);
2043 unlock_page(page);
1da177e4 2044 data += PAGE_CACHE_SIZE;
9dc06558
SJ
2045
2046 /* add page to FS-Cache */
2047 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
2048 }
2049 return;
2050}
2051
2052static int cifs_readpages(struct file *file, struct address_space *mapping,
2053 struct list_head *page_list, unsigned num_pages)
2054{
2055 int rc = -EACCES;
2056 int xid;
2057 loff_t offset;
2058 struct page *page;
2059 struct cifs_sb_info *cifs_sb;
96daf2b0 2060 struct cifs_tcon *pTcon;
2c2130e1 2061 unsigned int bytes_read = 0;
fb8c4b14 2062 unsigned int read_size, i;
1da177e4
LT
2063 char *smb_read_data = NULL;
2064 struct smb_com_read_rsp *pSMBr;
1da177e4 2065 struct cifsFileInfo *open_file;
d4ffff1f 2066 struct cifs_io_parms io_parms;
ec637e3f 2067 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 2068 __u32 pid;
1da177e4
LT
2069
2070 xid = GetXid();
2071 if (file->private_data == NULL) {
0f3bc09e 2072 rc = -EBADF;
1da177e4 2073 FreeXid(xid);
0f3bc09e 2074 return rc;
1da177e4 2075 }
c21dfb69 2076 open_file = file->private_data;
e6a00296 2077 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 2078 pTcon = tlink_tcon(open_file->tlink);
bfa0d75a 2079
56698236
SJ
2080 /*
2081 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2082 * immediately if the cookie is negative
2083 */
2084 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2085 &num_pages);
2086 if (rc == 0)
2087 goto read_complete;
2088
f19159dc 2089 cFYI(DBG2, "rpages: num pages %d", num_pages);
d4ffff1f
PS
2090 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2091 pid = open_file->pid;
2092 else
2093 pid = current->tgid;
2094
1da177e4
LT
2095 for (i = 0; i < num_pages; ) {
2096 unsigned contig_pages;
2097 struct page *tmp_page;
2098 unsigned long expected_index;
2099
2100 if (list_empty(page_list))
2101 break;
2102
2103 page = list_entry(page_list->prev, struct page, lru);
2104 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2105
2106 /* count adjacent pages that we will read into */
2107 contig_pages = 0;
fb8c4b14 2108 expected_index =
1da177e4 2109 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 2110 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
2111 if (tmp_page->index == expected_index) {
2112 contig_pages++;
2113 expected_index++;
2114 } else
fb8c4b14 2115 break;
1da177e4
LT
2116 }
2117 if (contig_pages + i > num_pages)
2118 contig_pages = num_pages - i;
2119
2120 /* for reads over a certain size could initiate async
2121 read ahead */
2122
2123 read_size = contig_pages * PAGE_CACHE_SIZE;
2124 /* Read size needs to be in multiples of one page */
2125 read_size = min_t(const unsigned int, read_size,
2126 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2127 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2128 read_size, contig_pages);
1da177e4
LT
2129 rc = -EAGAIN;
2130 while (rc == -EAGAIN) {
cdff08e7 2131 if (open_file->invalidHandle) {
15886177 2132 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
2133 if (rc != 0)
2134 break;
2135 }
d4ffff1f
PS
2136 io_parms.netfid = open_file->netfid;
2137 io_parms.pid = pid;
2138 io_parms.tcon = pTcon;
2139 io_parms.offset = offset;
2140 io_parms.length = read_size;
2141 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2142 &smb_read_data, &buf_type);
a9d02ad4 2143 /* BB more RC checks ? */
fb8c4b14 2144 if (rc == -EAGAIN) {
1da177e4 2145 if (smb_read_data) {
fb8c4b14 2146 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2147 cifs_small_buf_release(smb_read_data);
fb8c4b14 2148 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2149 cifs_buf_release(smb_read_data);
1da177e4
LT
2150 smb_read_data = NULL;
2151 }
2152 }
2153 }
2154 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2155 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2156 break;
2157 } else if (bytes_read > 0) {
6f88cc2e 2158 task_io_account_read(bytes_read);
1da177e4
LT
2159 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2160 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2161 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2162 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2163
2164 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2165 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2166 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2167 i++; /* account for partial page */
2168
fb8c4b14 2169 /* server copy of file can have smaller size
1da177e4 2170 than client */
fb8c4b14
SF
2171 /* BB do we need to verify this common case ?
2172 this case is ok - if we are at server EOF
1da177e4
LT
2173 we will hit it on next read */
2174
05ac9d4b 2175 /* break; */
1da177e4
LT
2176 }
2177 } else {
b6b38f70 2178 cFYI(1, "No bytes read (%d) at offset %lld . "
f19159dc 2179 "Cleaning remaining pages from readahead list",
b6b38f70 2180 bytes_read, offset);
fb8c4b14 2181 /* BB turn off caching and do new lookup on
1da177e4 2182 file size at server? */
1da177e4
LT
2183 break;
2184 }
2185 if (smb_read_data) {
fb8c4b14 2186 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2187 cifs_small_buf_release(smb_read_data);
fb8c4b14 2188 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2189 cifs_buf_release(smb_read_data);
1da177e4
LT
2190 smb_read_data = NULL;
2191 }
2192 bytes_read = 0;
2193 }
2194
1da177e4
LT
2195/* need to free smb_read_data buf before exit */
2196 if (smb_read_data) {
fb8c4b14 2197 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2198 cifs_small_buf_release(smb_read_data);
fb8c4b14 2199 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2200 cifs_buf_release(smb_read_data);
1da177e4 2201 smb_read_data = NULL;
fb8c4b14 2202 }
1da177e4 2203
56698236 2204read_complete:
1da177e4
LT
2205 FreeXid(xid);
2206 return rc;
2207}
2208
2209static int cifs_readpage_worker(struct file *file, struct page *page,
2210 loff_t *poffset)
2211{
2212 char *read_data;
2213 int rc;
2214
56698236
SJ
2215 /* Is the page cached? */
2216 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2217 if (rc == 0)
2218 goto read_complete;
2219
1da177e4
LT
2220 page_cache_get(page);
2221 read_data = kmap(page);
2222 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2223
1da177e4 2224 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2225
1da177e4
LT
2226 if (rc < 0)
2227 goto io_error;
2228 else
b6b38f70 2229 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2230
e6a00296
JJS
2231 file->f_path.dentry->d_inode->i_atime =
2232 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2233
1da177e4
LT
2234 if (PAGE_CACHE_SIZE > rc)
2235 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2236
2237 flush_dcache_page(page);
2238 SetPageUptodate(page);
9dc06558
SJ
2239
2240 /* send this page to the cache */
2241 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2242
1da177e4 2243 rc = 0;
fb8c4b14 2244
1da177e4 2245io_error:
fb8c4b14 2246 kunmap(page);
1da177e4 2247 page_cache_release(page);
56698236
SJ
2248
2249read_complete:
1da177e4
LT
2250 return rc;
2251}
2252
2253static int cifs_readpage(struct file *file, struct page *page)
2254{
2255 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2256 int rc = -EACCES;
2257 int xid;
2258
2259 xid = GetXid();
2260
2261 if (file->private_data == NULL) {
0f3bc09e 2262 rc = -EBADF;
1da177e4 2263 FreeXid(xid);
0f3bc09e 2264 return rc;
1da177e4
LT
2265 }
2266
b6b38f70
JP
2267 cFYI(1, "readpage %p at offset %d 0x%x\n",
2268 page, (int)offset, (int)offset);
1da177e4
LT
2269
2270 rc = cifs_readpage_worker(file, page, &offset);
2271
2272 unlock_page(page);
2273
2274 FreeXid(xid);
2275 return rc;
2276}
2277
a403a0a3
SF
2278static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2279{
2280 struct cifsFileInfo *open_file;
2281
4477288a 2282 spin_lock(&cifs_file_list_lock);
a403a0a3 2283 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2284 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2285 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2286 return 1;
2287 }
2288 }
4477288a 2289 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2290 return 0;
2291}
2292
1da177e4
LT
2293/* We do not want to update the file size from server for inodes
2294 open for write - to avoid races with writepage extending
2295 the file - in the future we could consider allowing
fb8c4b14 2296 refreshing the inode only on increases in the file size
1da177e4
LT
2297 but this is tricky to do without racing with writebehind
2298 page caching in the current Linux kernel design */
4b18f2a9 2299bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2300{
a403a0a3 2301 if (!cifsInode)
4b18f2a9 2302 return true;
50c2f753 2303
a403a0a3
SF
2304 if (is_inode_writable(cifsInode)) {
2305 /* This inode is open for write at least once */
c32a0b68
SF
2306 struct cifs_sb_info *cifs_sb;
2307
c32a0b68 2308 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2309 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2310 /* since no page cache to corrupt on directio
c32a0b68 2311 we can change size safely */
4b18f2a9 2312 return true;
c32a0b68
SF
2313 }
2314
fb8c4b14 2315 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2316 return true;
7ba52631 2317
4b18f2a9 2318 return false;
23e7dd7d 2319 } else
4b18f2a9 2320 return true;
1da177e4
LT
2321}
2322
d9414774
NP
2323static int cifs_write_begin(struct file *file, struct address_space *mapping,
2324 loff_t pos, unsigned len, unsigned flags,
2325 struct page **pagep, void **fsdata)
1da177e4 2326{
d9414774
NP
2327 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2328 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2329 loff_t page_start = pos & PAGE_MASK;
2330 loff_t i_size;
2331 struct page *page;
2332 int rc = 0;
d9414774 2333
b6b38f70 2334 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2335
54566b2c 2336 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2337 if (!page) {
2338 rc = -ENOMEM;
2339 goto out;
2340 }
8a236264 2341
a98ee8c1
JL
2342 if (PageUptodate(page))
2343 goto out;
8a236264 2344
a98ee8c1
JL
2345 /*
2346 * If we write a full page it will be up to date, no need to read from
2347 * the server. If the write is short, we'll end up doing a sync write
2348 * instead.
2349 */
2350 if (len == PAGE_CACHE_SIZE)
2351 goto out;
8a236264 2352
a98ee8c1
JL
2353 /*
2354 * optimize away the read when we have an oplock, and we're not
2355 * expecting to use any of the data we'd be reading in. That
2356 * is, when the page lies beyond the EOF, or straddles the EOF
2357 * and the write will cover all of the existing data.
2358 */
2359 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2360 i_size = i_size_read(mapping->host);
2361 if (page_start >= i_size ||
2362 (offset == 0 && (pos + len) >= i_size)) {
2363 zero_user_segments(page, 0, offset,
2364 offset + len,
2365 PAGE_CACHE_SIZE);
2366 /*
2367 * PageChecked means that the parts of the page
2368 * to which we're not writing are considered up
2369 * to date. Once the data is copied to the
2370 * page, it can be set uptodate.
2371 */
2372 SetPageChecked(page);
2373 goto out;
2374 }
2375 }
d9414774 2376
a98ee8c1
JL
2377 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2378 /*
2379 * might as well read a page, it is fast enough. If we get
2380 * an error, we don't need to return it. cifs_write_end will
2381 * do a sync write instead since PG_uptodate isn't set.
2382 */
2383 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2384 } else {
2385 /* we could try using another file handle if there is one -
2386 but how would we lock it to prevent close of that handle
2387 racing with this read? In any case
d9414774 2388 this will be written out by write_end so is fine */
1da177e4 2389 }
a98ee8c1
JL
2390out:
2391 *pagep = page;
2392 return rc;
1da177e4
LT
2393}
2394
85f2d6b4
SJ
2395static int cifs_release_page(struct page *page, gfp_t gfp)
2396{
2397 if (PagePrivate(page))
2398 return 0;
2399
2400 return cifs_fscache_release_page(page, gfp);
2401}
2402
2403static void cifs_invalidate_page(struct page *page, unsigned long offset)
2404{
2405 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2406
2407 if (offset == 0)
2408 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2409}
2410
9ad1506b
PS
2411static int cifs_launder_page(struct page *page)
2412{
2413 int rc = 0;
2414 loff_t range_start = page_offset(page);
2415 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2416 struct writeback_control wbc = {
2417 .sync_mode = WB_SYNC_ALL,
2418 .nr_to_write = 0,
2419 .range_start = range_start,
2420 .range_end = range_end,
2421 };
2422
2423 cFYI(1, "Launder page: %p", page);
2424
2425 if (clear_page_dirty_for_io(page))
2426 rc = cifs_writepage_locked(page, &wbc);
2427
2428 cifs_fscache_invalidate_page(page, page->mapping->host);
2429 return rc;
2430}
2431
9b646972 2432void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
2433{
2434 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2435 oplock_break);
a5e18bc3 2436 struct inode *inode = cfile->dentry->d_inode;
3bc303c2 2437 struct cifsInodeInfo *cinode = CIFS_I(inode);
eb4b756b 2438 int rc = 0;
3bc303c2
JL
2439
2440 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2441 if (cinode->clientCanCacheRead)
8737c930 2442 break_lease(inode, O_RDONLY);
d54ff732 2443 else
8737c930 2444 break_lease(inode, O_WRONLY);
3bc303c2
JL
2445 rc = filemap_fdatawrite(inode->i_mapping);
2446 if (cinode->clientCanCacheRead == 0) {
eb4b756b
JL
2447 rc = filemap_fdatawait(inode->i_mapping);
2448 mapping_set_error(inode->i_mapping, rc);
3bc303c2
JL
2449 invalidate_remote_inode(inode);
2450 }
b6b38f70 2451 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2452 }
2453
2454 /*
2455 * releasing stale oplock after recent reconnect of smb session using
2456 * a now incorrect file handle is not a data integrity issue but do
2457 * not bother sending an oplock release if session to server still is
2458 * disconnected since oplock already released by the server
2459 */
cdff08e7 2460 if (!cfile->oplock_break_cancelled) {
03776f45
PS
2461 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
2462 current->tgid, 0, 0, 0, 0,
2463 LOCKING_ANDX_OPLOCK_RELEASE, false,
12fed00d 2464 cinode->clientCanCacheRead ? 1 : 0);
b6b38f70 2465 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 2466 }
3bc303c2
JL
2467}
2468
f5e54d6e 2469const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2470 .readpage = cifs_readpage,
2471 .readpages = cifs_readpages,
2472 .writepage = cifs_writepage,
37c0eb46 2473 .writepages = cifs_writepages,
d9414774
NP
2474 .write_begin = cifs_write_begin,
2475 .write_end = cifs_write_end,
1da177e4 2476 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2477 .releasepage = cifs_release_page,
2478 .invalidatepage = cifs_invalidate_page,
9ad1506b 2479 .launder_page = cifs_launder_page,
1da177e4 2480};
273d81d6
DK
2481
2482/*
2483 * cifs_readpages requires the server to support a buffer large enough to
2484 * contain the header plus one complete page of data. Otherwise, we need
2485 * to leave cifs_readpages out of the address space operations.
2486 */
f5e54d6e 2487const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2488 .readpage = cifs_readpage,
2489 .writepage = cifs_writepage,
2490 .writepages = cifs_writepages,
d9414774
NP
2491 .write_begin = cifs_write_begin,
2492 .write_end = cifs_write_end,
273d81d6 2493 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2494 .releasepage = cifs_release_page,
2495 .invalidatepage = cifs_invalidate_page,
9ad1506b 2496 .launder_page = cifs_launder_page,
273d81d6 2497};