Merge git://git.samba.org/sfrench/cifs-2.6
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
1da177e4
LT
35#include <asm/div64.h>
36#include "cifsfs.h"
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_unicode.h"
41#include "cifs_debug.h"
42#include "cifs_fs_sb.h"
9451a9a5 43#include "fscache.h"
1da177e4 44
1da177e4
LT
45static inline int cifs_convert_flags(unsigned int flags)
46{
47 if ((flags & O_ACCMODE) == O_RDONLY)
48 return GENERIC_READ;
49 else if ((flags & O_ACCMODE) == O_WRONLY)
50 return GENERIC_WRITE;
51 else if ((flags & O_ACCMODE) == O_RDWR) {
52 /* GENERIC_ALL is too much permission to request
53 can cause unnecessary access denied on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ | GENERIC_WRITE);
56 }
57
e10f7b55
JL
58 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60 FILE_READ_DATA);
7fc8f4e9 61}
e10f7b55 62
608712fe 63static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 64{
608712fe 65 u32 posix_flags = 0;
e10f7b55 66
7fc8f4e9 67 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 68 posix_flags = SMB_O_RDONLY;
7fc8f4e9 69 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
70 posix_flags = SMB_O_WRONLY;
71 else if ((flags & O_ACCMODE) == O_RDWR)
72 posix_flags = SMB_O_RDWR;
73
74 if (flags & O_CREAT)
75 posix_flags |= SMB_O_CREAT;
76 if (flags & O_EXCL)
77 posix_flags |= SMB_O_EXCL;
78 if (flags & O_TRUNC)
79 posix_flags |= SMB_O_TRUNC;
80 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 81 if (flags & O_DSYNC)
608712fe 82 posix_flags |= SMB_O_SYNC;
7fc8f4e9 83 if (flags & O_DIRECTORY)
608712fe 84 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 85 if (flags & O_NOFOLLOW)
608712fe 86 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 87 if (flags & O_DIRECT)
608712fe 88 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
89
90 return posix_flags;
1da177e4
LT
91}
92
93static inline int cifs_get_disposition(unsigned int flags)
94{
95 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
96 return FILE_CREATE;
97 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
98 return FILE_OVERWRITE_IF;
99 else if ((flags & O_CREAT) == O_CREAT)
100 return FILE_OPEN_IF;
55aa2e09
SF
101 else if ((flags & O_TRUNC) == O_TRUNC)
102 return FILE_OVERWRITE;
1da177e4
LT
103 else
104 return FILE_OPEN;
105}
106
608712fe
JL
/*
 * Open a file using the SMB POSIX-extensions create call.
 *
 * On success the new file handle is returned through @pnetfid and the
 * granted oplock through @poplock.  If @pinode is non-NULL the inode is
 * created or refreshed from the FILE_UNIX_BASIC_INFO the server returned;
 * if the server reported an unknown type (-1) the caller is expected to
 * do a qpathinfo instead.  Returns 0 or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: just refresh its attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
168
eeb910a6
PS
/*
 * Open a file using the regular (non-POSIX) SMB open calls, choosing
 * CIFSSMBOpen or the pre-NT SMBLegacyOpen based on server capabilities.
 * On success the handle is returned via @pnetfid and the oplock via
 * @poplock, and the inode metadata is refreshed from the server.
 * Returns 0 or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		/* older (pre-NT) dialect servers need the legacy open */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh local inode metadata from what the open returned */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
238
15ecb436
JL
/*
 * Allocate and initialize a cifsFileInfo for a freshly opened handle,
 * link it onto the tcon and inode open-file lists (under
 * cifs_file_list_lock), record the granted oplock level, and attach it
 * to file->private_data.  Returns the new structure, or NULL on
 * allocation failure.  The returned structure starts with a refcount
 * of 1, dropped later via cifsFileInfo_put().
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);	/* hold dentry while handle is open */
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}
279
cdff08e7
SF
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference is dropped this unlinks the entry from the
 * inode and tcon open-file lists, cancels pending oplock-break work,
 * closes the server handle (when still valid), frees any stored byte
 * range locks, and frees the structure itself.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other holders remain; nothing more to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* must run outside the spinlock: the work item may sleep */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
341
1da177e4
LT
/*
 * ->open() for regular files.  Tries the SMB POSIX-extensions open first
 * (when the tcon advertises the capability and it has not been marked
 * broken); on EINVAL/EOPNOTSUPP from the server it disables posix opens
 * for this tcon and falls back to the regular NT/legacy open path.
 * On success, allocates the cifsFileInfo, sets the fscache cookie, and
 * for newly-created files on unix-ext mounts pushes the mode that could
 * not be set at create time.  Returns 0 or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims posix support but rejects the call:
			   remember that and stop trying on this tcon */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* close the just-opened server handle so it does not leak */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
444
0418726b 445/* Try to reacquire byte range locks that were released when session */
1da177e4
LT
446/* to server was lost */
447static int cifs_relock_file(struct cifsFileInfo *cifsFile)
448{
449 int rc = 0;
450
451/* BB list all locks open on this file and relock */
452
453 return rc;
454}
455
/*
 * Re-open a file whose server handle was invalidated (e.g. after a
 * session reconnect).  Serialized per-handle via fh_mutex; returns 0
 * immediately if another task already revalidated the handle.  Tries a
 * POSIX reopen first (masking off O_CREAT/O_EXCL/O_TRUNC, which already
 * took effect at the original open), then falls back to CIFSSMBOpen.
 * When @can_flush is set, dirty pages are written back and the inode
 * metadata refreshed; callers in the writeback path pass false to avoid
 * deadlock.  Returns 0 or a negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* someone else beat us to the reopen */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
577
578int cifs_close(struct inode *inode, struct file *file)
579{
77970693
JL
580 if (file->private_data != NULL) {
581 cifsFileInfo_put(file->private_data);
582 file->private_data = NULL;
583 }
7ee1af76 584
cdff08e7
SF
585 /* return code from the ->release op is always ignored */
586 return 0;
1da177e4
LT
587}
588
/*
 * ->release() for directories: close an in-progress FindFirst/FindNext
 * search on the server if one is still outstanding, release the cached
 * network search buffer, drop the tlink reference and free the private
 * data.  Server-side close failures are logged and ignored.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			/* mark invalid before dropping the lock so no one
			   else tries to use the dying handle */
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* small and large SMB buffers come from different
			   pools and must be released accordingly */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
633
7ee1af76
JA
/*
 * Record a byte-range lock granted by the server on the open file's
 * local lock list (protected by fid->lock_mutex) so it can be matched
 * and released on a later unlock.  Returns 0 or -ENOMEM.
 */
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
			   __u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}
649
1da177e4
LT
/*
 * ->lock() implementation.  Translates a VFS file_lock request into
 * either POSIX-extensions lock calls (when the server and mount allow
 * them) or Windows-style LockingAndX requests.  For GETLK a probe
 * lock/unlock pair is issued to discover whether a conflicting lock
 * exists; for SETLK/SETLKW Windows-style locks are also stored locally
 * (store_file_lock) so that a later unlock can find and release them.
 * Returns 0 or a negative errno.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	/* fl_end is inclusive, hence the +1 */
	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	/* map the VFS lock type to SMB lock counts and shared/exclusive */
	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock, posix_lock_type,
					wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		/* probe: try to take the lock; success means no conflict */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				/* exclusive probe failed: retry shared to
				   distinguish read locks from write locks */
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock, posix_lock_type,
				      wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	/* keep the local (VFS) lock state in sync with the server */
	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
850
fbec9ab9 851/* update the file size (if needed) after a write */
72432ffc 852void
fbec9ab9
JL
853cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
854 unsigned int bytes_written)
855{
856 loff_t end_of_write = offset + bytes_written;
857
858 if (end_of_write > cifsi->server_eof)
859 cifsi->server_eof = end_of_write;
860}
861
fa2989f4 862static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
7da4b49a
JL
863 const char *write_data, size_t write_size,
864 loff_t *poffset)
1da177e4
LT
865{
866 int rc = 0;
867 unsigned int bytes_written = 0;
868 unsigned int total_written;
869 struct cifs_sb_info *cifs_sb;
96daf2b0 870 struct cifs_tcon *pTcon;
7749981e 871 int xid;
7da4b49a
JL
872 struct dentry *dentry = open_file->dentry;
873 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
fa2989f4 874 struct cifs_io_parms io_parms;
1da177e4 875
7da4b49a 876 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 877
b6b38f70 878 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
7da4b49a 879 *poffset, dentry->d_name.name);
1da177e4 880
13cfb733 881 pTcon = tlink_tcon(open_file->tlink);
50c2f753 882
1da177e4 883 xid = GetXid();
1da177e4 884
1da177e4
LT
885 for (total_written = 0; write_size > total_written;
886 total_written += bytes_written) {
887 rc = -EAGAIN;
888 while (rc == -EAGAIN) {
ca83ce3d
JL
889 struct kvec iov[2];
890 unsigned int len;
891
1da177e4 892 if (open_file->invalidHandle) {
1da177e4
LT
893 /* we could deadlock if we called
894 filemap_fdatawait from here so tell
fb8c4b14 895 reopen_file not to flush data to
1da177e4 896 server now */
15886177 897 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
898 if (rc != 0)
899 break;
900 }
ca83ce3d
JL
901
902 len = min((size_t)cifs_sb->wsize,
903 write_size - total_written);
904 /* iov[0] is reserved for smb header */
905 iov[1].iov_base = (char *)write_data + total_written;
906 iov[1].iov_len = len;
fa2989f4
PS
907 io_parms.netfid = open_file->netfid;
908 io_parms.pid = pid;
909 io_parms.tcon = pTcon;
910 io_parms.offset = *poffset;
911 io_parms.length = len;
912 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
913 1, 0);
1da177e4
LT
914 }
915 if (rc || (bytes_written == 0)) {
916 if (total_written)
917 break;
918 else {
919 FreeXid(xid);
920 return rc;
921 }
fbec9ab9
JL
922 } else {
923 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 924 *poffset += bytes_written;
fbec9ab9 925 }
1da177e4
LT
926 }
927
a4544347 928 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 929
7da4b49a
JL
930 if (total_written > 0) {
931 spin_lock(&dentry->d_inode->i_lock);
932 if (*poffset > dentry->d_inode->i_size)
933 i_size_write(dentry->d_inode, *poffset);
934 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 935 }
7da4b49a 936 mark_inode_dirty_sync(dentry->d_inode);
1da177e4
LT
937 FreeXid(xid);
938 return total_written;
939}
940
6508d904
JL
/*
 * Find an open, readable, valid file handle for the given inode,
 * optionally restricted to handles owned by the current fsuid (only
 * enforced on multiuser mounts).  On success returns the cifsFileInfo
 * with an extra reference taken (caller must cifsFileInfo_put); returns
 * NULL if no suitable handle exists.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
630f3f0c 974
6508d904
JL
975struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
976 bool fsuid_only)
6148a742
SF
977{
978 struct cifsFileInfo *open_file;
d3892294 979 struct cifs_sb_info *cifs_sb;
2846d386 980 bool any_available = false;
dd99cd80 981 int rc;
6148a742 982
60808233
SF
983 /* Having a null inode here (because mapping->host was set to zero by
984 the VFS or MM) should not happen but we had reports of on oops (due to
985 it being zero) during stress testcases so we need to check for it */
986
fb8c4b14 987 if (cifs_inode == NULL) {
b6b38f70 988 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
989 dump_stack();
990 return NULL;
991 }
992
d3892294
JL
993 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
994
6508d904
JL
995 /* only filter by fsuid on multiuser mounts */
996 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
997 fsuid_only = false;
998
4477288a 999 spin_lock(&cifs_file_list_lock);
9b22b0b7 1000refind_writable:
6148a742 1001 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1002 if (!any_available && open_file->pid != current->tgid)
1003 continue;
1004 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1005 continue;
2e396b83 1006 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 1007 cifsFileInfo_get(open_file);
9b22b0b7
SF
1008
1009 if (!open_file->invalidHandle) {
1010 /* found a good writable file */
4477288a 1011 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1012 return open_file;
1013 }
8840dee9 1014
4477288a 1015 spin_unlock(&cifs_file_list_lock);
cdff08e7 1016
9b22b0b7 1017 /* Had to unlock since following call can block */
15886177 1018 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1019 if (!rc)
1020 return open_file;
9b22b0b7 1021
cdff08e7 1022 /* if it fails, try another handle if possible */
b6b38f70 1023 cFYI(1, "wp failed on reopen file");
6ab409b5 1024 cifsFileInfo_put(open_file);
8840dee9 1025
cdff08e7
SF
1026 spin_lock(&cifs_file_list_lock);
1027
9b22b0b7
SF
1028 /* else we simply continue to the next entry. Thus
1029 we do not loop on reopen errors. If we
1030 can not reopen the file, for example if we
1031 reconnected to a server with another client
1032 racing to delete or lock the file we would not
1033 make progress if we restarted before the beginning
1034 of the loop here. */
6148a742
SF
1035 }
1036 }
2846d386
JL
1037 /* couldn't find useable FH with same pid, try any available */
1038 if (!any_available) {
1039 any_available = true;
1040 goto refind_writable;
1041 }
4477288a 1042 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1043 return NULL;
1044}
1045
1da177e4
LT
1046static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1047{
1048 struct address_space *mapping = page->mapping;
1049 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1050 char *write_data;
1051 int rc = -EFAULT;
1052 int bytes_written = 0;
1da177e4 1053 struct inode *inode;
6148a742 1054 struct cifsFileInfo *open_file;
1da177e4
LT
1055
1056 if (!mapping || !mapping->host)
1057 return -EFAULT;
1058
1059 inode = page->mapping->host;
1da177e4
LT
1060
1061 offset += (loff_t)from;
1062 write_data = kmap(page);
1063 write_data += from;
1064
1065 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1066 kunmap(page);
1067 return -EIO;
1068 }
1069
1070 /* racing with truncate? */
1071 if (offset > mapping->host->i_size) {
1072 kunmap(page);
1073 return 0; /* don't care */
1074 }
1075
1076 /* check to make sure that we are not extending the file */
1077 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1078 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1079
6508d904 1080 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1081 if (open_file) {
fa2989f4
PS
1082 bytes_written = cifs_write(open_file, open_file->pid,
1083 write_data, to - from, &offset);
6ab409b5 1084 cifsFileInfo_put(open_file);
1da177e4 1085 /* Does mm or vfs already set times? */
6148a742 1086 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1087 if ((bytes_written > 0) && (offset))
6148a742 1088 rc = 0;
bb5a9a04
SF
1089 else if (bytes_written < 0)
1090 rc = bytes_written;
6148a742 1091 } else {
b6b38f70 1092 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1093 rc = -EIO;
1094 }
1095
1096 kunmap(page);
1097 return rc;
1098}
1099
1da177e4 1100static int cifs_writepages(struct address_space *mapping,
37c0eb46 1101 struct writeback_control *wbc)
1da177e4 1102{
c3d17b63
JL
1103 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1104 bool done = false, scanned = false, range_whole = false;
1105 pgoff_t end, index;
1106 struct cifs_writedata *wdata;
37c0eb46 1107 struct page *page;
37c0eb46 1108 int rc = 0;
50c2f753 1109
37c0eb46 1110 /*
c3d17b63 1111 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1112 * one page at a time via cifs_writepage
1113 */
1114 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1115 return generic_writepages(mapping, wbc);
1116
111ebb6e 1117 if (wbc->range_cyclic) {
37c0eb46 1118 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1119 end = -1;
1120 } else {
1121 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1122 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1123 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1124 range_whole = true;
1125 scanned = true;
37c0eb46
SF
1126 }
1127retry:
c3d17b63
JL
1128 while (!done && index <= end) {
1129 unsigned int i, nr_pages, found_pages;
1130 pgoff_t next = 0, tofind;
1131 struct page **pages;
1132
1133 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1134 end - index) + 1;
1135
1136 wdata = cifs_writedata_alloc((unsigned int)tofind);
1137 if (!wdata) {
1138 rc = -ENOMEM;
1139 break;
1140 }
1141
1142 /*
1143 * find_get_pages_tag seems to return a max of 256 on each
1144 * iteration, so we must call it several times in order to
1145 * fill the array or the wsize is effectively limited to
1146 * 256 * PAGE_CACHE_SIZE.
1147 */
1148 found_pages = 0;
1149 pages = wdata->pages;
1150 do {
1151 nr_pages = find_get_pages_tag(mapping, &index,
1152 PAGECACHE_TAG_DIRTY,
1153 tofind, pages);
1154 found_pages += nr_pages;
1155 tofind -= nr_pages;
1156 pages += nr_pages;
1157 } while (nr_pages && tofind && index <= end);
1158
1159 if (found_pages == 0) {
1160 kref_put(&wdata->refcount, cifs_writedata_release);
1161 break;
1162 }
1163
1164 nr_pages = 0;
1165 for (i = 0; i < found_pages; i++) {
1166 page = wdata->pages[i];
37c0eb46
SF
1167 /*
1168 * At this point we hold neither mapping->tree_lock nor
1169 * lock on the page itself: the page may be truncated or
1170 * invalidated (changing page->mapping to NULL), or even
1171 * swizzled back from swapper_space to tmpfs file
1172 * mapping
1173 */
1174
c3d17b63 1175 if (nr_pages == 0)
37c0eb46 1176 lock_page(page);
529ae9aa 1177 else if (!trylock_page(page))
37c0eb46
SF
1178 break;
1179
1180 if (unlikely(page->mapping != mapping)) {
1181 unlock_page(page);
1182 break;
1183 }
1184
111ebb6e 1185 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1186 done = true;
37c0eb46
SF
1187 unlock_page(page);
1188 break;
1189 }
1190
1191 if (next && (page->index != next)) {
1192 /* Not next consecutive page */
1193 unlock_page(page);
1194 break;
1195 }
1196
1197 if (wbc->sync_mode != WB_SYNC_NONE)
1198 wait_on_page_writeback(page);
1199
1200 if (PageWriteback(page) ||
cb876f45 1201 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1202 unlock_page(page);
1203 break;
1204 }
84d2f07e 1205
cb876f45
LT
1206 /*
1207 * This actually clears the dirty bit in the radix tree.
1208 * See cifs_writepage() for more commentary.
1209 */
1210 set_page_writeback(page);
1211
84d2f07e 1212 if (page_offset(page) >= mapping->host->i_size) {
c3d17b63 1213 done = true;
84d2f07e 1214 unlock_page(page);
cb876f45 1215 end_page_writeback(page);
84d2f07e
SF
1216 break;
1217 }
1218
c3d17b63
JL
1219 wdata->pages[i] = page;
1220 next = page->index + 1;
1221 ++nr_pages;
1222 }
37c0eb46 1223
c3d17b63
JL
1224 /* reset index to refind any pages skipped */
1225 if (nr_pages == 0)
1226 index = wdata->pages[0]->index + 1;
84d2f07e 1227
c3d17b63
JL
1228 /* put any pages we aren't going to use */
1229 for (i = nr_pages; i < found_pages; i++) {
1230 page_cache_release(wdata->pages[i]);
1231 wdata->pages[i] = NULL;
1232 }
37c0eb46 1233
c3d17b63
JL
1234 /* nothing to write? */
1235 if (nr_pages == 0) {
1236 kref_put(&wdata->refcount, cifs_writedata_release);
1237 continue;
37c0eb46 1238 }
fbec9ab9 1239
c3d17b63
JL
1240 wdata->sync_mode = wbc->sync_mode;
1241 wdata->nr_pages = nr_pages;
1242 wdata->offset = page_offset(wdata->pages[0]);
941b853d 1243
c3d17b63
JL
1244 do {
1245 if (wdata->cfile != NULL)
1246 cifsFileInfo_put(wdata->cfile);
1247 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1248 false);
1249 if (!wdata->cfile) {
1250 cERROR(1, "No writable handles for inode");
1251 rc = -EBADF;
1252 break;
941b853d 1253 }
c3d17b63
JL
1254 rc = cifs_async_writev(wdata);
1255 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1256
c3d17b63
JL
1257 for (i = 0; i < nr_pages; ++i)
1258 unlock_page(wdata->pages[i]);
f3983c21 1259
c3d17b63
JL
1260 /* send failure -- clean up the mess */
1261 if (rc != 0) {
1262 for (i = 0; i < nr_pages; ++i) {
941b853d 1263 if (rc == -EAGAIN)
c3d17b63
JL
1264 redirty_page_for_writepage(wbc,
1265 wdata->pages[i]);
1266 else
1267 SetPageError(wdata->pages[i]);
1268 end_page_writeback(wdata->pages[i]);
1269 page_cache_release(wdata->pages[i]);
37c0eb46 1270 }
941b853d
JL
1271 if (rc != -EAGAIN)
1272 mapping_set_error(mapping, rc);
c3d17b63
JL
1273 }
1274 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1275
c3d17b63
JL
1276 wbc->nr_to_write -= nr_pages;
1277 if (wbc->nr_to_write <= 0)
1278 done = true;
b066a48c 1279
c3d17b63 1280 index = next;
37c0eb46 1281 }
c3d17b63 1282
37c0eb46
SF
1283 if (!scanned && !done) {
1284 /*
1285 * We hit the last page and there is more work to be done: wrap
1286 * back to the start of the file
1287 */
c3d17b63 1288 scanned = true;
37c0eb46
SF
1289 index = 0;
1290 goto retry;
1291 }
c3d17b63 1292
111ebb6e 1293 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1294 mapping->writeback_index = index;
1295
1da177e4
LT
1296 return rc;
1297}
1da177e4 1298
9ad1506b
PS
1299static int
1300cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1301{
9ad1506b 1302 int rc;
1da177e4
LT
1303 int xid;
1304
1305 xid = GetXid();
1306/* BB add check for wbc flags */
1307 page_cache_get(page);
ad7a2926 1308 if (!PageUptodate(page))
b6b38f70 1309 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1310
1311 /*
1312 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1313 *
1314 * A writepage() implementation always needs to do either this,
1315 * or re-dirty the page with "redirty_page_for_writepage()" in
1316 * the case of a failure.
1317 *
1318 * Just unlocking the page will cause the radix tree tag-bits
1319 * to fail to update with the state of the page correctly.
1320 */
fb8c4b14 1321 set_page_writeback(page);
9ad1506b 1322retry_write:
1da177e4 1323 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
1324 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1325 goto retry_write;
1326 else if (rc == -EAGAIN)
1327 redirty_page_for_writepage(wbc, page);
1328 else if (rc != 0)
1329 SetPageError(page);
1330 else
1331 SetPageUptodate(page);
cb876f45
LT
1332 end_page_writeback(page);
1333 page_cache_release(page);
1da177e4
LT
1334 FreeXid(xid);
1335 return rc;
1336}
1337
9ad1506b
PS
1338static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1339{
1340 int rc = cifs_writepage_locked(page, wbc);
1341 unlock_page(page);
1342 return rc;
1343}
1344
d9414774
NP
1345static int cifs_write_end(struct file *file, struct address_space *mapping,
1346 loff_t pos, unsigned len, unsigned copied,
1347 struct page *page, void *fsdata)
1da177e4 1348{
d9414774
NP
1349 int rc;
1350 struct inode *inode = mapping->host;
d4ffff1f
PS
1351 struct cifsFileInfo *cfile = file->private_data;
1352 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1353 __u32 pid;
1354
1355 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1356 pid = cfile->pid;
1357 else
1358 pid = current->tgid;
1da177e4 1359
b6b38f70
JP
1360 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1361 page, pos, copied);
d9414774 1362
a98ee8c1
JL
1363 if (PageChecked(page)) {
1364 if (copied == len)
1365 SetPageUptodate(page);
1366 ClearPageChecked(page);
1367 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1368 SetPageUptodate(page);
ad7a2926 1369
1da177e4 1370 if (!PageUptodate(page)) {
d9414774
NP
1371 char *page_data;
1372 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1373 int xid;
1374
1375 xid = GetXid();
1da177e4
LT
1376 /* this is probably better than directly calling
1377 partialpage_write since in this function the file handle is
1378 known which we might as well leverage */
1379 /* BB check if anything else missing out of ppw
1380 such as updating last write time */
1381 page_data = kmap(page);
d4ffff1f 1382 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
d9414774 1383 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1384 kunmap(page);
d9414774
NP
1385
1386 FreeXid(xid);
fb8c4b14 1387 } else {
d9414774
NP
1388 rc = copied;
1389 pos += copied;
1da177e4
LT
1390 set_page_dirty(page);
1391 }
1392
d9414774
NP
1393 if (rc > 0) {
1394 spin_lock(&inode->i_lock);
1395 if (pos > inode->i_size)
1396 i_size_write(inode, pos);
1397 spin_unlock(&inode->i_lock);
1398 }
1399
1400 unlock_page(page);
1401 page_cache_release(page);
1402
1da177e4
LT
1403 return rc;
1404}
1405
02c24a82
JB
1406int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1407 int datasync)
1da177e4
LT
1408{
1409 int xid;
1410 int rc = 0;
96daf2b0 1411 struct cifs_tcon *tcon;
c21dfb69 1412 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1413 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 1414 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4 1415
02c24a82
JB
1416 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1417 if (rc)
1418 return rc;
1419 mutex_lock(&inode->i_mutex);
1420
1da177e4
LT
1421 xid = GetXid();
1422
b6b38f70 1423 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1424 file->f_path.dentry->d_name.name, datasync);
50c2f753 1425
6feb9891
PS
1426 if (!CIFS_I(inode)->clientCanCacheRead) {
1427 rc = cifs_invalidate_mapping(inode);
1428 if (rc) {
1429 cFYI(1, "rc: %d during invalidate phase", rc);
1430 rc = 0; /* don't care about it in fsync */
1431 }
1432 }
eb4b756b 1433
8be7e6ba
PS
1434 tcon = tlink_tcon(smbfile->tlink);
1435 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1436 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1437
1438 FreeXid(xid);
02c24a82 1439 mutex_unlock(&inode->i_mutex);
8be7e6ba
PS
1440 return rc;
1441}
1442
02c24a82 1443int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba
PS
1444{
1445 int xid;
1446 int rc = 0;
96daf2b0 1447 struct cifs_tcon *tcon;
8be7e6ba
PS
1448 struct cifsFileInfo *smbfile = file->private_data;
1449 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
02c24a82
JB
1450 struct inode *inode = file->f_mapping->host;
1451
1452 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1453 if (rc)
1454 return rc;
1455 mutex_lock(&inode->i_mutex);
8be7e6ba
PS
1456
1457 xid = GetXid();
1458
1459 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1460 file->f_path.dentry->d_name.name, datasync);
1461
1462 tcon = tlink_tcon(smbfile->tlink);
1463 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1464 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 1465
1da177e4 1466 FreeXid(xid);
02c24a82 1467 mutex_unlock(&inode->i_mutex);
1da177e4
LT
1468 return rc;
1469}
1470
1da177e4
LT
1471/*
1472 * As file closes, flush all cached write data for this inode checking
1473 * for write behind errors.
1474 */
75e1fcc0 1475int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1476{
fb8c4b14 1477 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1478 int rc = 0;
1479
eb4b756b 1480 if (file->f_mode & FMODE_WRITE)
d3f1322a 1481 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 1482
b6b38f70 1483 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1484
1485 return rc;
1486}
1487
72432ffc
PS
1488static int
1489cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1490{
1491 int rc = 0;
1492 unsigned long i;
1493
1494 for (i = 0; i < num_pages; i++) {
1495 pages[i] = alloc_page(__GFP_HIGHMEM);
1496 if (!pages[i]) {
1497 /*
1498 * save number of pages we have already allocated and
1499 * return with ENOMEM error
1500 */
1501 num_pages = i;
1502 rc = -ENOMEM;
1503 goto error;
1504 }
1505 }
1506
1507 return rc;
1508
1509error:
1510 for (i = 0; i < num_pages; i++)
1511 put_page(pages[i]);
1512 return rc;
1513}
1514
1515static inline
1516size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1517{
1518 size_t num_pages;
1519 size_t clen;
1520
1521 clen = min_t(const size_t, len, wsize);
1522 num_pages = clen / PAGE_CACHE_SIZE;
1523 if (clen % PAGE_CACHE_SIZE)
1524 num_pages++;
1525
1526 if (cur_len)
1527 *cur_len = clen;
1528
1529 return num_pages;
1530}
1531
1532static ssize_t
1533cifs_iovec_write(struct file *file, const struct iovec *iov,
1534 unsigned long nr_segs, loff_t *poffset)
1535{
76429c14
PS
1536 unsigned int written;
1537 unsigned long num_pages, npages, i;
1538 size_t copied, len, cur_len;
1539 ssize_t total_written = 0;
72432ffc
PS
1540 struct kvec *to_send;
1541 struct page **pages;
1542 struct iov_iter it;
1543 struct inode *inode;
1544 struct cifsFileInfo *open_file;
96daf2b0 1545 struct cifs_tcon *pTcon;
72432ffc 1546 struct cifs_sb_info *cifs_sb;
fa2989f4 1547 struct cifs_io_parms io_parms;
72432ffc 1548 int xid, rc;
d4ffff1f 1549 __u32 pid;
72432ffc
PS
1550
1551 len = iov_length(iov, nr_segs);
1552 if (!len)
1553 return 0;
1554
1555 rc = generic_write_checks(file, poffset, &len, 0);
1556 if (rc)
1557 return rc;
1558
1559 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1560 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1561
1562 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1563 if (!pages)
1564 return -ENOMEM;
1565
1566 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1567 if (!to_send) {
1568 kfree(pages);
1569 return -ENOMEM;
1570 }
1571
1572 rc = cifs_write_allocate_pages(pages, num_pages);
1573 if (rc) {
1574 kfree(pages);
1575 kfree(to_send);
1576 return rc;
1577 }
1578
1579 xid = GetXid();
1580 open_file = file->private_data;
d4ffff1f
PS
1581
1582 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1583 pid = open_file->pid;
1584 else
1585 pid = current->tgid;
1586
72432ffc
PS
1587 pTcon = tlink_tcon(open_file->tlink);
1588 inode = file->f_path.dentry->d_inode;
1589
1590 iov_iter_init(&it, iov, nr_segs, len, 0);
1591 npages = num_pages;
1592
1593 do {
1594 size_t save_len = cur_len;
1595 for (i = 0; i < npages; i++) {
1596 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1597 copied = iov_iter_copy_from_user(pages[i], &it, 0,
1598 copied);
1599 cur_len -= copied;
1600 iov_iter_advance(&it, copied);
1601 to_send[i+1].iov_base = kmap(pages[i]);
1602 to_send[i+1].iov_len = copied;
1603 }
1604
1605 cur_len = save_len - cur_len;
1606
1607 do {
1608 if (open_file->invalidHandle) {
1609 rc = cifs_reopen_file(open_file, false);
1610 if (rc != 0)
1611 break;
1612 }
fa2989f4 1613 io_parms.netfid = open_file->netfid;
d4ffff1f 1614 io_parms.pid = pid;
fa2989f4
PS
1615 io_parms.tcon = pTcon;
1616 io_parms.offset = *poffset;
1617 io_parms.length = cur_len;
1618 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1619 npages, 0);
72432ffc
PS
1620 } while (rc == -EAGAIN);
1621
1622 for (i = 0; i < npages; i++)
1623 kunmap(pages[i]);
1624
1625 if (written) {
1626 len -= written;
1627 total_written += written;
1628 cifs_update_eof(CIFS_I(inode), *poffset, written);
1629 *poffset += written;
1630 } else if (rc < 0) {
1631 if (!total_written)
1632 total_written = rc;
1633 break;
1634 }
1635
1636 /* get length and number of kvecs of the next write */
1637 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1638 } while (len > 0);
1639
1640 if (total_written > 0) {
1641 spin_lock(&inode->i_lock);
1642 if (*poffset > inode->i_size)
1643 i_size_write(inode, *poffset);
1644 spin_unlock(&inode->i_lock);
1645 }
1646
1647 cifs_stats_bytes_written(pTcon, total_written);
1648 mark_inode_dirty_sync(inode);
1649
1650 for (i = 0; i < num_pages; i++)
1651 put_page(pages[i]);
1652 kfree(to_send);
1653 kfree(pages);
1654 FreeXid(xid);
1655 return total_written;
1656}
1657
0b81c1c4 1658ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
1659 unsigned long nr_segs, loff_t pos)
1660{
1661 ssize_t written;
1662 struct inode *inode;
1663
1664 inode = iocb->ki_filp->f_path.dentry->d_inode;
1665
1666 /*
1667 * BB - optimize the way when signing is disabled. We can drop this
1668 * extra memory-to-memory copying and use iovec buffers for constructing
1669 * write request.
1670 */
1671
1672 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1673 if (written > 0) {
1674 CIFS_I(inode)->invalid_mapping = true;
1675 iocb->ki_pos = pos;
1676 }
1677
1678 return written;
1679}
1680
1681ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1682 unsigned long nr_segs, loff_t pos)
1683{
1684 struct inode *inode;
1685
1686 inode = iocb->ki_filp->f_path.dentry->d_inode;
1687
1688 if (CIFS_I(inode)->clientCanCacheAll)
1689 return generic_file_aio_write(iocb, iov, nr_segs, pos);
1690
1691 /*
1692 * In strict cache mode we need to write the data to the server exactly
1693 * from the pos to pos+len-1 rather than flush all affected pages
1694 * because it may cause a error with mandatory locks on these pages but
1695 * not on the region from pos to ppos+len-1.
1696 */
1697
1698 return cifs_user_writev(iocb, iov, nr_segs, pos);
1699}
1700
a70307ee
PS
1701static ssize_t
1702cifs_iovec_read(struct file *file, const struct iovec *iov,
1703 unsigned long nr_segs, loff_t *poffset)
1da177e4 1704{
a70307ee
PS
1705 int rc;
1706 int xid;
76429c14
PS
1707 ssize_t total_read;
1708 unsigned int bytes_read = 0;
a70307ee
PS
1709 size_t len, cur_len;
1710 int iov_offset = 0;
1da177e4 1711 struct cifs_sb_info *cifs_sb;
96daf2b0 1712 struct cifs_tcon *pTcon;
1da177e4 1713 struct cifsFileInfo *open_file;
1da177e4 1714 struct smb_com_read_rsp *pSMBr;
d4ffff1f 1715 struct cifs_io_parms io_parms;
a70307ee 1716 char *read_data;
d4ffff1f 1717 __u32 pid;
a70307ee
PS
1718
1719 if (!nr_segs)
1720 return 0;
1721
1722 len = iov_length(iov, nr_segs);
1723 if (!len)
1724 return 0;
1da177e4
LT
1725
1726 xid = GetXid();
e6a00296 1727 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1728
c21dfb69 1729 open_file = file->private_data;
13cfb733 1730 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1731
d4ffff1f
PS
1732 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1733 pid = open_file->pid;
1734 else
1735 pid = current->tgid;
1736
ad7a2926 1737 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1738 cFYI(1, "attempting read on write only file instance");
ad7a2926 1739
a70307ee
PS
1740 for (total_read = 0; total_read < len; total_read += bytes_read) {
1741 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
1da177e4 1742 rc = -EAGAIN;
a70307ee
PS
1743 read_data = NULL;
1744
1da177e4 1745 while (rc == -EAGAIN) {
ec637e3f 1746 int buf_type = CIFS_NO_BUFFER;
cdff08e7 1747 if (open_file->invalidHandle) {
15886177 1748 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1749 if (rc != 0)
1750 break;
1751 }
d4ffff1f
PS
1752 io_parms.netfid = open_file->netfid;
1753 io_parms.pid = pid;
1754 io_parms.tcon = pTcon;
1755 io_parms.offset = *poffset;
2cebaa58 1756 io_parms.length = cur_len;
d4ffff1f 1757 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
a70307ee
PS
1758 &read_data, &buf_type);
1759 pSMBr = (struct smb_com_read_rsp *)read_data;
1760 if (read_data) {
1761 char *data_offset = read_data + 4 +
1762 le16_to_cpu(pSMBr->DataOffset);
1763 if (memcpy_toiovecend(iov, data_offset,
1764 iov_offset, bytes_read))
93544cc6 1765 rc = -EFAULT;
fb8c4b14 1766 if (buf_type == CIFS_SMALL_BUFFER)
a70307ee 1767 cifs_small_buf_release(read_data);
fb8c4b14 1768 else if (buf_type == CIFS_LARGE_BUFFER)
a70307ee
PS
1769 cifs_buf_release(read_data);
1770 read_data = NULL;
1771 iov_offset += bytes_read;
1da177e4
LT
1772 }
1773 }
a70307ee 1774
1da177e4
LT
1775 if (rc || (bytes_read == 0)) {
1776 if (total_read) {
1777 break;
1778 } else {
1779 FreeXid(xid);
1780 return rc;
1781 }
1782 } else {
a4544347 1783 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1784 *poffset += bytes_read;
1785 }
1786 }
a70307ee 1787
1da177e4
LT
1788 FreeXid(xid);
1789 return total_read;
1790}
1791
0b81c1c4 1792ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
1793 unsigned long nr_segs, loff_t pos)
1794{
1795 ssize_t read;
1796
1797 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1798 if (read > 0)
1799 iocb->ki_pos = pos;
1800
1801 return read;
1802}
1803
1804ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1805 unsigned long nr_segs, loff_t pos)
1806{
1807 struct inode *inode;
1808
1809 inode = iocb->ki_filp->f_path.dentry->d_inode;
1810
1811 if (CIFS_I(inode)->clientCanCacheRead)
1812 return generic_file_aio_read(iocb, iov, nr_segs, pos);
1813
1814 /*
1815 * In strict cache mode we need to read from the server all the time
1816 * if we don't have level II oplock because the server can delay mtime
1817 * change - so we can't make a decision about inode invalidating.
1818 * And we can also fail with pagereading if there are mandatory locks
1819 * on pages affected by this read but not on the region from pos to
1820 * pos+len-1.
1821 */
1822
1823 return cifs_user_readv(iocb, iov, nr_segs, pos);
1824}
1da177e4
LT
1825
1826static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 1827 loff_t *poffset)
1da177e4
LT
1828{
1829 int rc = -EACCES;
1830 unsigned int bytes_read = 0;
1831 unsigned int total_read;
1832 unsigned int current_read_size;
1833 struct cifs_sb_info *cifs_sb;
96daf2b0 1834 struct cifs_tcon *pTcon;
1da177e4
LT
1835 int xid;
1836 char *current_offset;
1837 struct cifsFileInfo *open_file;
d4ffff1f 1838 struct cifs_io_parms io_parms;
ec637e3f 1839 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 1840 __u32 pid;
1da177e4
LT
1841
1842 xid = GetXid();
e6a00296 1843 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1844
1845 if (file->private_data == NULL) {
0f3bc09e 1846 rc = -EBADF;
1da177e4 1847 FreeXid(xid);
0f3bc09e 1848 return rc;
1da177e4 1849 }
c21dfb69 1850 open_file = file->private_data;
13cfb733 1851 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1852
d4ffff1f
PS
1853 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1854 pid = open_file->pid;
1855 else
1856 pid = current->tgid;
1857
1da177e4 1858 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1859 cFYI(1, "attempting read on write only file instance");
1da177e4 1860
fb8c4b14 1861 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1862 read_size > total_read;
1863 total_read += bytes_read, current_offset += bytes_read) {
1864 current_read_size = min_t(const int, read_size - total_read,
1865 cifs_sb->rsize);
f9f5c817
SF
1866 /* For windows me and 9x we do not want to request more
1867 than it negotiated since it will refuse the read then */
fb8c4b14 1868 if ((pTcon->ses) &&
f9f5c817
SF
1869 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1870 current_read_size = min_t(const int, current_read_size,
1871 pTcon->ses->server->maxBuf - 128);
1872 }
1da177e4
LT
1873 rc = -EAGAIN;
1874 while (rc == -EAGAIN) {
cdff08e7 1875 if (open_file->invalidHandle) {
15886177 1876 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
1877 if (rc != 0)
1878 break;
1879 }
d4ffff1f
PS
1880 io_parms.netfid = open_file->netfid;
1881 io_parms.pid = pid;
1882 io_parms.tcon = pTcon;
1883 io_parms.offset = *poffset;
1884 io_parms.length = current_read_size;
1885 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1886 &current_offset, &buf_type);
1da177e4
LT
1887 }
1888 if (rc || (bytes_read == 0)) {
1889 if (total_read) {
1890 break;
1891 } else {
1892 FreeXid(xid);
1893 return rc;
1894 }
1895 } else {
a4544347 1896 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1897 *poffset += bytes_read;
1898 }
1899 }
1900 FreeXid(xid);
1901 return total_read;
1902}
1903
ca83ce3d
JL
1904/*
1905 * If the page is mmap'ed into a process' page tables, then we need to make
1906 * sure that it doesn't change while being written back.
1907 */
1908static int
1909cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1910{
1911 struct page *page = vmf->page;
1912
1913 lock_page(page);
1914 return VM_FAULT_LOCKED;
1915}
1916
1917static struct vm_operations_struct cifs_file_vm_ops = {
1918 .fault = filemap_fault,
1919 .page_mkwrite = cifs_page_mkwrite,
1920};
1921
7a6a19b1
PS
1922int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
1923{
1924 int rc, xid;
1925 struct inode *inode = file->f_path.dentry->d_inode;
1926
1927 xid = GetXid();
1928
6feb9891
PS
1929 if (!CIFS_I(inode)->clientCanCacheRead) {
1930 rc = cifs_invalidate_mapping(inode);
1931 if (rc)
1932 return rc;
1933 }
7a6a19b1
PS
1934
1935 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
1936 if (rc == 0)
1937 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
1938 FreeXid(xid);
1939 return rc;
1940}
1941
1da177e4
LT
1942int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1943{
1da177e4
LT
1944 int rc, xid;
1945
1946 xid = GetXid();
abab095d 1947 rc = cifs_revalidate_file(file);
1da177e4 1948 if (rc) {
b6b38f70 1949 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1950 FreeXid(xid);
1951 return rc;
1952 }
1953 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
1954 if (rc == 0)
1955 vma->vm_ops = &cifs_file_vm_ops;
1da177e4
LT
1956 FreeXid(xid);
1957 return rc;
1958}
1959
1960
fb8c4b14 1961static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1962 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1963{
1964 struct page *page;
1965 char *target;
1966
1967 while (bytes_read > 0) {
1968 if (list_empty(pages))
1969 break;
1970
1971 page = list_entry(pages->prev, struct page, lru);
1972 list_del(&page->lru);
1973
315e995c 1974 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1975 GFP_KERNEL)) {
1976 page_cache_release(page);
b6b38f70 1977 cFYI(1, "Add page cache failed");
3079ca62
SF
1978 data += PAGE_CACHE_SIZE;
1979 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1980 continue;
1981 }
06b43672 1982 page_cache_release(page);
1da177e4 1983
fb8c4b14 1984 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1985
1986 if (PAGE_CACHE_SIZE > bytes_read) {
1987 memcpy(target, data, bytes_read);
1988 /* zero the tail end of this partial page */
fb8c4b14 1989 memset(target + bytes_read, 0,
1da177e4
LT
1990 PAGE_CACHE_SIZE - bytes_read);
1991 bytes_read = 0;
1992 } else {
1993 memcpy(target, data, PAGE_CACHE_SIZE);
1994 bytes_read -= PAGE_CACHE_SIZE;
1995 }
1996 kunmap_atomic(target, KM_USER0);
1997
1998 flush_dcache_page(page);
1999 SetPageUptodate(page);
2000 unlock_page(page);
1da177e4 2001 data += PAGE_CACHE_SIZE;
9dc06558
SJ
2002
2003 /* add page to FS-Cache */
2004 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
2005 }
2006 return;
2007}
2008
/*
 * ->readpages: populate a list of pagecache pages with file data.
 *
 * First tries to satisfy as many pages as possible from FS-Cache; any
 * remaining pages are read from the server with large CIFSSMBRead calls
 * that cover runs of index-adjacent pages, and the response payload is
 * copied into the pages by cifs_copy_cache_pages().
 *
 * Returns 0 on success or a negative error; pages left on @page_list on
 * error are cleaned up by the caller (the generic readahead code).
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = GetXid();
	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = tlink_tcon(open_file->tlink);

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		goto read_complete;

	cFYI(DBG2, "rpages: num pages %d", num_pages);
	/* forward the originating pid on multiuser mounts; else use ours */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		/* pages are kept in reverse order: the tail is the lowest index */
		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
				read_size, contig_pages);
		rc = -EAGAIN;
		/* retry loop: -EAGAIN means the handle went stale (reconnect) */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = offset;
			io_parms.length = read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &smb_read_data, &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				/* release the response buffer before retrying */
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, "Read error in readpages: %d", rc);
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			/* payload starts after RFC1001 header at DataOffset */
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset));

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, "No bytes read (%d) at offset %lld . "
				"Cleaning remaining pages from readahead list",
				bytes_read, offset);
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

read_complete:
	FreeXid(xid);
	return rc;
}
2165
2166static int cifs_readpage_worker(struct file *file, struct page *page,
2167 loff_t *poffset)
2168{
2169 char *read_data;
2170 int rc;
2171
56698236
SJ
2172 /* Is the page cached? */
2173 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2174 if (rc == 0)
2175 goto read_complete;
2176
1da177e4
LT
2177 page_cache_get(page);
2178 read_data = kmap(page);
2179 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2180
1da177e4 2181 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2182
1da177e4
LT
2183 if (rc < 0)
2184 goto io_error;
2185 else
b6b38f70 2186 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2187
e6a00296
JJS
2188 file->f_path.dentry->d_inode->i_atime =
2189 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2190
1da177e4
LT
2191 if (PAGE_CACHE_SIZE > rc)
2192 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2193
2194 flush_dcache_page(page);
2195 SetPageUptodate(page);
9dc06558
SJ
2196
2197 /* send this page to the cache */
2198 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2199
1da177e4 2200 rc = 0;
fb8c4b14 2201
1da177e4 2202io_error:
fb8c4b14 2203 kunmap(page);
1da177e4 2204 page_cache_release(page);
56698236
SJ
2205
2206read_complete:
1da177e4
LT
2207 return rc;
2208}
2209
2210static int cifs_readpage(struct file *file, struct page *page)
2211{
2212 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2213 int rc = -EACCES;
2214 int xid;
2215
2216 xid = GetXid();
2217
2218 if (file->private_data == NULL) {
0f3bc09e 2219 rc = -EBADF;
1da177e4 2220 FreeXid(xid);
0f3bc09e 2221 return rc;
1da177e4
LT
2222 }
2223
b6b38f70
JP
2224 cFYI(1, "readpage %p at offset %d 0x%x\n",
2225 page, (int)offset, (int)offset);
1da177e4
LT
2226
2227 rc = cifs_readpage_worker(file, page, &offset);
2228
2229 unlock_page(page);
2230
2231 FreeXid(xid);
2232 return rc;
2233}
2234
a403a0a3
SF
2235static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2236{
2237 struct cifsFileInfo *open_file;
2238
4477288a 2239 spin_lock(&cifs_file_list_lock);
a403a0a3 2240 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2241 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2242 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2243 return 1;
2244 }
2245 }
4477288a 2246 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2247 return 0;
2248}
2249
1da177e4
LT
2250/* We do not want to update the file size from server for inodes
2251 open for write - to avoid races with writepage extending
2252 the file - in the future we could consider allowing
fb8c4b14 2253 refreshing the inode only on increases in the file size
1da177e4
LT
2254 but this is tricky to do without racing with writebehind
2255 page caching in the current Linux kernel design */
4b18f2a9 2256bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2257{
a403a0a3 2258 if (!cifsInode)
4b18f2a9 2259 return true;
50c2f753 2260
a403a0a3
SF
2261 if (is_inode_writable(cifsInode)) {
2262 /* This inode is open for write at least once */
c32a0b68
SF
2263 struct cifs_sb_info *cifs_sb;
2264
c32a0b68 2265 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2266 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2267 /* since no page cache to corrupt on directio
c32a0b68 2268 we can change size safely */
4b18f2a9 2269 return true;
c32a0b68
SF
2270 }
2271
fb8c4b14 2272 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2273 return true;
7ba52631 2274
4b18f2a9 2275 return false;
23e7dd7d 2276 } else
4b18f2a9 2277 return true;
1da177e4
LT
2278}
2279
/*
 * ->write_begin: pin and prepare the pagecache page that will receive a
 * write of @len bytes at @pos. Tries hard to avoid a read from the
 * server when the existing page contents will not be needed (full-page
 * write, write beyond EOF under a read oplock, etc.). On return *pagep
 * holds a locked page; returns 0 or -ENOMEM.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	/* already uptodate: nothing to prepare */
	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2351
85f2d6b4
SJ
2352static int cifs_release_page(struct page *page, gfp_t gfp)
2353{
2354 if (PagePrivate(page))
2355 return 0;
2356
2357 return cifs_fscache_release_page(page, gfp);
2358}
2359
2360static void cifs_invalidate_page(struct page *page, unsigned long offset)
2361{
2362 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2363
2364 if (offset == 0)
2365 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2366}
2367
9ad1506b
PS
2368static int cifs_launder_page(struct page *page)
2369{
2370 int rc = 0;
2371 loff_t range_start = page_offset(page);
2372 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2373 struct writeback_control wbc = {
2374 .sync_mode = WB_SYNC_ALL,
2375 .nr_to_write = 0,
2376 .range_start = range_start,
2377 .range_end = range_end,
2378 };
2379
2380 cFYI(1, "Launder page: %p", page);
2381
2382 if (clear_page_dirty_for_io(page))
2383 rc = cifs_writepage_locked(page, &wbc);
2384
2385 cifs_fscache_invalidate_page(page, page->mapping->host);
2386 return rc;
2387}
2388
/*
 * Workqueue handler run when the server breaks an oplock on @cfile.
 * Propagates the break to local leases, flushes (and, when losing read
 * caching, waits for and invalidates) the inode's page cache, and then
 * acknowledges the break to the server unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* break local leases down to what the server still allows */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* losing read caching: wait for writeback and drop
			   locally cached pages */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
2424
/* Address space operations for the normal case, where the negotiated
   SMB buffer is large enough for cifs_readpages to be usable. */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
273d81d6
DK
2437
2438/*
2439 * cifs_readpages requires the server to support a buffer large enough to
2440 * contain the header plus one complete page of data. Otherwise, we need
2441 * to leave cifs_readpages out of the address space operations.
2442 */
/* Identical to cifs_addr_ops except that ->readpages is omitted: with
   small SMB buffers the server cannot return a header plus a full page
   of data in one response (see comment above). */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};