cifs: writing past end of struct in cifs_convert_address()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
690c5e31 35#include <linux/swap.h>
1da177e4
LT
36#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
9451a9a5 44#include "fscache.h"
1da177e4 45
1da177e4
LT
46static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
e10f7b55
JL
59 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
7fc8f4e9 62}
e10f7b55 63
608712fe 64static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 65{
608712fe 66 u32 posix_flags = 0;
e10f7b55 67
7fc8f4e9 68 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 69 posix_flags = SMB_O_RDONLY;
7fc8f4e9 70 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
71 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 82 if (flags & O_DSYNC)
608712fe 83 posix_flags |= SMB_O_SYNC;
7fc8f4e9 84 if (flags & O_DIRECTORY)
608712fe 85 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 86 if (flags & O_NOFOLLOW)
608712fe 87 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 88 if (flags & O_DIRECT)
608712fe 89 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
90
91 return posix_flags;
1da177e4
LT
92}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
55aa2e09
SF
102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
1da177e4
LT
104 else
105 return FILE_OPEN;
106}
107
608712fe
JL
108int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
110 __u32 *poplock, __u16 *pnetfid, int xid)
111{
112 int rc;
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
96daf2b0 118 struct cifs_tcon *tcon;
608712fe
JL
119
120 cFYI(1, "posix open %s", full_path);
121
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
124 return -ENOMEM;
125
126 tlink = cifs_sb_tlink(cifs_sb);
127 if (IS_ERR(tlink)) {
128 rc = PTR_ERR(tlink);
129 goto posix_open_ret;
130 }
131
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
134
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
141
142 if (rc)
143 goto posix_open_ret;
144
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
147
148 if (!pinode)
149 goto posix_open_ret; /* caller does not need info */
150
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
152
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
157 if (!*pinode) {
158 rc = -ENOMEM;
159 goto posix_open_ret;
160 }
161 } else {
162 cifs_fattr_to_inode(*pinode, &fattr);
163 }
164
165posix_open_ret:
166 kfree(presp_data);
167 return rc;
168}
169
eeb910a6
PS
170static int
171cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
96daf2b0 172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
eeb910a6
PS
173 __u16 *pnetfid, int xid)
174{
175 int rc;
176 int desiredAccess;
177 int disposition;
3d3ea8e6 178 int create_options = CREATE_NOT_DIR;
eeb910a6
PS
179 FILE_ALL_INFO *buf;
180
181 desiredAccess = cifs_convert_flags(f_flags);
182
183/*********************************************************************
184 * open flag mapping table:
185 *
186 * POSIX Flag CIFS Disposition
187 * ---------- ----------------
188 * O_CREAT FILE_OPEN_IF
189 * O_CREAT | O_EXCL FILE_CREATE
190 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
191 * O_TRUNC FILE_OVERWRITE
192 * none of the above FILE_OPEN
193 *
194 * Note that there is not a direct match between disposition
195 * FILE_SUPERSEDE (ie create whether or not file exists although
196 * O_CREAT | O_TRUNC is similar but truncates the existing
197 * file rather than creating a new file as FILE_SUPERSEDE does
198 * (which uses the attributes / metadata passed in on open call)
199 *?
200 *? O_SYNC is a reasonable match to CIFS writethrough flag
201 *? and the read write flags match reasonably. O_LARGEFILE
202 *? is irrelevant because largefile support is always used
203 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
204 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
205 *********************************************************************/
206
207 disposition = cifs_get_disposition(f_flags);
208
209 /* BB pass O_SYNC flag through on file attributes .. BB */
210
211 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
212 if (!buf)
213 return -ENOMEM;
214
3d3ea8e6
SP
215 if (backup_cred(cifs_sb))
216 create_options |= CREATE_OPEN_BACKUP_INTENT;
217
eeb910a6
PS
218 if (tcon->ses->capabilities & CAP_NT_SMBS)
219 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
3d3ea8e6 220 desiredAccess, create_options, pnetfid, poplock, buf,
eeb910a6
PS
221 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
222 & CIFS_MOUNT_MAP_SPECIAL_CHR);
223 else
224 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
225 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
226 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
227 & CIFS_MOUNT_MAP_SPECIAL_CHR);
228
229 if (rc)
230 goto out;
231
232 if (tcon->unix_ext)
233 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
234 xid);
235 else
236 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
237 xid, pnetfid);
238
239out:
240 kfree(buf);
241 return rc;
242}
243
15ecb436
JL
244struct cifsFileInfo *
245cifs_new_fileinfo(__u16 fileHandle, struct file *file,
246 struct tcon_link *tlink, __u32 oplock)
247{
248 struct dentry *dentry = file->f_path.dentry;
249 struct inode *inode = dentry->d_inode;
250 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
251 struct cifsFileInfo *pCifsFile;
252
253 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
254 if (pCifsFile == NULL)
255 return pCifsFile;
256
5f6dbc9e 257 pCifsFile->count = 1;
15ecb436
JL
258 pCifsFile->netfid = fileHandle;
259 pCifsFile->pid = current->tgid;
260 pCifsFile->uid = current_fsuid();
261 pCifsFile->dentry = dget(dentry);
262 pCifsFile->f_flags = file->f_flags;
263 pCifsFile->invalidHandle = false;
15ecb436
JL
264 pCifsFile->tlink = cifs_get_tlink(tlink);
265 mutex_init(&pCifsFile->fh_mutex);
15ecb436
JL
266 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
267
4477288a 268 spin_lock(&cifs_file_list_lock);
15ecb436
JL
269 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
270 /* if readable file instance put first in list*/
271 if (file->f_mode & FMODE_READ)
272 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
273 else
274 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
4477288a 275 spin_unlock(&cifs_file_list_lock);
15ecb436 276
c6723628 277 cifs_set_oplock_level(pCifsInode, oplock);
85160e03 278 pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;
15ecb436
JL
279
280 file->private_data = pCifsFile;
281 return pCifsFile;
282}
283
85160e03
PS
284static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
285
cdff08e7
SF
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	/* count is protected by cifs_file_list_lock; bail out early if this
	   was not the last reference */
	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* make sure no queued oplock-break work runs on a freed struct */
	cancel_work_sync(&cifs_file->oplock_break);

	/* close the handle on the server unless the tcon needs reconnect
	   (handle already gone) or the handle was already invalidated */
	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
350
1da177e4
LT
/*
 * VFS ->open for cifs. Builds the path, tries a POSIX-extensions open when
 * the server supports it, otherwise falls back to a regular NT open, then
 * hangs a cifsFileInfo off file->private_data.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* POSIX open path: only when the server advertises the unix
	   extensions' posix path operations and it hasn't proven broken */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* don't leak the server handle on allocation failure */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
453
/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
464
/*
 * Reopen a file whose handle was invalidated (e.g. after reconnect).
 * Tries the POSIX-extensions reopen first where available, otherwise an NT
 * open; on success stores the new netfid and, when can_flush is set,
 * flushes dirty pages and refreshes the inode from the server.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	/* another thread may have already reopened the handle */
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inod
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
590
591int cifs_close(struct inode *inode, struct file *file)
592{
77970693
JL
593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
596 }
7ee1af76 597
cdff08e7
SF
598 /* return code from the ->release op is always ignored */
599 return 0;
1da177e4
LT
600}
601
/*
 * VFS ->release for directories: close any uncompleted FindFirst/FindNext
 * search on the server and free the search state and private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		/* invalidate the handle under the lock, then close the
		   still-open search on the server outside it */
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* release with the matching buffer-pool helper */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
646
85160e03 647static struct cifsLockInfo *
a88b4707 648cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
7ee1af76 649{
a88b4707 650 struct cifsLockInfo *lock =
fb8c4b14 651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
652 if (!lock)
653 return lock;
654 lock->offset = offset;
655 lock->length = length;
656 lock->type = type;
657 lock->netfid = netfid;
658 lock->pid = current->tgid;
659 INIT_LIST_HEAD(&lock->blist);
660 init_waitqueue_head(&lock->block_q);
661 return lock;
85160e03
PS
662}
663
664static void
665cifs_del_lock_waiters(struct cifsLockInfo *lock)
666{
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
671 }
672}
673
b5efb978
PS
/*
 * Copied from fs/locks.c with small changes.
 * Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void
cifs_locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	/* detach from the blocker entirely */
	waiter->fl_next = NULL;
	unlock_flocks();
}
688
85160e03 689static bool
161ebf9f 690__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
85160e03
PS
691 __u64 length, __u8 type, __u16 netfid,
692 struct cifsLockInfo **conf_lock)
693{
694 struct cifsLockInfo *li, *tmp;
695
696 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
697 if (offset + length <= li->offset ||
698 offset >= li->offset + li->length)
699 continue;
700 else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
701 ((netfid == li->netfid && current->tgid == li->pid) ||
702 type == li->type))
703 continue;
704 else {
705 *conf_lock = li;
706 return true;
707 }
708 }
709 return false;
710}
711
161ebf9f
PS
712static bool
713cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
714 struct cifsLockInfo **conf_lock)
715{
716 return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
717 lock->type, lock->netfid, conf_lock);
718}
719
9a5101c8
PS
720/*
721 * Check if there is another lock that prevents us to set the lock (mandatory
722 * style). If such a lock exists, update the flock structure with its
723 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
724 * or leave it the same if we can't. Returns 0 if we don't need to request to
725 * the server or 1 otherwise.
726 */
85160e03
PS
727static int
728cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
729 __u8 type, __u16 netfid, struct file_lock *flock)
730{
731 int rc = 0;
732 struct cifsLockInfo *conf_lock;
733 bool exist;
734
735 mutex_lock(&cinode->lock_mutex);
736
161ebf9f
PS
737 exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
738 &conf_lock);
85160e03
PS
739 if (exist) {
740 flock->fl_start = conf_lock->offset;
741 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
742 flock->fl_pid = conf_lock->pid;
743 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
744 flock->fl_type = F_RDLCK;
745 else
746 flock->fl_type = F_WRLCK;
747 } else if (!cinode->can_cache_brlcks)
748 rc = 1;
749 else
750 flock->fl_type = F_UNLCK;
751
752 mutex_unlock(&cinode->lock_mutex);
753 return rc;
754}
755
161ebf9f
PS
/* Append a lock record to the inode's cached lock list under lock_mutex. */
static void
cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
{
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cinode->llist);
	mutex_unlock(&cinode->lock_mutex);
}
763
9a5101c8
PS
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and brlocks are cacheable: grant locally */
		list_add_tail(&lock->llist, &cinode->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/* park on the conflicting lock's block list and wait to be
		   woken by cifs_del_lock_waiters(); the wait condition checks
		   that our blist entry has been unlinked (self-pointing) */
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: remove ourselves from the block list */
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
808
9a5101c8
PS
809/*
810 * Check if there is another lock that prevents us to set the lock (posix
811 * style). If such a lock exists, update the flock structure with its
812 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
813 * or leave it the same if we can't. Returns 0 if we don't need to request to
814 * the server or 1 otherwise.
815 */
85160e03 816static int
4f6bcec9
PS
817cifs_posix_lock_test(struct file *file, struct file_lock *flock)
818{
819 int rc = 0;
820 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
821 unsigned char saved_type = flock->fl_type;
822
50792760
PS
823 if ((flock->fl_flags & FL_POSIX) == 0)
824 return 1;
825
4f6bcec9
PS
826 mutex_lock(&cinode->lock_mutex);
827 posix_test_lock(file, flock);
828
829 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
830 flock->fl_type = saved_type;
831 rc = 1;
832 }
833
834 mutex_unlock(&cinode->lock_mutex);
835 return rc;
836}
837
b5efb978
PS
/* Called with locked lock_mutex, return with unlocked. */
static int
cifs_posix_lock_file_wait_locked(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc;

	while (true) {
		rc = posix_lock_file(file, flock, NULL);
		/* drop the mutex before sleeping or returning */
		mutex_unlock(&cinode->lock_mutex);
		if (rc != FILE_LOCK_DEFERRED)
			break;
		/* deferred: wait until we are no longer blocked, then retake
		   the mutex and retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc) {
			mutex_lock(&cinode->lock_mutex);
			continue;
		}
		/* interrupted: detach from the blocker's list */
		cifs_locks_delete_block(flock);
		break;
	}
	return rc;
}
860
861static int
862cifs_posix_lock_file_wait(struct file *file, struct file_lock *flock)
863{
864 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
865
866 mutex_lock(&cinode->lock_mutex);
867 /* lock_mutex will be released by the function below */
868 return cifs_posix_lock_file_wait_locked(file, flock);
869}
870
9a5101c8
PS
871/*
872 * Set the byte-range lock (posix style). Returns:
873 * 1) 0, if we set the lock and don't need to request to the server;
874 * 2) 1, if we need to request to the server;
875 * 3) <0, if the error occurs while setting the lock.
876 */
4f6bcec9
PS
877static int
878cifs_posix_lock_set(struct file *file, struct file_lock *flock)
879{
880 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
50792760
PS
881 int rc = 1;
882
883 if ((flock->fl_flags & FL_POSIX) == 0)
884 return rc;
4f6bcec9
PS
885
886 mutex_lock(&cinode->lock_mutex);
887 if (!cinode->can_cache_brlcks) {
888 mutex_unlock(&cinode->lock_mutex);
50792760 889 return rc;
4f6bcec9 890 }
b5efb978
PS
891
892 /* lock_mutex will be released by the function below */
893 return cifs_posix_lock_file_wait_locked(file, flock);
4f6bcec9
PS
894}
895
896static int
897cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
85160e03
PS
898{
899 int xid, rc = 0, stored_rc;
900 struct cifsLockInfo *li, *tmp;
901 struct cifs_tcon *tcon;
902 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
32b9aaf1
PS
903 unsigned int num, max_num;
904 LOCKING_ANDX_RANGE *buf, *cur;
905 int types[] = {LOCKING_ANDX_LARGE_FILES,
906 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
907 int i;
85160e03
PS
908
909 xid = GetXid();
910 tcon = tlink_tcon(cfile->tlink);
911
912 mutex_lock(&cinode->lock_mutex);
913 if (!cinode->can_cache_brlcks) {
914 mutex_unlock(&cinode->lock_mutex);
915 FreeXid(xid);
916 return rc;
917 }
918
32b9aaf1
PS
919 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
920 sizeof(LOCKING_ANDX_RANGE);
921 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
922 if (!buf) {
923 mutex_unlock(&cinode->lock_mutex);
924 FreeXid(xid);
925 return rc;
926 }
927
928 for (i = 0; i < 2; i++) {
929 cur = buf;
930 num = 0;
931 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
932 if (li->type != types[i])
933 continue;
934 cur->Pid = cpu_to_le16(li->pid);
935 cur->LengthLow = cpu_to_le32((u32)li->length);
936 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
937 cur->OffsetLow = cpu_to_le32((u32)li->offset);
938 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
939 if (++num == max_num) {
940 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
941 li->type, 0, num, buf);
942 if (stored_rc)
943 rc = stored_rc;
944 cur = buf;
945 num = 0;
946 } else
947 cur++;
948 }
949
950 if (num) {
951 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
952 types[i], 0, num, buf);
953 if (stored_rc)
954 rc = stored_rc;
955 }
85160e03
PS
956 }
957
958 cinode->can_cache_brlcks = false;
959 mutex_unlock(&cinode->lock_mutex);
960
32b9aaf1 961 kfree(buf);
85160e03
PS
962 FreeXid(xid);
963 return rc;
964}
965
4f6bcec9
PS
966/* copied from fs/locks.c with a name change */
967#define cifs_for_each_lock(inode, lockp) \
968 for (lockp = &inode->i_flock; *lockp != NULL; \
969 lockp = &(*lockp)->fl_next)
970
d5751469
PS
/*
 * Staging record used by cifs_push_posix_locks() to copy posix lock state
 * out from under lock_flocks() before sending it to the server.
 */
struct lock_to_push {
	struct list_head llist;	/* link in the local locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* owner pid reported to the server */
	__u16 netfid;		/* server file handle */
	__u8 type;		/* lock type (read/write) */
};
979
4f6bcec9
PS
980static int
981cifs_push_posix_locks(struct cifsFileInfo *cfile)
982{
983 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
984 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
985 struct file_lock *flock, **before;
d5751469 986 unsigned int count = 0, i = 0;
4f6bcec9 987 int rc = 0, xid, type;
d5751469
PS
988 struct list_head locks_to_send, *el;
989 struct lock_to_push *lck, *tmp;
4f6bcec9 990 __u64 length;
4f6bcec9
PS
991
992 xid = GetXid();
993
994 mutex_lock(&cinode->lock_mutex);
995 if (!cinode->can_cache_brlcks) {
996 mutex_unlock(&cinode->lock_mutex);
997 FreeXid(xid);
998 return rc;
999 }
1000
d5751469
PS
1001 lock_flocks();
1002 cifs_for_each_lock(cfile->dentry->d_inode, before) {
1003 if ((*before)->fl_flags & FL_POSIX)
1004 count++;
1005 }
1006 unlock_flocks();
1007
4f6bcec9
PS
1008 INIT_LIST_HEAD(&locks_to_send);
1009
d5751469 1010 /*
ce85852b
PS
1011 * Allocating count locks is enough because no FL_POSIX locks can be
1012 * added to the list while we are holding cinode->lock_mutex that
1013 * protects locking operations of this inode.
d5751469
PS
1014 */
1015 for (; i < count; i++) {
1016 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1017 if (!lck) {
1018 rc = -ENOMEM;
1019 goto err_out;
1020 }
1021 list_add_tail(&lck->llist, &locks_to_send);
1022 }
1023
d5751469 1024 el = locks_to_send.next;
4f6bcec9
PS
1025 lock_flocks();
1026 cifs_for_each_lock(cfile->dentry->d_inode, before) {
ce85852b
PS
1027 flock = *before;
1028 if ((flock->fl_flags & FL_POSIX) == 0)
1029 continue;
d5751469 1030 if (el == &locks_to_send) {
ce85852b
PS
1031 /*
1032 * The list ended. We don't have enough allocated
1033 * structures - something is really wrong.
1034 */
d5751469
PS
1035 cERROR(1, "Can't push all brlocks!");
1036 break;
1037 }
4f6bcec9
PS
1038 length = 1 + flock->fl_end - flock->fl_start;
1039 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1040 type = CIFS_RDLCK;
1041 else
1042 type = CIFS_WRLCK;
d5751469 1043 lck = list_entry(el, struct lock_to_push, llist);
4f6bcec9 1044 lck->pid = flock->fl_pid;
d5751469
PS
1045 lck->netfid = cfile->netfid;
1046 lck->length = length;
1047 lck->type = type;
1048 lck->offset = flock->fl_start;
d5751469 1049 el = el->next;
4f6bcec9 1050 }
4f6bcec9
PS
1051 unlock_flocks();
1052
1053 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1054 struct file_lock tmp_lock;
1055 int stored_rc;
1056
1057 tmp_lock.fl_start = lck->offset;
1058 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1059 0, lck->length, &tmp_lock,
1060 lck->type, 0);
1061 if (stored_rc)
1062 rc = stored_rc;
1063 list_del(&lck->llist);
1064 kfree(lck);
1065 }
1066
d5751469 1067out:
4f6bcec9
PS
1068 cinode->can_cache_brlcks = false;
1069 mutex_unlock(&cinode->lock_mutex);
1070
1071 FreeXid(xid);
1072 return rc;
d5751469
PS
1073err_out:
1074 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1075 list_del(&lck->llist);
1076 kfree(lck);
1077 }
1078 goto out;
4f6bcec9
PS
1079}
1080
1081static int
1082cifs_push_locks(struct cifsFileInfo *cfile)
1083{
1084 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1085 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1086
1087 if ((tcon->ses->capabilities & CAP_UNIX) &&
1088 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1089 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1090 return cifs_push_posix_locks(cfile);
1091
1092 return cifs_push_mandatory_locks(cfile);
1093}
1094
03776f45
PS
1095static void
1096cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
1097 bool *wait_flag)
1da177e4 1098{
03776f45 1099 if (flock->fl_flags & FL_POSIX)
b6b38f70 1100 cFYI(1, "Posix");
03776f45 1101 if (flock->fl_flags & FL_FLOCK)
b6b38f70 1102 cFYI(1, "Flock");
03776f45 1103 if (flock->fl_flags & FL_SLEEP) {
b6b38f70 1104 cFYI(1, "Blocking lock");
03776f45 1105 *wait_flag = true;
1da177e4 1106 }
03776f45 1107 if (flock->fl_flags & FL_ACCESS)
b6b38f70 1108 cFYI(1, "Process suspended by mandatory locking - "
03776f45
PS
1109 "not implemented yet");
1110 if (flock->fl_flags & FL_LEASE)
b6b38f70 1111 cFYI(1, "Lease on file - not implemented yet");
03776f45 1112 if (flock->fl_flags &
1da177e4 1113 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
03776f45 1114 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1da177e4 1115
03776f45
PS
1116 *type = LOCKING_ANDX_LARGE_FILES;
1117 if (flock->fl_type == F_WRLCK) {
b6b38f70 1118 cFYI(1, "F_WRLCK ");
03776f45
PS
1119 *lock = 1;
1120 } else if (flock->fl_type == F_UNLCK) {
b6b38f70 1121 cFYI(1, "F_UNLCK");
03776f45
PS
1122 *unlock = 1;
1123 /* Check if unlock includes more than one lock range */
1124 } else if (flock->fl_type == F_RDLCK) {
b6b38f70 1125 cFYI(1, "F_RDLCK");
03776f45
PS
1126 *type |= LOCKING_ANDX_SHARED_LOCK;
1127 *lock = 1;
1128 } else if (flock->fl_type == F_EXLCK) {
b6b38f70 1129 cFYI(1, "F_EXLCK");
03776f45
PS
1130 *lock = 1;
1131 } else if (flock->fl_type == F_SHLCK) {
b6b38f70 1132 cFYI(1, "F_SHLCK");
03776f45
PS
1133 *type |= LOCKING_ANDX_SHARED_LOCK;
1134 *lock = 1;
1da177e4 1135 } else
b6b38f70 1136 cFYI(1, "Unknown type of lock");
03776f45 1137}
1da177e4 1138
03776f45 1139static int
4f6bcec9 1140cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
03776f45
PS
1141 bool wait_flag, bool posix_lck, int xid)
1142{
1143 int rc = 0;
1144 __u64 length = 1 + flock->fl_end - flock->fl_start;
4f6bcec9
PS
1145 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1146 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
85160e03 1147 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
03776f45 1148 __u16 netfid = cfile->netfid;
f05337c6 1149
03776f45
PS
1150 if (posix_lck) {
1151 int posix_lock_type;
4f6bcec9
PS
1152
1153 rc = cifs_posix_lock_test(file, flock);
1154 if (!rc)
1155 return rc;
1156
03776f45
PS
1157 if (type & LOCKING_ANDX_SHARED_LOCK)
1158 posix_lock_type = CIFS_RDLCK;
1159 else
1160 posix_lock_type = CIFS_WRLCK;
4f6bcec9
PS
1161 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1162 1 /* get */, length, flock,
1163 posix_lock_type, wait_flag);
03776f45
PS
1164 return rc;
1165 }
1da177e4 1166
85160e03
PS
1167 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
1168 flock);
1169 if (!rc)
1170 return rc;
1171
03776f45
PS
1172 /* BB we could chain these into one lock request BB */
1173 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1174 flock->fl_start, 0, 1, type, 0, 0);
1175 if (rc == 0) {
1176 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1177 length, flock->fl_start, 1, 0,
1178 type, 0, 0);
1179 flock->fl_type = F_UNLCK;
1180 if (rc != 0)
1181 cERROR(1, "Error unlocking previously locked "
1182 "range %d during test of lock", rc);
a88b4707 1183 return 0;
1da177e4 1184 }
7ee1af76 1185
03776f45
PS
1186 if (type & LOCKING_ANDX_SHARED_LOCK) {
1187 flock->fl_type = F_WRLCK;
a88b4707 1188 return 0;
7ee1af76
JA
1189 }
1190
03776f45
PS
1191 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1192 flock->fl_start, 0, 1,
1193 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
1194 if (rc == 0) {
1195 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1196 length, flock->fl_start, 1, 0,
1197 type | LOCKING_ANDX_SHARED_LOCK,
1198 0, 0);
1199 flock->fl_type = F_RDLCK;
1200 if (rc != 0)
1201 cERROR(1, "Error unlocking previously locked "
1202 "range %d during test of lock", rc);
1203 } else
1204 flock->fl_type = F_WRLCK;
1205
a88b4707 1206 return 0;
03776f45
PS
1207}
1208
9ee305b7
PS
1209static void
1210cifs_move_llist(struct list_head *source, struct list_head *dest)
1211{
1212 struct list_head *li, *tmp;
1213 list_for_each_safe(li, tmp, source)
1214 list_move(li, dest);
1215}
1216
1217static void
1218cifs_free_llist(struct list_head *llist)
1219{
1220 struct cifsLockInfo *li, *tmp;
1221 list_for_each_entry_safe(li, tmp, llist, llist) {
1222 cifs_del_lock_waiters(li);
1223 list_del(&li->llist);
1224 kfree(li);
1225 }
1226}
1227
1228static int
1229cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1230{
1231 int rc = 0, stored_rc;
1232 int types[] = {LOCKING_ANDX_LARGE_FILES,
1233 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1234 unsigned int i;
1235 unsigned int max_num, num;
1236 LOCKING_ANDX_RANGE *buf, *cur;
1237 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1238 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1239 struct cifsLockInfo *li, *tmp;
1240 __u64 length = 1 + flock->fl_end - flock->fl_start;
1241 struct list_head tmp_llist;
1242
1243 INIT_LIST_HEAD(&tmp_llist);
1244
1245 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
1246 sizeof(LOCKING_ANDX_RANGE);
1247 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1248 if (!buf)
1249 return -ENOMEM;
1250
1251 mutex_lock(&cinode->lock_mutex);
1252 for (i = 0; i < 2; i++) {
1253 cur = buf;
1254 num = 0;
1255 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
1256 if (flock->fl_start > li->offset ||
1257 (flock->fl_start + length) <
1258 (li->offset + li->length))
1259 continue;
1260 if (current->tgid != li->pid)
1261 continue;
1262 if (cfile->netfid != li->netfid)
1263 continue;
1264 if (types[i] != li->type)
1265 continue;
1266 if (!cinode->can_cache_brlcks) {
1267 cur->Pid = cpu_to_le16(li->pid);
1268 cur->LengthLow = cpu_to_le32((u32)li->length);
1269 cur->LengthHigh =
1270 cpu_to_le32((u32)(li->length>>32));
1271 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1272 cur->OffsetHigh =
1273 cpu_to_le32((u32)(li->offset>>32));
1274 /*
1275 * We need to save a lock here to let us add
1276 * it again to the inode list if the unlock
1277 * range request fails on the server.
1278 */
1279 list_move(&li->llist, &tmp_llist);
1280 if (++num == max_num) {
1281 stored_rc = cifs_lockv(xid, tcon,
1282 cfile->netfid,
1283 li->type, num,
1284 0, buf);
1285 if (stored_rc) {
1286 /*
1287 * We failed on the unlock range
1288 * request - add all locks from
1289 * the tmp list to the head of
1290 * the inode list.
1291 */
1292 cifs_move_llist(&tmp_llist,
1293 &cinode->llist);
1294 rc = stored_rc;
1295 } else
1296 /*
1297 * The unlock range request
1298 * succeed - free the tmp list.
1299 */
1300 cifs_free_llist(&tmp_llist);
1301 cur = buf;
1302 num = 0;
1303 } else
1304 cur++;
1305 } else {
1306 /*
1307 * We can cache brlock requests - simply remove
1308 * a lock from the inode list.
1309 */
1310 list_del(&li->llist);
1311 cifs_del_lock_waiters(li);
1312 kfree(li);
1313 }
1314 }
1315 if (num) {
1316 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1317 types[i], num, 0, buf);
1318 if (stored_rc) {
1319 cifs_move_llist(&tmp_llist, &cinode->llist);
1320 rc = stored_rc;
1321 } else
1322 cifs_free_llist(&tmp_llist);
1323 }
1324 }
1325
1326 mutex_unlock(&cinode->lock_mutex);
1327 kfree(buf);
1328 return rc;
1329}
1330
03776f45
PS
1331static int
1332cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1333 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1334{
1335 int rc = 0;
1336 __u64 length = 1 + flock->fl_end - flock->fl_start;
1337 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1338 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
d59dad2b 1339 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
03776f45
PS
1340 __u16 netfid = cfile->netfid;
1341
1342 if (posix_lck) {
08547b03 1343 int posix_lock_type;
4f6bcec9
PS
1344
1345 rc = cifs_posix_lock_set(file, flock);
1346 if (!rc || rc < 0)
1347 return rc;
1348
03776f45 1349 if (type & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
1350 posix_lock_type = CIFS_RDLCK;
1351 else
1352 posix_lock_type = CIFS_WRLCK;
50c2f753 1353
03776f45 1354 if (unlock == 1)
beb84dc8 1355 posix_lock_type = CIFS_UNLCK;
7ee1af76 1356
4f6bcec9
PS
1357 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1358 0 /* set */, length, flock,
1359 posix_lock_type, wait_flag);
03776f45
PS
1360 goto out;
1361 }
7ee1af76 1362
03776f45 1363 if (lock) {
161ebf9f
PS
1364 struct cifsLockInfo *lock;
1365
a88b4707 1366 lock = cifs_lock_init(flock->fl_start, length, type, netfid);
161ebf9f
PS
1367 if (!lock)
1368 return -ENOMEM;
1369
1370 rc = cifs_lock_add_if(cinode, lock, wait_flag);
85160e03 1371 if (rc < 0)
161ebf9f
PS
1372 kfree(lock);
1373 if (rc <= 0)
85160e03
PS
1374 goto out;
1375
03776f45 1376 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
85160e03 1377 flock->fl_start, 0, 1, type, wait_flag, 0);
161ebf9f
PS
1378 if (rc) {
1379 kfree(lock);
1380 goto out;
03776f45 1381 }
161ebf9f
PS
1382
1383 cifs_lock_add(cinode, lock);
9ee305b7
PS
1384 } else if (unlock)
1385 rc = cifs_unlock_range(cfile, flock, xid);
03776f45 1386
03776f45
PS
1387out:
1388 if (flock->fl_flags & FL_POSIX)
b5efb978 1389 cifs_posix_lock_file_wait(file, flock);
03776f45
PS
1390 return rc;
1391}
1392
1393int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1394{
1395 int rc, xid;
1396 int lock = 0, unlock = 0;
1397 bool wait_flag = false;
1398 bool posix_lck = false;
1399 struct cifs_sb_info *cifs_sb;
1400 struct cifs_tcon *tcon;
1401 struct cifsInodeInfo *cinode;
1402 struct cifsFileInfo *cfile;
1403 __u16 netfid;
1404 __u8 type;
1405
1406 rc = -EACCES;
1407 xid = GetXid();
1408
1409 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1410 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1411 flock->fl_start, flock->fl_end);
1412
1413 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1414
1415 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1416 cfile = (struct cifsFileInfo *)file->private_data;
1417 tcon = tlink_tcon(cfile->tlink);
1418 netfid = cfile->netfid;
1419 cinode = CIFS_I(file->f_path.dentry->d_inode);
1420
1421 if ((tcon->ses->capabilities & CAP_UNIX) &&
1422 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1423 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1424 posix_lck = true;
1425 /*
1426 * BB add code here to normalize offset and length to account for
1427 * negative length which we can not accept over the wire.
1428 */
1429 if (IS_GETLK(cmd)) {
4f6bcec9 1430 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
03776f45
PS
1431 FreeXid(xid);
1432 return rc;
1433 }
1434
1435 if (!lock && !unlock) {
1436 /*
1437 * if no lock or unlock then nothing to do since we do not
1438 * know what it is
1439 */
1440 FreeXid(xid);
1441 return -EOPNOTSUPP;
7ee1af76
JA
1442 }
1443
03776f45
PS
1444 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1445 xid);
1da177e4
LT
1446 FreeXid(xid);
1447 return rc;
1448}
1449
597b027f
JL
1450/*
1451 * update the file size (if needed) after a write. Should be called with
1452 * the inode->i_lock held
1453 */
72432ffc 1454void
fbec9ab9
JL
1455cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1456 unsigned int bytes_written)
1457{
1458 loff_t end_of_write = offset + bytes_written;
1459
1460 if (end_of_write > cifsi->server_eof)
1461 cifsi->server_eof = end_of_write;
1462}
1463
fa2989f4 1464static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
7da4b49a
JL
1465 const char *write_data, size_t write_size,
1466 loff_t *poffset)
1da177e4
LT
1467{
1468 int rc = 0;
1469 unsigned int bytes_written = 0;
1470 unsigned int total_written;
1471 struct cifs_sb_info *cifs_sb;
96daf2b0 1472 struct cifs_tcon *pTcon;
7749981e 1473 int xid;
7da4b49a
JL
1474 struct dentry *dentry = open_file->dentry;
1475 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
fa2989f4 1476 struct cifs_io_parms io_parms;
1da177e4 1477
7da4b49a 1478 cifs_sb = CIFS_SB(dentry->d_sb);
1da177e4 1479
b6b38f70 1480 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
7da4b49a 1481 *poffset, dentry->d_name.name);
1da177e4 1482
13cfb733 1483 pTcon = tlink_tcon(open_file->tlink);
50c2f753 1484
1da177e4 1485 xid = GetXid();
1da177e4 1486
1da177e4
LT
1487 for (total_written = 0; write_size > total_written;
1488 total_written += bytes_written) {
1489 rc = -EAGAIN;
1490 while (rc == -EAGAIN) {
ca83ce3d
JL
1491 struct kvec iov[2];
1492 unsigned int len;
1493
1da177e4 1494 if (open_file->invalidHandle) {
1da177e4
LT
1495 /* we could deadlock if we called
1496 filemap_fdatawait from here so tell
fb8c4b14 1497 reopen_file not to flush data to
1da177e4 1498 server now */
15886177 1499 rc = cifs_reopen_file(open_file, false);
1da177e4
LT
1500 if (rc != 0)
1501 break;
1502 }
ca83ce3d
JL
1503
1504 len = min((size_t)cifs_sb->wsize,
1505 write_size - total_written);
1506 /* iov[0] is reserved for smb header */
1507 iov[1].iov_base = (char *)write_data + total_written;
1508 iov[1].iov_len = len;
fa2989f4
PS
1509 io_parms.netfid = open_file->netfid;
1510 io_parms.pid = pid;
1511 io_parms.tcon = pTcon;
1512 io_parms.offset = *poffset;
1513 io_parms.length = len;
1514 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1515 1, 0);
1da177e4
LT
1516 }
1517 if (rc || (bytes_written == 0)) {
1518 if (total_written)
1519 break;
1520 else {
1521 FreeXid(xid);
1522 return rc;
1523 }
fbec9ab9 1524 } else {
597b027f 1525 spin_lock(&dentry->d_inode->i_lock);
fbec9ab9 1526 cifs_update_eof(cifsi, *poffset, bytes_written);
597b027f 1527 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 1528 *poffset += bytes_written;
fbec9ab9 1529 }
1da177e4
LT
1530 }
1531
a4544347 1532 cifs_stats_bytes_written(pTcon, total_written);
1da177e4 1533
7da4b49a
JL
1534 if (total_written > 0) {
1535 spin_lock(&dentry->d_inode->i_lock);
1536 if (*poffset > dentry->d_inode->i_size)
1537 i_size_write(dentry->d_inode, *poffset);
1538 spin_unlock(&dentry->d_inode->i_lock);
1da177e4 1539 }
7da4b49a 1540 mark_inode_dirty_sync(dentry->d_inode);
1da177e4
LT
1541 FreeXid(xid);
1542 return total_written;
1543}
1544
6508d904
JL
1545struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1546 bool fsuid_only)
630f3f0c
SF
1547{
1548 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1549 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1550
1551 /* only filter by fsuid on multiuser mounts */
1552 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1553 fsuid_only = false;
630f3f0c 1554
4477288a 1555 spin_lock(&cifs_file_list_lock);
630f3f0c
SF
1556 /* we could simply get the first_list_entry since write-only entries
1557 are always at the end of the list but since the first entry might
1558 have a close pending, we go through the whole list */
1559 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1560 if (fsuid_only && open_file->uid != current_fsuid())
1561 continue;
2e396b83 1562 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
630f3f0c
SF
1563 if (!open_file->invalidHandle) {
1564 /* found a good file */
1565 /* lock it so it will not be closed on us */
6ab409b5 1566 cifsFileInfo_get(open_file);
4477288a 1567 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1568 return open_file;
1569 } /* else might as well continue, and look for
1570 another, or simply have the caller reopen it
1571 again rather than trying to fix this handle */
1572 } else /* write only file */
1573 break; /* write only files are last so must be done */
1574 }
4477288a 1575 spin_unlock(&cifs_file_list_lock);
630f3f0c
SF
1576 return NULL;
1577}
630f3f0c 1578
6508d904
JL
1579struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1580 bool fsuid_only)
6148a742
SF
1581{
1582 struct cifsFileInfo *open_file;
d3892294 1583 struct cifs_sb_info *cifs_sb;
2846d386 1584 bool any_available = false;
dd99cd80 1585 int rc;
6148a742 1586
60808233
SF
1587 /* Having a null inode here (because mapping->host was set to zero by
1588 the VFS or MM) should not happen but we had reports of on oops (due to
1589 it being zero) during stress testcases so we need to check for it */
1590
fb8c4b14 1591 if (cifs_inode == NULL) {
b6b38f70 1592 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1593 dump_stack();
1594 return NULL;
1595 }
1596
d3892294
JL
1597 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1598
6508d904
JL
1599 /* only filter by fsuid on multiuser mounts */
1600 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1601 fsuid_only = false;
1602
4477288a 1603 spin_lock(&cifs_file_list_lock);
9b22b0b7 1604refind_writable:
6148a742 1605 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1606 if (!any_available && open_file->pid != current->tgid)
1607 continue;
1608 if (fsuid_only && open_file->uid != current_fsuid())
6148a742 1609 continue;
2e396b83 1610 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
6ab409b5 1611 cifsFileInfo_get(open_file);
9b22b0b7
SF
1612
1613 if (!open_file->invalidHandle) {
1614 /* found a good writable file */
4477288a 1615 spin_unlock(&cifs_file_list_lock);
9b22b0b7
SF
1616 return open_file;
1617 }
8840dee9 1618
4477288a 1619 spin_unlock(&cifs_file_list_lock);
cdff08e7 1620
9b22b0b7 1621 /* Had to unlock since following call can block */
15886177 1622 rc = cifs_reopen_file(open_file, false);
cdff08e7
SF
1623 if (!rc)
1624 return open_file;
9b22b0b7 1625
cdff08e7 1626 /* if it fails, try another handle if possible */
b6b38f70 1627 cFYI(1, "wp failed on reopen file");
6ab409b5 1628 cifsFileInfo_put(open_file);
8840dee9 1629
cdff08e7
SF
1630 spin_lock(&cifs_file_list_lock);
1631
9b22b0b7
SF
1632 /* else we simply continue to the next entry. Thus
1633 we do not loop on reopen errors. If we
1634 can not reopen the file, for example if we
1635 reconnected to a server with another client
1636 racing to delete or lock the file we would not
1637 make progress if we restarted before the beginning
1638 of the loop here. */
6148a742
SF
1639 }
1640 }
2846d386
JL
1641 /* couldn't find useable FH with same pid, try any available */
1642 if (!any_available) {
1643 any_available = true;
1644 goto refind_writable;
1645 }
4477288a 1646 spin_unlock(&cifs_file_list_lock);
6148a742
SF
1647 return NULL;
1648}
1649
1da177e4
LT
1650static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1651{
1652 struct address_space *mapping = page->mapping;
1653 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1654 char *write_data;
1655 int rc = -EFAULT;
1656 int bytes_written = 0;
1da177e4 1657 struct inode *inode;
6148a742 1658 struct cifsFileInfo *open_file;
1da177e4
LT
1659
1660 if (!mapping || !mapping->host)
1661 return -EFAULT;
1662
1663 inode = page->mapping->host;
1da177e4
LT
1664
1665 offset += (loff_t)from;
1666 write_data = kmap(page);
1667 write_data += from;
1668
1669 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1670 kunmap(page);
1671 return -EIO;
1672 }
1673
1674 /* racing with truncate? */
1675 if (offset > mapping->host->i_size) {
1676 kunmap(page);
1677 return 0; /* don't care */
1678 }
1679
1680 /* check to make sure that we are not extending the file */
1681 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1682 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1683
6508d904 1684 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742 1685 if (open_file) {
fa2989f4
PS
1686 bytes_written = cifs_write(open_file, open_file->pid,
1687 write_data, to - from, &offset);
6ab409b5 1688 cifsFileInfo_put(open_file);
1da177e4 1689 /* Does mm or vfs already set times? */
6148a742 1690 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1691 if ((bytes_written > 0) && (offset))
6148a742 1692 rc = 0;
bb5a9a04
SF
1693 else if (bytes_written < 0)
1694 rc = bytes_written;
6148a742 1695 } else {
b6b38f70 1696 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1697 rc = -EIO;
1698 }
1699
1700 kunmap(page);
1701 return rc;
1702}
1703
e9492871
JL
1704/*
1705 * Marshal up the iov array, reserving the first one for the header. Also,
1706 * set wdata->bytes.
1707 */
1708static void
1709cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1710{
1711 int i;
1712 struct inode *inode = wdata->cfile->dentry->d_inode;
1713 loff_t size = i_size_read(inode);
1714
1715 /* marshal up the pages into iov array */
1716 wdata->bytes = 0;
1717 for (i = 0; i < wdata->nr_pages; i++) {
1718 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1719 (loff_t)PAGE_CACHE_SIZE);
1720 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1721 wdata->bytes += iov[i + 1].iov_len;
1722 }
1723}
1724
1da177e4 1725static int cifs_writepages(struct address_space *mapping,
37c0eb46 1726 struct writeback_control *wbc)
1da177e4 1727{
c3d17b63
JL
1728 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1729 bool done = false, scanned = false, range_whole = false;
1730 pgoff_t end, index;
1731 struct cifs_writedata *wdata;
37c0eb46 1732 struct page *page;
37c0eb46 1733 int rc = 0;
50c2f753 1734
37c0eb46 1735 /*
c3d17b63 1736 * If wsize is smaller than the page cache size, default to writing
37c0eb46
SF
1737 * one page at a time via cifs_writepage
1738 */
1739 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1740 return generic_writepages(mapping, wbc);
1741
111ebb6e 1742 if (wbc->range_cyclic) {
37c0eb46 1743 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1744 end = -1;
1745 } else {
1746 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1747 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1748 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
c3d17b63
JL
1749 range_whole = true;
1750 scanned = true;
37c0eb46
SF
1751 }
1752retry:
c3d17b63
JL
1753 while (!done && index <= end) {
1754 unsigned int i, nr_pages, found_pages;
1755 pgoff_t next = 0, tofind;
1756 struct page **pages;
1757
1758 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1759 end - index) + 1;
1760
c2e87640
JL
1761 wdata = cifs_writedata_alloc((unsigned int)tofind,
1762 cifs_writev_complete);
c3d17b63
JL
1763 if (!wdata) {
1764 rc = -ENOMEM;
1765 break;
1766 }
1767
1768 /*
1769 * find_get_pages_tag seems to return a max of 256 on each
1770 * iteration, so we must call it several times in order to
1771 * fill the array or the wsize is effectively limited to
1772 * 256 * PAGE_CACHE_SIZE.
1773 */
1774 found_pages = 0;
1775 pages = wdata->pages;
1776 do {
1777 nr_pages = find_get_pages_tag(mapping, &index,
1778 PAGECACHE_TAG_DIRTY,
1779 tofind, pages);
1780 found_pages += nr_pages;
1781 tofind -= nr_pages;
1782 pages += nr_pages;
1783 } while (nr_pages && tofind && index <= end);
1784
1785 if (found_pages == 0) {
1786 kref_put(&wdata->refcount, cifs_writedata_release);
1787 break;
1788 }
1789
1790 nr_pages = 0;
1791 for (i = 0; i < found_pages; i++) {
1792 page = wdata->pages[i];
37c0eb46
SF
1793 /*
1794 * At this point we hold neither mapping->tree_lock nor
1795 * lock on the page itself: the page may be truncated or
1796 * invalidated (changing page->mapping to NULL), or even
1797 * swizzled back from swapper_space to tmpfs file
1798 * mapping
1799 */
1800
c3d17b63 1801 if (nr_pages == 0)
37c0eb46 1802 lock_page(page);
529ae9aa 1803 else if (!trylock_page(page))
37c0eb46
SF
1804 break;
1805
1806 if (unlikely(page->mapping != mapping)) {
1807 unlock_page(page);
1808 break;
1809 }
1810
111ebb6e 1811 if (!wbc->range_cyclic && page->index > end) {
c3d17b63 1812 done = true;
37c0eb46
SF
1813 unlock_page(page);
1814 break;
1815 }
1816
1817 if (next && (page->index != next)) {
1818 /* Not next consecutive page */
1819 unlock_page(page);
1820 break;
1821 }
1822
1823 if (wbc->sync_mode != WB_SYNC_NONE)
1824 wait_on_page_writeback(page);
1825
1826 if (PageWriteback(page) ||
cb876f45 1827 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1828 unlock_page(page);
1829 break;
1830 }
84d2f07e 1831
cb876f45
LT
1832 /*
1833 * This actually clears the dirty bit in the radix tree.
1834 * See cifs_writepage() for more commentary.
1835 */
1836 set_page_writeback(page);
1837
84d2f07e 1838 if (page_offset(page) >= mapping->host->i_size) {
c3d17b63 1839 done = true;
84d2f07e 1840 unlock_page(page);
cb876f45 1841 end_page_writeback(page);
84d2f07e
SF
1842 break;
1843 }
1844
c3d17b63
JL
1845 wdata->pages[i] = page;
1846 next = page->index + 1;
1847 ++nr_pages;
1848 }
37c0eb46 1849
c3d17b63
JL
1850 /* reset index to refind any pages skipped */
1851 if (nr_pages == 0)
1852 index = wdata->pages[0]->index + 1;
84d2f07e 1853
c3d17b63
JL
1854 /* put any pages we aren't going to use */
1855 for (i = nr_pages; i < found_pages; i++) {
1856 page_cache_release(wdata->pages[i]);
1857 wdata->pages[i] = NULL;
1858 }
37c0eb46 1859
c3d17b63
JL
1860 /* nothing to write? */
1861 if (nr_pages == 0) {
1862 kref_put(&wdata->refcount, cifs_writedata_release);
1863 continue;
37c0eb46 1864 }
fbec9ab9 1865
c3d17b63
JL
1866 wdata->sync_mode = wbc->sync_mode;
1867 wdata->nr_pages = nr_pages;
1868 wdata->offset = page_offset(wdata->pages[0]);
e9492871 1869 wdata->marshal_iov = cifs_writepages_marshal_iov;
941b853d 1870
c3d17b63
JL
1871 do {
1872 if (wdata->cfile != NULL)
1873 cifsFileInfo_put(wdata->cfile);
1874 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1875 false);
1876 if (!wdata->cfile) {
1877 cERROR(1, "No writable handles for inode");
1878 rc = -EBADF;
1879 break;
941b853d 1880 }
fe5f5d2e 1881 wdata->pid = wdata->cfile->pid;
c3d17b63
JL
1882 rc = cifs_async_writev(wdata);
1883 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
941b853d 1884
c3d17b63
JL
1885 for (i = 0; i < nr_pages; ++i)
1886 unlock_page(wdata->pages[i]);
f3983c21 1887
c3d17b63
JL
1888 /* send failure -- clean up the mess */
1889 if (rc != 0) {
1890 for (i = 0; i < nr_pages; ++i) {
941b853d 1891 if (rc == -EAGAIN)
c3d17b63
JL
1892 redirty_page_for_writepage(wbc,
1893 wdata->pages[i]);
1894 else
1895 SetPageError(wdata->pages[i]);
1896 end_page_writeback(wdata->pages[i]);
1897 page_cache_release(wdata->pages[i]);
37c0eb46 1898 }
941b853d
JL
1899 if (rc != -EAGAIN)
1900 mapping_set_error(mapping, rc);
c3d17b63
JL
1901 }
1902 kref_put(&wdata->refcount, cifs_writedata_release);
941b853d 1903
c3d17b63
JL
1904 wbc->nr_to_write -= nr_pages;
1905 if (wbc->nr_to_write <= 0)
1906 done = true;
b066a48c 1907
c3d17b63 1908 index = next;
37c0eb46 1909 }
c3d17b63 1910
37c0eb46
SF
1911 if (!scanned && !done) {
1912 /*
1913 * We hit the last page and there is more work to be done: wrap
1914 * back to the start of the file
1915 */
c3d17b63 1916 scanned = true;
37c0eb46
SF
1917 index = 0;
1918 goto retry;
1919 }
c3d17b63 1920
111ebb6e 1921 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1922 mapping->writeback_index = index;
1923
1da177e4
LT
1924 return rc;
1925}
1da177e4 1926
9ad1506b
PS
1927static int
1928cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1da177e4 1929{
9ad1506b 1930 int rc;
1da177e4
LT
1931 int xid;
1932
1933 xid = GetXid();
1934/* BB add check for wbc flags */
1935 page_cache_get(page);
ad7a2926 1936 if (!PageUptodate(page))
b6b38f70 1937 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1938
1939 /*
1940 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1941 *
1942 * A writepage() implementation always needs to do either this,
1943 * or re-dirty the page with "redirty_page_for_writepage()" in
1944 * the case of a failure.
1945 *
1946 * Just unlocking the page will cause the radix tree tag-bits
1947 * to fail to update with the state of the page correctly.
1948 */
fb8c4b14 1949 set_page_writeback(page);
9ad1506b 1950retry_write:
1da177e4 1951 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
9ad1506b
PS
1952 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1953 goto retry_write;
1954 else if (rc == -EAGAIN)
1955 redirty_page_for_writepage(wbc, page);
1956 else if (rc != 0)
1957 SetPageError(page);
1958 else
1959 SetPageUptodate(page);
cb876f45
LT
1960 end_page_writeback(page);
1961 page_cache_release(page);
1da177e4
LT
1962 FreeXid(xid);
1963 return rc;
1964}
1965
9ad1506b
PS
/* ->writepage: write one page and release the page lock afterwards. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);

	unlock_page(page);
	return rc;
}
1972
d9414774
NP
1973static int cifs_write_end(struct file *file, struct address_space *mapping,
1974 loff_t pos, unsigned len, unsigned copied,
1975 struct page *page, void *fsdata)
1da177e4 1976{
d9414774
NP
1977 int rc;
1978 struct inode *inode = mapping->host;
d4ffff1f
PS
1979 struct cifsFileInfo *cfile = file->private_data;
1980 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1981 __u32 pid;
1982
1983 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1984 pid = cfile->pid;
1985 else
1986 pid = current->tgid;
1da177e4 1987
b6b38f70
JP
1988 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1989 page, pos, copied);
d9414774 1990
a98ee8c1
JL
1991 if (PageChecked(page)) {
1992 if (copied == len)
1993 SetPageUptodate(page);
1994 ClearPageChecked(page);
1995 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1996 SetPageUptodate(page);
ad7a2926 1997
1da177e4 1998 if (!PageUptodate(page)) {
d9414774
NP
1999 char *page_data;
2000 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
2001 int xid;
2002
2003 xid = GetXid();
1da177e4
LT
2004 /* this is probably better than directly calling
2005 partialpage_write since in this function the file handle is
2006 known which we might as well leverage */
2007 /* BB check if anything else missing out of ppw
2008 such as updating last write time */
2009 page_data = kmap(page);
d4ffff1f 2010 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
d9414774 2011 /* if (rc < 0) should we set writebehind rc? */
1da177e4 2012 kunmap(page);
d9414774
NP
2013
2014 FreeXid(xid);
fb8c4b14 2015 } else {
d9414774
NP
2016 rc = copied;
2017 pos += copied;
1da177e4
LT
2018 set_page_dirty(page);
2019 }
2020
d9414774
NP
2021 if (rc > 0) {
2022 spin_lock(&inode->i_lock);
2023 if (pos > inode->i_size)
2024 i_size_write(inode, pos);
2025 spin_unlock(&inode->i_lock);
2026 }
2027
2028 unlock_page(page);
2029 page_cache_release(page);
2030
1da177e4
LT
2031 return rc;
2032}
2033
02c24a82
JB
2034int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2035 int datasync)
1da177e4
LT
2036{
2037 int xid;
2038 int rc = 0;
96daf2b0 2039 struct cifs_tcon *tcon;
c21dfb69 2040 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 2041 struct inode *inode = file->f_path.dentry->d_inode;
8be7e6ba 2042 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4 2043
02c24a82
JB
2044 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2045 if (rc)
2046 return rc;
2047 mutex_lock(&inode->i_mutex);
2048
1da177e4
LT
2049 xid = GetXid();
2050
b6b38f70 2051 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 2052 file->f_path.dentry->d_name.name, datasync);
50c2f753 2053
6feb9891
PS
2054 if (!CIFS_I(inode)->clientCanCacheRead) {
2055 rc = cifs_invalidate_mapping(inode);
2056 if (rc) {
2057 cFYI(1, "rc: %d during invalidate phase", rc);
2058 rc = 0; /* don't care about it in fsync */
2059 }
2060 }
eb4b756b 2061
8be7e6ba
PS
2062 tcon = tlink_tcon(smbfile->tlink);
2063 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2064 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
2065
2066 FreeXid(xid);
02c24a82 2067 mutex_unlock(&inode->i_mutex);
8be7e6ba
PS
2068 return rc;
2069}
2070
02c24a82 2071int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba
PS
2072{
2073 int xid;
2074 int rc = 0;
96daf2b0 2075 struct cifs_tcon *tcon;
8be7e6ba
PS
2076 struct cifsFileInfo *smbfile = file->private_data;
2077 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
02c24a82
JB
2078 struct inode *inode = file->f_mapping->host;
2079
2080 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2081 if (rc)
2082 return rc;
2083 mutex_lock(&inode->i_mutex);
8be7e6ba
PS
2084
2085 xid = GetXid();
2086
2087 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2088 file->f_path.dentry->d_name.name, datasync);
2089
2090 tcon = tlink_tcon(smbfile->tlink);
2091 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2092 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
b298f223 2093
1da177e4 2094 FreeXid(xid);
02c24a82 2095 mutex_unlock(&inode->i_mutex);
1da177e4
LT
2096 return rc;
2097}
2098
1da177e4
LT
2099/*
2100 * As file closes, flush all cached write data for this inode checking
2101 * for write behind errors.
2102 */
75e1fcc0 2103int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2104{
fb8c4b14 2105 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
2106 int rc = 0;
2107
eb4b756b 2108 if (file->f_mode & FMODE_WRITE)
d3f1322a 2109 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2110
b6b38f70 2111 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
2112
2113 return rc;
2114}
2115
72432ffc
PS
2116static int
2117cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2118{
2119 int rc = 0;
2120 unsigned long i;
2121
2122 for (i = 0; i < num_pages; i++) {
e94f7ba1 2123 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
72432ffc
PS
2124 if (!pages[i]) {
2125 /*
2126 * save number of pages we have already allocated and
2127 * return with ENOMEM error
2128 */
2129 num_pages = i;
2130 rc = -ENOMEM;
e94f7ba1 2131 break;
72432ffc
PS
2132 }
2133 }
2134
e94f7ba1
JL
2135 if (rc) {
2136 for (i = 0; i < num_pages; i++)
2137 put_page(pages[i]);
2138 }
72432ffc
PS
2139 return rc;
2140}
2141
2142static inline
2143size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2144{
2145 size_t num_pages;
2146 size_t clen;
2147
2148 clen = min_t(const size_t, len, wsize);
a7103b99 2149 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
72432ffc
PS
2150
2151 if (cur_len)
2152 *cur_len = clen;
2153
2154 return num_pages;
2155}
2156
da82f7e7
JL
2157static void
2158cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2159{
2160 int i;
2161 size_t bytes = wdata->bytes;
2162
2163 /* marshal up the pages into iov array */
2164 for (i = 0; i < wdata->nr_pages; i++) {
c7ad42b5 2165 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
da82f7e7
JL
2166 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2167 bytes -= iov[i + 1].iov_len;
2168 }
2169}
2170
2171static void
2172cifs_uncached_writev_complete(struct work_struct *work)
2173{
2174 int i;
2175 struct cifs_writedata *wdata = container_of(work,
2176 struct cifs_writedata, work);
2177 struct inode *inode = wdata->cfile->dentry->d_inode;
2178 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2179
2180 spin_lock(&inode->i_lock);
2181 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2182 if (cifsi->server_eof > inode->i_size)
2183 i_size_write(inode, cifsi->server_eof);
2184 spin_unlock(&inode->i_lock);
2185
2186 complete(&wdata->done);
2187
2188 if (wdata->result != -EAGAIN) {
2189 for (i = 0; i < wdata->nr_pages; i++)
2190 put_page(wdata->pages[i]);
2191 }
2192
2193 kref_put(&wdata->refcount, cifs_writedata_release);
2194}
2195
2196/* attempt to send write to server, retry on any -EAGAIN errors */
2197static int
2198cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2199{
2200 int rc;
2201
2202 do {
2203 if (wdata->cfile->invalidHandle) {
2204 rc = cifs_reopen_file(wdata->cfile, false);
2205 if (rc != 0)
2206 continue;
2207 }
2208 rc = cifs_async_writev(wdata);
2209 } while (rc == -EAGAIN);
2210
2211 return rc;
2212}
2213
72432ffc
PS
/*
 * Uncached (O_DIRECT-style) vectored write.  Splits the user iovec into
 * wsize-sized chunks, copies each chunk into freshly-allocated pages,
 * fires them off as async writes, then waits for all completions in
 * offset order.  On success *poffset is advanced by the total written.
 * Returns bytes written, or a negative errno if nothing was written.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset = *poffset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	/* may trim len (O_APPEND, rlimits, etc.) and set errors */
	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		/* size this chunk by wsize; cur_len = bytes in this chunk */
		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		/* copy user data into the pages; a short copy (fault)
		   shrinks cur_len below */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->marshal_iov = cifs_uncached_marshal_iov;
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				/* list may have changed while unlocked;
				   walk it again from the top */
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2337
0b81c1c4 2338ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
72432ffc
PS
2339 unsigned long nr_segs, loff_t pos)
2340{
2341 ssize_t written;
2342 struct inode *inode;
2343
2344 inode = iocb->ki_filp->f_path.dentry->d_inode;
2345
2346 /*
2347 * BB - optimize the way when signing is disabled. We can drop this
2348 * extra memory-to-memory copying and use iovec buffers for constructing
2349 * write request.
2350 */
2351
2352 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2353 if (written > 0) {
2354 CIFS_I(inode)->invalid_mapping = true;
2355 iocb->ki_pos = pos;
2356 }
2357
2358 return written;
2359}
2360
2361ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2362 unsigned long nr_segs, loff_t pos)
2363{
2364 struct inode *inode;
2365
2366 inode = iocb->ki_filp->f_path.dentry->d_inode;
2367
2368 if (CIFS_I(inode)->clientCanCacheAll)
2369 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2370
2371 /*
2372 * In strict cache mode we need to write the data to the server exactly
2373 * from the pos to pos+len-1 rather than flush all affected pages
2374 * because it may cause a error with mandatory locks on these pages but
2375 * not on the region from pos to ppos+len-1.
2376 */
2377
2378 return cifs_user_writev(iocb, iov, nr_segs, pos);
2379}
2380
a70307ee
PS
/*
 * Uncached vectored read.  Issues synchronous SMB reads of up to rsize
 * bytes and copies the payload straight into the user iovec, advancing
 * *poffset as data arrives.  Returns total bytes read, or a negative
 * errno if the very first read fails.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		/* retry loop: reopen a stale handle and resend on -EAGAIN */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			/* response buffer (if any) holds the payload at
			   DataOffset; copy it out then release the buffer */
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;	/* return the short read */
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
2475
0b81c1c4 2476ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
a70307ee
PS
2477 unsigned long nr_segs, loff_t pos)
2478{
2479 ssize_t read;
2480
2481 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2482 if (read > 0)
2483 iocb->ki_pos = pos;
2484
2485 return read;
2486}
2487
2488ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2489 unsigned long nr_segs, loff_t pos)
2490{
2491 struct inode *inode;
2492
2493 inode = iocb->ki_filp->f_path.dentry->d_inode;
2494
2495 if (CIFS_I(inode)->clientCanCacheRead)
2496 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2497
2498 /*
2499 * In strict cache mode we need to read from the server all the time
2500 * if we don't have level II oplock because the server can delay mtime
2501 * change - so we can't make a decision about inode invalidating.
2502 * And we can also fail with pagereading if there are mandatory locks
2503 * on pages affected by this read but not on the region from pos to
2504 * pos+len-1.
2505 */
2506
2507 return cifs_user_readv(iocb, iov, nr_segs, pos);
2508}
1da177e4
LT
2509
2510static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
a70307ee 2511 loff_t *poffset)
1da177e4
LT
2512{
2513 int rc = -EACCES;
2514 unsigned int bytes_read = 0;
2515 unsigned int total_read;
2516 unsigned int current_read_size;
5eba8ab3 2517 unsigned int rsize;
1da177e4 2518 struct cifs_sb_info *cifs_sb;
96daf2b0 2519 struct cifs_tcon *pTcon;
1da177e4
LT
2520 int xid;
2521 char *current_offset;
2522 struct cifsFileInfo *open_file;
d4ffff1f 2523 struct cifs_io_parms io_parms;
ec637e3f 2524 int buf_type = CIFS_NO_BUFFER;
d4ffff1f 2525 __u32 pid;
1da177e4
LT
2526
2527 xid = GetXid();
e6a00296 2528 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 2529
5eba8ab3
JL
2530 /* FIXME: set up handlers for larger reads and/or convert to async */
2531 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2532
1da177e4 2533 if (file->private_data == NULL) {
0f3bc09e 2534 rc = -EBADF;
1da177e4 2535 FreeXid(xid);
0f3bc09e 2536 return rc;
1da177e4 2537 }
c21dfb69 2538 open_file = file->private_data;
13cfb733 2539 pTcon = tlink_tcon(open_file->tlink);
1da177e4 2540
d4ffff1f
PS
2541 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2542 pid = open_file->pid;
2543 else
2544 pid = current->tgid;
2545
1da177e4 2546 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 2547 cFYI(1, "attempting read on write only file instance");
1da177e4 2548
fb8c4b14 2549 for (total_read = 0, current_offset = read_data;
1da177e4
LT
2550 read_size > total_read;
2551 total_read += bytes_read, current_offset += bytes_read) {
5eba8ab3
JL
2552 current_read_size = min_t(uint, read_size - total_read, rsize);
2553
f9f5c817
SF
2554 /* For windows me and 9x we do not want to request more
2555 than it negotiated since it will refuse the read then */
fb8c4b14 2556 if ((pTcon->ses) &&
f9f5c817 2557 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
7748dd6e 2558 current_read_size = min_t(uint, current_read_size,
c974befa 2559 CIFSMaxBufSize);
f9f5c817 2560 }
1da177e4
LT
2561 rc = -EAGAIN;
2562 while (rc == -EAGAIN) {
cdff08e7 2563 if (open_file->invalidHandle) {
15886177 2564 rc = cifs_reopen_file(open_file, true);
1da177e4
LT
2565 if (rc != 0)
2566 break;
2567 }
d4ffff1f
PS
2568 io_parms.netfid = open_file->netfid;
2569 io_parms.pid = pid;
2570 io_parms.tcon = pTcon;
2571 io_parms.offset = *poffset;
2572 io_parms.length = current_read_size;
2573 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2574 &current_offset, &buf_type);
1da177e4
LT
2575 }
2576 if (rc || (bytes_read == 0)) {
2577 if (total_read) {
2578 break;
2579 } else {
2580 FreeXid(xid);
2581 return rc;
2582 }
2583 } else {
a4544347 2584 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
2585 *poffset += bytes_read;
2586 }
2587 }
2588 FreeXid(xid);
2589 return total_read;
2590}
2591
ca83ce3d
JL
2592/*
2593 * If the page is mmap'ed into a process' page tables, then we need to make
2594 * sure that it doesn't change while being written back.
2595 */
2596static int
2597cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2598{
2599 struct page *page = vmf->page;
2600
2601 lock_page(page);
2602 return VM_FAULT_LOCKED;
2603}
2604
/* VM operations for cifs-backed mappings: generic fault handling plus
 * cifs_page_mkwrite to keep a page stable during writeback. */
static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};
2609
7a6a19b1
PS
2610int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2611{
2612 int rc, xid;
2613 struct inode *inode = file->f_path.dentry->d_inode;
2614
2615 xid = GetXid();
2616
6feb9891
PS
2617 if (!CIFS_I(inode)->clientCanCacheRead) {
2618 rc = cifs_invalidate_mapping(inode);
2619 if (rc)
2620 return rc;
2621 }
7a6a19b1
PS
2622
2623 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2624 if (rc == 0)
2625 vma->vm_ops = &cifs_file_vm_ops;
7a6a19b1
PS
2626 FreeXid(xid);
2627 return rc;
2628}
2629
1da177e4
LT
2630int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2631{
1da177e4
LT
2632 int rc, xid;
2633
2634 xid = GetXid();
abab095d 2635 rc = cifs_revalidate_file(file);
1da177e4 2636 if (rc) {
b6b38f70 2637 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
2638 FreeXid(xid);
2639 return rc;
2640 }
2641 rc = generic_file_mmap(file, vma);
ca83ce3d
JL
2642 if (rc == 0)
2643 vma->vm_ops = &cifs_file_vm_ops;
1da177e4
LT
2644 FreeXid(xid);
2645 return rc;
2646}
2647
1da177e4
LT
/*
 * ->readpages handler: batch contiguous pages from the VM's list into
 * rsize-sized async read requests.  Pages that cannot be submitted are
 * returned to the LRU and left for ->readpage.  Returns 0 or the last
 * error hit while building/submitting a batch.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
	     mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/* rdata takes a reference on the open file */
		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		/* send, reopening a stale handle and retrying on -EAGAIN */
		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			/* submission failed: hand the pages back to the LRU */
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
2797
2798static int cifs_readpage_worker(struct file *file, struct page *page,
2799 loff_t *poffset)
2800{
2801 char *read_data;
2802 int rc;
2803
56698236
SJ
2804 /* Is the page cached? */
2805 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2806 if (rc == 0)
2807 goto read_complete;
2808
1da177e4
LT
2809 page_cache_get(page);
2810 read_data = kmap(page);
2811 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2812
1da177e4 2813 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2814
1da177e4
LT
2815 if (rc < 0)
2816 goto io_error;
2817 else
b6b38f70 2818 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2819
e6a00296
JJS
2820 file->f_path.dentry->d_inode->i_atime =
2821 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2822
1da177e4
LT
2823 if (PAGE_CACHE_SIZE > rc)
2824 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2825
2826 flush_dcache_page(page);
2827 SetPageUptodate(page);
9dc06558
SJ
2828
2829 /* send this page to the cache */
2830 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2831
1da177e4 2832 rc = 0;
fb8c4b14 2833
1da177e4 2834io_error:
fb8c4b14 2835 kunmap(page);
1da177e4 2836 page_cache_release(page);
56698236
SJ
2837
2838read_complete:
1da177e4
LT
2839 return rc;
2840}
2841
2842static int cifs_readpage(struct file *file, struct page *page)
2843{
2844 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2845 int rc = -EACCES;
2846 int xid;
2847
2848 xid = GetXid();
2849
2850 if (file->private_data == NULL) {
0f3bc09e 2851 rc = -EBADF;
1da177e4 2852 FreeXid(xid);
0f3bc09e 2853 return rc;
1da177e4
LT
2854 }
2855
b6b38f70
JP
2856 cFYI(1, "readpage %p at offset %d 0x%x\n",
2857 page, (int)offset, (int)offset);
1da177e4
LT
2858
2859 rc = cifs_readpage_worker(file, page, &offset);
2860
2861 unlock_page(page);
2862
2863 FreeXid(xid);
2864 return rc;
2865}
2866
a403a0a3
SF
2867static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2868{
2869 struct cifsFileInfo *open_file;
2870
4477288a 2871 spin_lock(&cifs_file_list_lock);
a403a0a3 2872 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2873 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4477288a 2874 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2875 return 1;
2876 }
2877 }
4477288a 2878 spin_unlock(&cifs_file_list_lock);
a403a0a3
SF
2879 return 0;
2880}
2881
1da177e4
LT
2882/* We do not want to update the file size from server for inodes
2883 open for write - to avoid races with writepage extending
2884 the file - in the future we could consider allowing
fb8c4b14 2885 refreshing the inode only on increases in the file size
1da177e4
LT
2886 but this is tricky to do without racing with writebehind
2887 page caching in the current Linux kernel design */
4b18f2a9 2888bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2889{
a403a0a3 2890 if (!cifsInode)
4b18f2a9 2891 return true;
50c2f753 2892
a403a0a3
SF
2893 if (is_inode_writable(cifsInode)) {
2894 /* This inode is open for write at least once */
c32a0b68
SF
2895 struct cifs_sb_info *cifs_sb;
2896
c32a0b68 2897 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2898 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2899 /* since no page cache to corrupt on directio
c32a0b68 2900 we can change size safely */
4b18f2a9 2901 return true;
c32a0b68
SF
2902 }
2903
fb8c4b14 2904 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2905 return true;
7ba52631 2906
4b18f2a9 2907 return false;
23e7dd7d 2908 } else
4b18f2a9 2909 return true;
1da177e4
LT
2910}
2911
d9414774
NP
/*
 * ->write_begin handler: find or create the page for [pos, pos+len) and
 * decide whether it must be read from the server first.  PageChecked is
 * used to tell cifs_write_end that the parts of the page outside the
 * write were zeroed and are considered valid.  Always returns with
 * *pagep set (NULL on allocation failure).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2983
85f2d6b4
SJ
2984static int cifs_release_page(struct page *page, gfp_t gfp)
2985{
2986 if (PagePrivate(page))
2987 return 0;
2988
2989 return cifs_fscache_release_page(page, gfp);
2990}
2991
2992static void cifs_invalidate_page(struct page *page, unsigned long offset)
2993{
2994 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2995
2996 if (offset == 0)
2997 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2998}
2999
9ad1506b
PS
3000static int cifs_launder_page(struct page *page)
3001{
3002 int rc = 0;
3003 loff_t range_start = page_offset(page);
3004 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3005 struct writeback_control wbc = {
3006 .sync_mode = WB_SYNC_ALL,
3007 .nr_to_write = 0,
3008 .range_start = range_start,
3009 .range_end = range_end,
3010 };
3011
3012 cFYI(1, "Launder page: %p", page);
3013
3014 if (clear_page_dirty_for_io(page))
3015 rc = cifs_writepage_locked(page, &wbc);
3016
3017 cifs_fscache_invalidate_page(page, page->mapping->host);
3018 return rc;
3019}
3020
9b646972 3021void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
3022{
3023 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3024 oplock_break);
a5e18bc3 3025 struct inode *inode = cfile->dentry->d_inode;
3bc303c2 3026 struct cifsInodeInfo *cinode = CIFS_I(inode);
eb4b756b 3027 int rc = 0;
3bc303c2
JL
3028
3029 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 3030 if (cinode->clientCanCacheRead)
8737c930 3031 break_lease(inode, O_RDONLY);
d54ff732 3032 else
8737c930 3033 break_lease(inode, O_WRONLY);
3bc303c2
JL
3034 rc = filemap_fdatawrite(inode->i_mapping);
3035 if (cinode->clientCanCacheRead == 0) {
eb4b756b
JL
3036 rc = filemap_fdatawait(inode->i_mapping);
3037 mapping_set_error(inode->i_mapping, rc);
3bc303c2
JL
3038 invalidate_remote_inode(inode);
3039 }
b6b38f70 3040 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
3041 }
3042
85160e03
PS
3043 rc = cifs_push_locks(cfile);
3044 if (rc)
3045 cERROR(1, "Push locks rc = %d", rc);
3046
3bc303c2
JL
3047 /*
3048 * releasing stale oplock after recent reconnect of smb session using
3049 * a now incorrect file handle is not a data integrity issue but do
3050 * not bother sending an oplock release if session to server still is
3051 * disconnected since oplock already released by the server
3052 */
cdff08e7 3053 if (!cfile->oplock_break_cancelled) {
03776f45
PS
3054 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
3055 current->tgid, 0, 0, 0, 0,
3056 LOCKING_ANDX_OPLOCK_RELEASE, false,
12fed00d 3057 cinode->clientCanCacheRead ? 1 : 0);
b6b38f70 3058 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 3059 }
3bc303c2
JL
3060}
3061
f5e54d6e 3062const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
3063 .readpage = cifs_readpage,
3064 .readpages = cifs_readpages,
3065 .writepage = cifs_writepage,
37c0eb46 3066 .writepages = cifs_writepages,
d9414774
NP
3067 .write_begin = cifs_write_begin,
3068 .write_end = cifs_write_end,
1da177e4 3069 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
3070 .releasepage = cifs_release_page,
3071 .invalidatepage = cifs_invalidate_page,
9ad1506b 3072 .launder_page = cifs_launder_page,
1da177e4 3073};
273d81d6
DK
3074
3075/*
3076 * cifs_readpages requires the server to support a buffer large enough to
3077 * contain the header plus one complete page of data. Otherwise, we need
3078 * to leave cifs_readpages out of the address space operations.
3079 */
f5e54d6e 3080const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
3081 .readpage = cifs_readpage,
3082 .writepage = cifs_writepage,
3083 .writepages = cifs_writepages,
d9414774
NP
3084 .write_begin = cifs_write_begin,
3085 .write_end = cifs_write_end,
273d81d6 3086 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
3087 .releasepage = cifs_release_page,
3088 .invalidatepage = cifs_invalidate_page,
9ad1506b 3089 .launder_page = cifs_launder_page,
273d81d6 3090};