CIFS: Use brlock cache for SMB2
/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause unnecessary access denied errors on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT)
                posix_flags |= SMB_O_CREAT;
        if (flags & O_EXCL)
                posix_flags |= SMB_O_EXCL;
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}
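
/*
 * Editorial example (not part of the original source): for a call such as
 * open(path, O_RDWR | O_CREAT | O_TRUNC), the two helpers above yield
 *
 *   cifs_convert_flags()   -> GENERIC_READ | GENERIC_WRITE
 *   cifs_get_disposition() -> FILE_OVERWRITE_IF
 *
 * which together describe the SMB open request sent to the server.
 */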

int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cFYI(1, "posix open %s", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;

        if (!tcon->ses->server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no POSIX flag combination that maps directly to
 *      the disposition FILE_SUPERSEDE (ie create the file whether or not
 *      it already exists). O_CREAT | O_TRUNC is similar, but it truncates
 *      an existing file rather than replacing it with a new one as
 *      FILE_SUPERSEDE does (which also applies the attributes / metadata
 *      passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag, and the
 *      read/write flags match reasonably. O_LARGEFILE is irrelevant because
 *      largefile support is always used by this client. The flags O_APPEND,
 *      O_DIRECT, O_DIRECTORY, O_FASYNC, O_NOFOLLOW and O_NONBLOCK need
 *      further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
                                          desired_access, create_options, fid,
                                          oplock, buf, cifs_sb);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, &fid->netfid);

out:
        kfree(buf);
        return rc;
}

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        mutex_lock(&cinode->lock_mutex);
        list_add(&fdlocks->llist, &cinode->llist);
        mutex_unlock(&cinode->lock_mutex);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);
        tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

        spin_lock(&cifs_file_list_lock);
        list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
        /* if readable file instance put first in list*/
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        file->private_data = cfile;
        return cfile;
}

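
/*
 * Editorial sketch (not part of the original source): the byte-range lock
 * bookkeeping wired up in cifs_new_fileinfo() above forms a two-level list,
 * guarded by cinode->lock_mutex:
 *
 *   cifsInodeInfo->llist
 *     -> cifs_fid_locks (one per open fid; ->cfile points back to the
 *        owning cifsFileInfo)
 *          -> ->locks: list of cifsLockInfo, the cached byte ranges
 *
 * cifs_find_lock_conflict() below walks the outer list, and
 * cifsFileInfo_put() tears both levels down on last close.
 */
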
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                     cifs_file->dentry->d_inode);
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;
                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;
                int rc = -ENOSYS;

                xid = get_xid();
                if (server->ops->close)
                        rc = server->ops->close(xid, tcon, &cifs_file->fid);
                free_xid(xid);
        }

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        mutex_lock(&cifsi->lock_mutex);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        mutex_unlock(&cifsi->lock_mutex);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        kfree(cifs_file);
}

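
/*
 * Editorial note (not part of the original source): the reference counting
 * around cifsFileInfo works as follows - cifs_new_fileinfo() creates the
 * structure with count == 1, cifsFileInfo_get()/cifsFileInfo_get_locked()
 * take extra references, and the final cifsFileInfo_put() closes the server
 * handle (via server->ops->close) and frees any cached lock records.
 */
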
int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
             inode, file->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix open succeeded");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cERROR(1, "server %s of type %s returned"
                                           " unexpected error on SMB posix open"
                                           ", disabling posix open support."
                                           " Check if server update available.",
                                           tcon->ses->serverName,
                                           tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }

        if (!posix_open_ok) {
                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc)
                        goto out;
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (tcon->ses->server->ops->close)
                        tcon->ses->server->ops->close(xid, tcon, &fid);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = NO_CHANGE_64,
                        .gid    = NO_CHANGE_64,
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

        /* BB list all locks open on this file and relock */

        return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_fid fid;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = cfile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Can not grab rename sem here because various ops, including those
         * that already have the rename sem can end up causing writepage to get
         * called and if the server was down that means we end up here, and we
         * can never tell if the caller already has the rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
             full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix reopen succeeded");
                        goto reopen_success;
                }
                /*
                 * fallthrough to retry open the old way on errors, especially
                 * in the reconnect path it is important to retry hard
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        /*
         * Can not refresh inode by passing in file_info buf to be returned by
         * CIFSSMBOpen and then calling get_inode_info with returned buf since
         * file might have write behind data that needs to be flushed and server
         * version of file size can be stale. If we knew for sure that inode was
         * not dirty locally we could do this.
         */
        rc = server->ops->open(xid, tcon, full_path, disposition,
                               desired_access, create_options, &fid, &oplock,
                               NULL, cifs_sb);
        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cFYI(1, "cifs_reopen returned 0x%x", rc);
                cFYI(1, "oplock: %d", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to server already and could deadlock if
         * we tried to flush data, and since we do not know if we have data that
         * would invalidate the current end of file on the server we can not go
         * to the server to get the new inode info.
         */

        server->ops->set_fid(cfile, &fid, oplock);
        cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cFYI(1, "Closedir inode = 0x%p", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cFYI(1, "Freeing private data in close dir");
        spin_lock(&cifs_file_list_lock);
        if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cFYI(1, "Closing uncompleted readdir with rc %d", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cifs_file_list_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cFYI(1, "closedir free smb buf in srch struct");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                *conf_lock = li;
                return true;
        }
        return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 cfile, conf_lock);
                if (rc)
                        break;
        }

        return rc;
}

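
/*
 * Editorial note (not part of the original source): in the two helpers
 * above, a cached lock conflicts with a requested range only if the ranges
 * overlap and the locks are incompatible - a shared (read) request does not
 * conflict with another shared lock, nor with a lock taken through the same
 * fid by the same process (as decided by server->ops->compare_fids and the
 * pid check).
 */
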
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * send the request to the server, or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        mutex_lock(&cinode->lock_mutex);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        mutex_lock(&cinode->lock_mutex);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        mutex_lock(&cinode->lock_mutex);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                mutex_unlock(&cinode->lock_mutex);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                mutex_lock(&cinode->lock_mutex);
                list_del_init(&lock->blist);
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * send the request to the server, or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        mutex_lock(&cinode->lock_mutex);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        mutex_unlock(&cinode->lock_mutex);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
        }
        return rc;
}

int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return rc;
        }

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        kfree(buf);
        free_xid(xid);
        return rc;
}

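
/*
 * Editorial note (illustrative, not part of the original source): the loop
 * above flushes the cached locks in batches. With a hypothetical max_buf of
 * 16644 bytes, the batch size would be
 *
 *   max_num = (16644 - sizeof(struct smb_hdr)) / sizeof(LOCKING_ANDX_RANGE);
 *
 * so each cifs_lockv() call carries up to max_num LOCKING_ANDX_RANGE
 * entries, and a new request is issued whenever the array fills up. The two
 * passes of the outer loop separate exclusive and shared lock types, since
 * one LOCKING_ANDX request describes a single lock type.
 */
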
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock, **before;
        unsigned int count = 0, i = 0;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return rc;
        }

        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                if ((*before)->fl_flags & FL_POSIX)
                        count++;
        }
        unlock_flocks();

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_mutex that
         * protects locking operations of this inode.
         */
        for (; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                flock = *before;
                if ((flock->fl_flags & FL_POSIX) == 0)
                        continue;
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cERROR(1, "Can't push all brlocks!");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = flock->fl_pid;
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
                el = el->next;
        }
        unlock_flocks();

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        free_xid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                return cifs_push_posix_locks(cfile);

        return tcon->ses->server->ops->push_mand_locks(cfile);
}

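
/*
 * Editorial note (not part of the original source): the dispatch above is
 * the point the "brlock cache for SMB2" change targets - POSIX-style locks
 * go through CIFSSMBPosixLock(), while pushing cached mandatory locks is
 * routed through server->ops->push_mand_locks (cifs_push_mandatory_locks()
 * above being the SMB1 flavor), so an SMB2-specific handler can presumably
 * reuse the same per-inode brlock cache.
 */
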
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
                bool *wait_flag, struct TCP_Server_Info *server)
{
        if (flock->fl_flags & FL_POSIX)
                cFYI(1, "Posix");
        if (flock->fl_flags & FL_FLOCK)
                cFYI(1, "Flock");
        if (flock->fl_flags & FL_SLEEP) {
                cFYI(1, "Blocking lock");
                *wait_flag = true;
        }
        if (flock->fl_flags & FL_ACCESS)
                cFYI(1, "Process suspended by mandatory locking - "
                        "not implemented yet");
        if (flock->fl_flags & FL_LEASE)
                cFYI(1, "Lease on file - not implemented yet");
        if (flock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

        *type = server->vals->large_lock_type;
        if (flock->fl_type == F_WRLCK) {
                cFYI(1, "F_WRLCK ");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_UNLCK) {
                cFYI(1, "F_UNLCK");
                *type |= server->vals->unlock_lock_type;
                *unlock = 1;
                /* Check if unlock includes more than one lock range */
        } else if (flock->fl_type == F_RDLCK) {
                cFYI(1, "F_RDLCK");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_EXLCK) {
                cFYI(1, "F_EXLCK");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_SHLCK) {
                cFYI(1, "F_SHLCK");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else
                cFYI(1, "Unknown type of lock");
}

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
           bool wait_flag, bool posix_lck, unsigned int xid)
{
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        __u16 netfid = cfile->fid.netfid;

        if (posix_lck) {
                int posix_lock_type;

                rc = cifs_posix_lock_test(file, flock);
                if (!rc)
                        return rc;

                if (type & server->vals->shared_lock_type)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
                rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
                                      flock->fl_start, length, flock,
                                      posix_lock_type, wait_flag);
                return rc;
        }

        rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
        if (!rc)
                return rc;

        /* BB we could chain these into one lock request BB */
        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
                                    1, 0, false);
        if (rc == 0) {
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                            type, 0, 1, false);
                flock->fl_type = F_UNLCK;
                if (rc != 0)
                        cERROR(1, "Error unlocking previously locked "
                                  "range %d during test of lock", rc);
                return 0;
        }

        if (type & server->vals->shared_lock_type) {
                flock->fl_type = F_WRLCK;
                return 0;
        }

        type &= ~server->vals->exclusive_lock_type;

        rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                    type | server->vals->shared_lock_type,
                                    1, 0, false);
        if (rc == 0) {
                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                            type | server->vals->shared_lock_type,
                                            0, 1, false);
                flock->fl_type = F_RDLCK;
                if (rc != 0)
                        cERROR(1, "Error unlocking previously locked "
                                  "range %d during test of lock", rc);
        } else
                flock->fl_type = F_WRLCK;

        return 0;
}

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
        struct list_head *li, *tmp;
        list_for_each_safe(li, tmp, source)
                list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, llist, llist) {
                cifs_del_lock_waiters(li);
                list_del(&li->llist);
                kfree(li);
        }
}

int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
                  unsigned int xid)
{
        int rc = 0, stored_rc;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        unsigned int i;
        unsigned int max_num, num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifsLockInfo *li, *tmp;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct list_head tmp_llist;

        INIT_LIST_HEAD(&tmp_llist);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf)
                return -EINVAL;

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        mutex_lock(&cinode->lock_mutex);
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (flock->fl_start > li->offset ||
                            (flock->fl_start + length) <
                            (li->offset + li->length))
                                continue;
                        if (current->tgid != li->pid)
                                continue;
                        if (types[i] != li->type)
                                continue;
                        if (cinode->can_cache_brlcks) {
                                /*
                                 * We can cache brlock requests - simply remove
                                 * a lock from the file's list.
                                 */
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
                                kfree(li);
                                continue;
                        }
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        /*
                         * We need to save a lock here to let us add it again to
                         * the file's list if the unlock range request fails on
                         * the server.
                         */
                        list_move(&li->llist, &tmp_llist);
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       li->type, num, 0, buf);
                                if (stored_rc) {
                                        /*
                                         * We failed on the unlock range
                                         * request - add all locks from the tmp
                                         * list to the head of the file's list.
                                         */
                                        cifs_move_llist(&tmp_llist,
                                                        &cfile->llist->locks);
                                        rc = stored_rc;
                                } else
                                        /*
                                         * The unlock range request succeeded -
                                         * free the tmp list.
                                         */
                                        cifs_free_llist(&tmp_llist);
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               types[i], num, 0, buf);
                        if (stored_rc) {
                                cifs_move_llist(&tmp_llist,
                                                &cfile->llist->locks);
                                rc = stored_rc;
                        } else
                                cifs_free_llist(&tmp_llist);
                }
        }

        mutex_unlock(&cinode->lock_mutex);
        kfree(buf);
        return rc;
}

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
           bool wait_flag, bool posix_lck, int lock, int unlock,
           unsigned int xid)
{
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        if (posix_lck) {
                int posix_lock_type;

                rc = cifs_posix_lock_set(file, flock);
                if (!rc || rc < 0)
                        return rc;

                if (type & server->vals->shared_lock_type)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;

                if (unlock == 1)
                        posix_lock_type = CIFS_UNLCK;

                rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
                                      current->tgid, flock->fl_start, length,
                                      NULL, posix_lock_type, wait_flag);
                goto out;
        }

        if (lock) {
                struct cifsLockInfo *lock;

                lock = cifs_lock_init(flock->fl_start, length, type);
                if (!lock)
                        return -ENOMEM;

                rc = cifs_lock_add_if(cfile, lock, wait_flag);
                if (rc < 0)
                        kfree(lock);
                if (rc <= 0)
                        goto out;

                rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
                                            type, 1, 0, wait_flag);
                if (rc) {
                        kfree(lock);
                        goto out;
                }

                cifs_lock_add(cfile, lock);
        } else if (unlock)
                rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
        if (flock->fl_flags & FL_POSIX)
                posix_lock_file_wait(file, flock);
        return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
        int rc, xid;
        int lock = 0, unlock = 0;
        bool wait_flag = false;
        bool posix_lck = false;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode;
        struct cifsFileInfo *cfile;
        __u16 netfid;
        __u32 type;

        rc = -EACCES;
        xid = get_xid();

        cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
                "end: %lld", cmd, flock->fl_flags, flock->fl_type,
                flock->fl_start, flock->fl_end);

        cfile = (struct cifsFileInfo *)file->private_data;
        tcon = tlink_tcon(cfile->tlink);

        cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
                        tcon->ses->server);

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        netfid = cfile->fid.netfid;
        cinode = CIFS_I(file->f_path.dentry->d_inode);

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                posix_lck = true;
        /*
         * BB add code here to normalize offset and length to account for
         * negative length which we can not accept over the wire.
         */
        if (IS_GETLK(cmd)) {
                rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
                free_xid(xid);
                return rc;
        }

        if (!lock && !unlock) {
                /*
                 * if no lock or unlock then nothing to do since we do not
                 * know what it is
                 */
                free_xid(xid);
                return -EOPNOTSUPP;
        }

        rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
                        xid);
        free_xid(xid);
        return rc;
}

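
/*
 * Editorial summary (not part of the original source) of the fcntl lock
 * path above: cifs_lock() classifies the request via cifs_read_flock(),
 * chooses POSIX or mandatory (Windows) semantics based on the negotiated
 * unix extensions and mount flags, then hands off to cifs_getlk() for
 * F_GETLK or cifs_setlk() for F_SETLK/F_SETLKW. While
 * cinode->can_cache_brlcks is true (typically while an exclusive oplock is
 * still held), lock state is kept purely client side and only pushed to the
 * server later via cifs_push_locks().
 */
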
/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
                unsigned int bytes_written)
{
        loff_t end_of_write = offset + bytes_written;

        if (end_of_write > cifsi->server_eof)
                cifsi->server_eof = end_of_write;
}

static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
           size_t write_size, loff_t *offset)
{
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        unsigned int xid;
        struct dentry *dentry = open_file->dentry;
        struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
        struct cifs_io_parms io_parms;

        cifs_sb = CIFS_SB(dentry->d_sb);

        cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
             *offset, dentry->d_name.name);

        tcon = tlink_tcon(open_file->tlink);
        server = tcon->ses->server;

        if (!server->ops->sync_write)
                return -ENOSYS;

        xid = get_xid();

        for (total_written = 0; write_size > total_written;
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        struct kvec iov[2];
                        unsigned int len;

                        if (open_file->invalidHandle) {
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
                                   reopen_file not to flush data to
                                   server now */
                                rc = cifs_reopen_file(open_file, false);
                                if (rc != 0)
                                        break;
                        }

                        len = min((size_t)cifs_sb->wsize,
                                  write_size - total_written);
                        /* iov[0] is reserved for smb header */
                        iov[1].iov_base = (char *)write_data + total_written;
                        iov[1].iov_len = len;
                        io_parms.pid = pid;
                        io_parms.tcon = tcon;
                        io_parms.offset = *offset;
                        io_parms.length = len;
                        rc = server->ops->sync_write(xid, open_file, &io_parms,
                                                     &bytes_written, iov, 1);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
                                break;
                        else {
                                free_xid(xid);
                                return rc;
                        }
                } else {
                        spin_lock(&dentry->d_inode->i_lock);
                        cifs_update_eof(cifsi, *offset, bytes_written);
                        spin_unlock(&dentry->d_inode->i_lock);
                        *offset += bytes_written;
                }
        }

        cifs_stats_bytes_written(tcon, total_written);

        if (total_written > 0) {
                spin_lock(&dentry->d_inode->i_lock);
                if (*offset > dentry->d_inode->i_size)
                        i_size_write(dentry->d_inode, *offset);
                spin_unlock(&dentry->d_inode->i_lock);
        }
        mark_inode_dirty_sync(dentry->d_inode);
        free_xid(xid);
        return total_written;
}

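
/*
 * Editorial note (not part of the original source): cifs_write() above
 * loops in chunks of at most cifs_sb->wsize bytes, retrying each chunk
 * while the server handle is being re-established (-EAGAIN after
 * cifs_reopen_file()), and advances *offset and the cached server_eof only
 * by the bytes the server actually acknowledged in bytes_written.
 */
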
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
{
        struct cifsFileInfo *open_file = NULL;
        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;

        spin_lock(&cifs_file_list_lock);
        /* we could simply get the first_list_entry since write-only entries
           are always at the end of the list but since the first entry might
           have a close pending, we go through the whole list */
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (fsuid_only && open_file->uid != current_fsuid())
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
                        if (!open_file->invalidHandle) {
                                /* found a good file */
                                /* lock it so it will not be closed on us */
                                cifsFileInfo_get_locked(open_file);
                                spin_unlock(&cifs_file_list_lock);
                                return open_file;
                        } /* else might as well continue, and look for
                             another, or simply have the caller reopen it
                             again rather than trying to fix this handle */
                } else /* write only file */
                        break; /* write only files are last so must be done */
        }
        spin_unlock(&cifs_file_list_lock);
        return NULL;
}

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
{
        struct cifsFileInfo *open_file, *inv_file = NULL;
        struct cifs_sb_info *cifs_sb;
        bool any_available = false;
        int rc;
        unsigned int refind = 0;

        /* Having a null inode here (because mapping->host was set to zero by
           the VFS or MM) should not happen but we had reports of an oops (due
           to it being zero) during stress testcases so we need to check for
           it */

        if (cifs_inode == NULL) {
                cERROR(1, "Null inode passed to cifs_writeable_file");
                dump_stack();
                return NULL;
        }

        cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;

        spin_lock(&cifs_file_list_lock);
refind_writable:
        if (refind > MAX_REOPEN_ATT) {
                spin_unlock(&cifs_file_list_lock);
                return NULL;
        }
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (!any_available && open_file->pid != current->tgid)
                        continue;
                if (fsuid_only && open_file->uid != current_fsuid())
                        continue;
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
                                cifsFileInfo_get_locked(open_file);
                                spin_unlock(&cifs_file_list_lock);
                                return open_file;
                        } else {
                                if (!inv_file)
                                        inv_file = open_file;
                        }
                }
        }
        /* couldn't find usable FH with same pid, try any available */
        if (!any_available) {
                any_available = true;
                goto refind_writable;
        }

        if (inv_file) {
                any_available = false;
                cifsFileInfo_get_locked(inv_file);
        }

        spin_unlock(&cifs_file_list_lock);

        if (inv_file) {
                rc = cifs_reopen_file(inv_file, false);
                if (!rc)
                        return inv_file;
                else {
                        spin_lock(&cifs_file_list_lock);
                        list_move_tail(&inv_file->flist,
                                       &cifs_inode->openFileList);
                        spin_unlock(&cifs_file_list_lock);
                        cifsFileInfo_put(inv_file);
                        spin_lock(&cifs_file_list_lock);
                        ++refind;
                        goto refind_writable;
                }
        }

        return NULL;
}

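
/*
 * Editorial note (not part of the original source): find_writable_file()
 * above prefers a valid handle opened by the calling process, then any
 * valid writable handle, and only then falls back to reopening an
 * invalidated handle (inv_file), giving up after MAX_REOPEN_ATT attempts.
 */
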
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

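/*
 * Writeback entry point: gather runs of contiguous dirty pages (up to
 * wsize worth) into a cifs_writedata and send each batch to the server
 * with the async write interface, retrying on -EAGAIN when the caller
 * asked for data integrity (WB_SYNC_ALL).
 */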
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;
	loff_t isize = i_size_read(mapping->host);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= isize) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(isize - page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
					wdata->tailsz;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

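/*
 * Write one page back synchronously via cifs_partialpagewrite. The
 * caller holds the page lock; -EAGAIN is retried in place for
 * WB_SYNC_ALL, otherwise the page is redirtied for a later pass.
 */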
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

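/*
 * Complete a buffered write begun in cifs_write_begin. If the page is
 * not up to date, fall back to a synchronous write of the copied
 * bytes; otherwise just dirty the page and let writeback send it.
 */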
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

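/*
 * fsync for strict cache mode: flush dirty pages in the given range,
 * invalidate the mapping when we do not hold a read oplock, then ask
 * the server to flush the handle unless the mount disabled server
 * flushes (CIFS_MOUNT_NOSSYNC).
 */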
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

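/*
 * Plain fsync: flush dirty pages for the range and ask the server to
 * flush the file handle, without invalidating the page cache.
 */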
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As the file closes, flush all cached write data for this inode and
 * check for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

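/*
 * Allocate num_pages pages for an uncached write. On failure, release
 * everything allocated so far and return -ENOMEM.
 */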
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

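/*
 * Completion work for an uncached write: update the cached server EOF
 * (and i_size if it grew), wake up the waiter, then drop the page and
 * wdata references.
 */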
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

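/*
 * Uncached write path: copy the user's iovec into freshly allocated
 * pages, send each wsize-sized chunk as an async write, then wait for
 * the replies in order of increasing offset and total up the result.
 */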
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize the case when signing is disabled. We could drop
	 * this extra memory-to-memory copying and use the iovec buffers
	 * directly to construct the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server
	 * exactly from pos to pos+len-1 rather than flush all affected
	 * pages, because flushing may cause an error with mandatory locks
	 * held on those pages but not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}

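/*
 * Allocate a cifs_readdata with room to track nr_pages pages and with
 * its completion work routine preset. Freed via cifs_readdata_release
 * when the last reference is dropped.
 */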
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/*
		 * rdata was zeroed on allocation, so stop at the first
		 * NULL slot rather than calling put_page() on pages that
		 * were never allocated
		 */
		for (i = 0; i < nr_pages && rdata->pages[i]; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

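/* attempt to send the read to the server, retrying on any -EAGAIN */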
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

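/*
 * Receive "len" bytes from the socket directly into rdata's pages,
 * zero-filling the tail of a partial page and releasing any pages the
 * response did not reach.
 */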
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

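/*
 * Uncached read path: split the request into rsize-sized async reads,
 * then collect each reply in order of increasing offset and copy the
 * received data into the user's iovec.
 */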
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* must not "goto error" here -- rdata is NULL and
			   the error path would drop a reference on it */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server every time
	 * if we don't have a level II oplock, because the server can delay
	 * the mtime change - so we can't decide whether to invalidate the
	 * inode. Reading pages can also fail if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

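/*
 * Synchronous read path: issue reads of up to rsize bytes at a time,
 * reopening the handle and retrying on -EAGAIN, until read_size bytes
 * have been read or the server returns an error or end of file.
 */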
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * it negotiated, since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

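/*
 * Completion work for readahead: on success mark each page uptodate
 * and hand it to fscache, then unlock and release all pages.
 */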
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

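/*
 * Like cifs_uncached_read_into_pages, but for readahead: pages lying
 * beyond the server's probable EOF are zero-filled and marked uptodate
 * instead of being read from the socket.
 */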
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
			     i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
			     i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

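/*
 * Readahead entry point. After trying fscache first, group contiguous
 * pages from page_list (up to rsize worth) into cifs_readdata requests
 * and issue them as async reads.
 */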
static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
	     mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						     page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

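/*
 * Read a single page, trying fscache first and falling back to a
 * synchronous cifs_read, zero-filling the tail and marking the page
 * uptodate on success.
 */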
static int cifs_readpage_worker(struct file *file, struct page *page,
				loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
	     page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes
 * open for write, to avoid races with writepage extending the file.
 * In the future we could consider allowing refreshing the inode only
 * on increases in the file size, but this is tricky to do without
 * racing with writebehind page caching in the current Linux kernel
 * design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

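/*
 * Prepare a page for a buffered write. When we hold a read oplock and
 * the write covers or extends past EOF we can skip reading the page in
 * from the server; otherwise a short write later falls back to a sync
 * write in cifs_write_end.
 */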
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

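/*
 * Work item run when the server breaks our oplock: flush (and, if we
 * lose the read oplock, invalidate) cached data, push cached byte-range
 * locks to the server, then acknowledge the break unless it was
 * cancelled by a reconnect.
 */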
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session (using a now-incorrect file handle) is not a data
	 * integrity issue, but do not bother sending an oplock release if
	 * the session to the server is still disconnected, since the
	 * oplock has already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};