cifs: on multiuser mount, set ownership to current_fsuid/current_fsgid (try #7)
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / cifs / file.c
CommitLineData
1da177e4
LT
1/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French (sfrench@us.ibm.com)
7ee1af76 8 * Jeremy Allison (jra@samba.org)
1da177e4
LT
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
37c0eb46 25#include <linux/backing-dev.h>
1da177e4
LT
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
37c0eb46 30#include <linux/writeback.h>
6f88cc2e 31#include <linux/task_io_accounting_ops.h>
23e7dd7d 32#include <linux/delay.h>
3bc303c2 33#include <linux/mount.h>
5a0e3ad6 34#include <linux/slab.h>
1da177e4
LT
35#include <asm/div64.h>
36#include "cifsfs.h"
37#include "cifspdu.h"
38#include "cifsglob.h"
39#include "cifsproto.h"
40#include "cifs_unicode.h"
41#include "cifs_debug.h"
42#include "cifs_fs_sb.h"
9451a9a5 43#include "fscache.h"
1da177e4 44
1da177e4
LT
45static inline int cifs_convert_flags(unsigned int flags)
46{
47 if ((flags & O_ACCMODE) == O_RDONLY)
48 return GENERIC_READ;
49 else if ((flags & O_ACCMODE) == O_WRONLY)
50 return GENERIC_WRITE;
51 else if ((flags & O_ACCMODE) == O_RDWR) {
52 /* GENERIC_ALL is too much permission to request
53 can cause unnecessary access denied on create */
54 /* return GENERIC_ALL; */
55 return (GENERIC_READ | GENERIC_WRITE);
56 }
57
e10f7b55
JL
58 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
59 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
60 FILE_READ_DATA);
7fc8f4e9 61}
e10f7b55 62
7fc8f4e9
SF
63static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
64{
65 fmode_t posix_flags = 0;
e10f7b55 66
7fc8f4e9
SF
67 if ((flags & O_ACCMODE) == O_RDONLY)
68 posix_flags = FMODE_READ;
69 else if ((flags & O_ACCMODE) == O_WRONLY)
70 posix_flags = FMODE_WRITE;
71 else if ((flags & O_ACCMODE) == O_RDWR) {
72 /* GENERIC_ALL is too much permission to request
73 can cause unnecessary access denied on create */
74 /* return GENERIC_ALL; */
75 posix_flags = FMODE_READ | FMODE_WRITE;
76 }
77 /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
78 reopening a file. They had their effect on the original open */
79 if (flags & O_APPEND)
80 posix_flags |= (fmode_t)O_APPEND;
6b2f3d1f
CH
81 if (flags & O_DSYNC)
82 posix_flags |= (fmode_t)O_DSYNC;
83 if (flags & __O_SYNC)
84 posix_flags |= (fmode_t)__O_SYNC;
7fc8f4e9
SF
85 if (flags & O_DIRECTORY)
86 posix_flags |= (fmode_t)O_DIRECTORY;
87 if (flags & O_NOFOLLOW)
88 posix_flags |= (fmode_t)O_NOFOLLOW;
89 if (flags & O_DIRECT)
90 posix_flags |= (fmode_t)O_DIRECT;
91
92 return posix_flags;
1da177e4
LT
93}
94
95static inline int cifs_get_disposition(unsigned int flags)
96{
97 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
98 return FILE_CREATE;
99 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
100 return FILE_OVERWRITE_IF;
101 else if ((flags & O_CREAT) == O_CREAT)
102 return FILE_OPEN_IF;
55aa2e09
SF
103 else if ((flags & O_TRUNC) == O_TRUNC)
104 return FILE_OVERWRITE;
1da177e4
LT
105 else
106 return FILE_OPEN;
107}
108
276a74a4 109/* all arguments to this function must be checked for validity in caller */
590a3fe0
JL
110static inline int
111cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
51c81764 112 struct cifsInodeInfo *pCifsInode, __u32 oplock,
590a3fe0 113 u16 netfid)
276a74a4 114{
276a74a4 115
276a74a4 116 write_lock(&GlobalSMBSeslock);
276a74a4
SF
117
118 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
119 if (pCifsInode == NULL) {
120 write_unlock(&GlobalSMBSeslock);
121 return -EINVAL;
122 }
123
276a74a4
SF
124 if (pCifsInode->clientCanCacheRead) {
125 /* we have the inode open somewhere else
126 no need to discard cache data */
127 goto psx_client_can_cache;
128 }
129
130 /* BB FIXME need to fix this check to move it earlier into posix_open
131 BB fIX following section BB FIXME */
132
133 /* if not oplocked, invalidate inode pages if mtime or file
134 size changed */
135/* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
136 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
137 (file->f_path.dentry->d_inode->i_size ==
138 (loff_t)le64_to_cpu(buf->EndOfFile))) {
b6b38f70 139 cFYI(1, "inode unchanged on server");
276a74a4
SF
140 } else {
141 if (file->f_path.dentry->d_inode->i_mapping) {
142 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
143 if (rc != 0)
144 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
145 }
b6b38f70
JP
146 cFYI(1, "invalidating remote inode since open detected it "
147 "changed");
276a74a4
SF
148 invalidate_remote_inode(file->f_path.dentry->d_inode);
149 } */
150
151psx_client_can_cache:
152 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
153 pCifsInode->clientCanCacheAll = true;
154 pCifsInode->clientCanCacheRead = true;
b6b38f70
JP
155 cFYI(1, "Exclusive Oplock granted on inode %p",
156 file->f_path.dentry->d_inode);
276a74a4
SF
157 } else if ((oplock & 0xF) == OPLOCK_READ)
158 pCifsInode->clientCanCacheRead = true;
159
160 /* will have to change the unlock if we reenable the
161 filemap_fdatawrite (which does not seem necessary */
162 write_unlock(&GlobalSMBSeslock);
163 return 0;
164}
165
1da177e4 166/* all arguments to this function must be checked for validity in caller */
db460242 167static inline int cifs_open_inode_helper(struct inode *inode,
a347ecb2 168 struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
1da177e4
LT
169 char *full_path, int xid)
170{
db460242 171 struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
1da177e4
LT
172 struct timespec temp;
173 int rc;
174
1da177e4
LT
175 if (pCifsInode->clientCanCacheRead) {
176 /* we have the inode open somewhere else
177 no need to discard cache data */
178 goto client_can_cache;
179 }
180
181 /* BB need same check in cifs_create too? */
182 /* if not oplocked, invalidate inode pages if mtime or file
183 size changed */
07119a4d 184 temp = cifs_NTtimeToUnix(buf->LastWriteTime);
db460242
JL
185 if (timespec_equal(&inode->i_mtime, &temp) &&
186 (inode->i_size ==
1da177e4 187 (loff_t)le64_to_cpu(buf->EndOfFile))) {
b6b38f70 188 cFYI(1, "inode unchanged on server");
1da177e4 189 } else {
db460242 190 if (inode->i_mapping) {
ff215713
SF
191 /* BB no need to lock inode until after invalidate
192 since namei code should already have it locked? */
db460242 193 rc = filemap_write_and_wait(inode->i_mapping);
cea21805 194 if (rc != 0)
db460242 195 pCifsInode->write_behind_rc = rc;
1da177e4 196 }
b6b38f70
JP
197 cFYI(1, "invalidating remote inode since open detected it "
198 "changed");
db460242 199 invalidate_remote_inode(inode);
1da177e4
LT
200 }
201
202client_can_cache:
c18c842b 203 if (pTcon->unix_ext)
db460242
JL
204 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
205 xid);
1da177e4 206 else
db460242
JL
207 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
208 xid, NULL);
1da177e4 209
a347ecb2 210 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
4b18f2a9
SF
211 pCifsInode->clientCanCacheAll = true;
212 pCifsInode->clientCanCacheRead = true;
db460242 213 cFYI(1, "Exclusive Oplock granted on inode %p", inode);
a347ecb2 214 } else if ((oplock & 0xF) == OPLOCK_READ)
4b18f2a9 215 pCifsInode->clientCanCacheRead = true;
1da177e4
LT
216
217 return rc;
218}
219
220int cifs_open(struct inode *inode, struct file *file)
221{
222 int rc = -EACCES;
590a3fe0
JL
223 int xid;
224 __u32 oplock;
1da177e4 225 struct cifs_sb_info *cifs_sb;
276a74a4 226 struct cifsTconInfo *tcon;
7ffec372 227 struct tcon_link *tlink;
6ca9f3ba 228 struct cifsFileInfo *pCifsFile = NULL;
1da177e4 229 struct cifsInodeInfo *pCifsInode;
1da177e4
LT
230 char *full_path = NULL;
231 int desiredAccess;
232 int disposition;
233 __u16 netfid;
234 FILE_ALL_INFO *buf = NULL;
235
236 xid = GetXid();
237
238 cifs_sb = CIFS_SB(inode->i_sb);
7ffec372
JL
239 tlink = cifs_sb_tlink(cifs_sb);
240 if (IS_ERR(tlink)) {
241 FreeXid(xid);
242 return PTR_ERR(tlink);
243 }
244 tcon = tlink_tcon(tlink);
1da177e4 245
a6ce4932 246 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 247
e6a00296 248 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 249 if (full_path == NULL) {
0f3bc09e 250 rc = -ENOMEM;
232341ba 251 goto out;
1da177e4
LT
252 }
253
b6b38f70
JP
254 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
255 inode, file->f_flags, full_path);
276a74a4
SF
256
257 if (oplockEnabled)
258 oplock = REQ_OPLOCK;
259 else
260 oplock = 0;
261
64cc2c63
SF
262 if (!tcon->broken_posix_open && tcon->unix_ext &&
263 (tcon->ses->capabilities & CAP_UNIX) &&
276a74a4
SF
264 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
265 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
266 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
fa588e0c 267 oflags |= SMB_O_CREAT;
276a74a4 268 /* can not refresh inode info since size could be stale */
2422f676 269 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
fa588e0c
SF
270 cifs_sb->mnt_file_mode /* ignored */,
271 oflags, &oplock, &netfid, xid);
276a74a4 272 if (rc == 0) {
b6b38f70 273 cFYI(1, "posix open succeeded");
276a74a4
SF
274 /* no need for special case handling of setting mode
275 on read only files needed here */
276
47c78b7f
JL
277 rc = cifs_posix_open_inode_helper(inode, file,
278 pCifsInode, oplock, netfid);
279 if (rc != 0) {
280 CIFSSMBClose(xid, tcon, netfid);
281 goto out;
282 }
283
2422f676
JL
284 pCifsFile = cifs_new_fileinfo(inode, netfid, file,
285 file->f_path.mnt,
13cfb733 286 tlink, oflags, oplock);
2422f676
JL
287 if (pCifsFile == NULL) {
288 CIFSSMBClose(xid, tcon, netfid);
289 rc = -ENOMEM;
2422f676 290 }
9451a9a5
SJ
291
292 cifs_fscache_set_inode_cookie(inode, file);
293
276a74a4 294 goto out;
64cc2c63
SF
295 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
296 if (tcon->ses->serverNOS)
b6b38f70 297 cERROR(1, "server %s of type %s returned"
64cc2c63
SF
298 " unexpected error on SMB posix open"
299 ", disabling posix open support."
300 " Check if server update available.",
301 tcon->ses->serverName,
b6b38f70 302 tcon->ses->serverNOS);
64cc2c63 303 tcon->broken_posix_open = true;
276a74a4
SF
304 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
305 (rc != -EOPNOTSUPP)) /* path not found or net err */
306 goto out;
64cc2c63
SF
307 /* else fallthrough to retry open the old way on network i/o
308 or DFS errors */
276a74a4
SF
309 }
310
1da177e4
LT
311 desiredAccess = cifs_convert_flags(file->f_flags);
312
313/*********************************************************************
314 * open flag mapping table:
fb8c4b14 315 *
1da177e4 316 * POSIX Flag CIFS Disposition
fb8c4b14 317 * ---------- ----------------
1da177e4
LT
318 * O_CREAT FILE_OPEN_IF
319 * O_CREAT | O_EXCL FILE_CREATE
320 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
321 * O_TRUNC FILE_OVERWRITE
322 * none of the above FILE_OPEN
323 *
324 * Note that there is not a direct match between disposition
fb8c4b14 325 * FILE_SUPERSEDE (ie create whether or not file exists although
1da177e4
LT
326 * O_CREAT | O_TRUNC is similar but truncates the existing
327 * file rather than creating a new file as FILE_SUPERSEDE does
328 * (which uses the attributes / metadata passed in on open call)
329 *?
fb8c4b14 330 *? O_SYNC is a reasonable match to CIFS writethrough flag
1da177e4
LT
331 *? and the read write flags match reasonably. O_LARGEFILE
332 *? is irrelevant because largefile support is always used
333 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
334 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
335 *********************************************************************/
336
337 disposition = cifs_get_disposition(file->f_flags);
338
1da177e4
LT
339 /* BB pass O_SYNC flag through on file attributes .. BB */
340
341 /* Also refresh inode by passing in file_info buf returned by SMBOpen
342 and calling get_inode_info with returned buf (at least helps
343 non-Unix server case) */
344
fb8c4b14
SF
345 /* BB we can not do this if this is the second open of a file
346 and the first handle has writebehind data, we might be
1da177e4
LT
347 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
348 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
349 if (!buf) {
350 rc = -ENOMEM;
351 goto out;
352 }
5bafd765 353
a6e8a845 354 if (tcon->ses->capabilities & CAP_NT_SMBS)
276a74a4 355 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
5bafd765 356 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
737b758c
SF
357 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
358 & CIFS_MOUNT_MAP_SPECIAL_CHR);
5bafd765
SF
359 else
360 rc = -EIO; /* no NT SMB support fall into legacy open below */
361
a9d02ad4
SF
362 if (rc == -EIO) {
363 /* Old server, try legacy style OpenX */
276a74a4 364 rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
a9d02ad4
SF
365 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
366 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
367 & CIFS_MOUNT_MAP_SPECIAL_CHR);
368 }
1da177e4 369 if (rc) {
b6b38f70 370 cFYI(1, "cifs_open returned 0x%x", rc);
1da177e4
LT
371 goto out;
372 }
3321b791 373
a347ecb2 374 rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
47c78b7f
JL
375 if (rc != 0)
376 goto out;
377
086f68bd 378 pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
13cfb733 379 tlink, file->f_flags, oplock);
6ca9f3ba 380 if (pCifsFile == NULL) {
1da177e4
LT
381 rc = -ENOMEM;
382 goto out;
383 }
1da177e4 384
9451a9a5
SJ
385 cifs_fscache_set_inode_cookie(inode, file);
386
fb8c4b14 387 if (oplock & CIFS_CREATE_ACTION) {
1da177e4
LT
388 /* time to set mode which we can not set earlier due to
389 problems creating new read-only files */
276a74a4 390 if (tcon->unix_ext) {
4e1e7fb9
JL
391 struct cifs_unix_set_info_args args = {
392 .mode = inode->i_mode,
393 .uid = NO_CHANGE_64,
394 .gid = NO_CHANGE_64,
395 .ctime = NO_CHANGE_64,
396 .atime = NO_CHANGE_64,
397 .mtime = NO_CHANGE_64,
398 .device = 0,
399 };
01ea95e3
JL
400 CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
401 cifs_sb->local_nls,
402 cifs_sb->mnt_cifs_flags &
737b758c 403 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4
LT
404 }
405 }
406
407out:
408 kfree(buf);
409 kfree(full_path);
410 FreeXid(xid);
7ffec372 411 cifs_put_tlink(tlink);
1da177e4
LT
412 return rc;
413}
414
0418726b 415/* Try to reacquire byte range locks that were released when session */
1da177e4
LT
416/* to server was lost */
417static int cifs_relock_file(struct cifsFileInfo *cifsFile)
418{
419 int rc = 0;
420
421/* BB list all locks open on this file and relock */
422
423 return rc;
424}
425
4b18f2a9 426static int cifs_reopen_file(struct file *file, bool can_flush)
1da177e4
LT
427{
428 int rc = -EACCES;
590a3fe0
JL
429 int xid;
430 __u32 oplock;
1da177e4 431 struct cifs_sb_info *cifs_sb;
7fc8f4e9 432 struct cifsTconInfo *tcon;
1da177e4
LT
433 struct cifsFileInfo *pCifsFile;
434 struct cifsInodeInfo *pCifsInode;
fb8c4b14 435 struct inode *inode;
1da177e4
LT
436 char *full_path = NULL;
437 int desiredAccess;
438 int disposition = FILE_OPEN;
439 __u16 netfid;
440
ad7a2926 441 if (file->private_data)
c21dfb69 442 pCifsFile = file->private_data;
ad7a2926 443 else
1da177e4
LT
444 return -EBADF;
445
446 xid = GetXid();
f0a71eb8 447 mutex_lock(&pCifsFile->fh_mutex);
4b18f2a9 448 if (!pCifsFile->invalidHandle) {
f0a71eb8 449 mutex_unlock(&pCifsFile->fh_mutex);
0f3bc09e 450 rc = 0;
1da177e4 451 FreeXid(xid);
0f3bc09e 452 return rc;
1da177e4
LT
453 }
454
e6a00296 455 if (file->f_path.dentry == NULL) {
b6b38f70 456 cERROR(1, "no valid name if dentry freed");
3a9f462f
SF
457 dump_stack();
458 rc = -EBADF;
459 goto reopen_error_exit;
460 }
461
462 inode = file->f_path.dentry->d_inode;
fb8c4b14 463 if (inode == NULL) {
b6b38f70 464 cERROR(1, "inode not valid");
3a9f462f
SF
465 dump_stack();
466 rc = -EBADF;
467 goto reopen_error_exit;
1da177e4 468 }
50c2f753 469
1da177e4 470 cifs_sb = CIFS_SB(inode->i_sb);
13cfb733 471 tcon = tlink_tcon(pCifsFile->tlink);
3a9f462f 472
1da177e4
LT
473/* can not grab rename sem here because various ops, including
474 those that already have the rename sem can end up causing writepage
475 to get called and if the server was down that means we end up here,
476 and we can never tell if the caller already has the rename_sem */
e6a00296 477 full_path = build_path_from_dentry(file->f_path.dentry);
1da177e4 478 if (full_path == NULL) {
3a9f462f
SF
479 rc = -ENOMEM;
480reopen_error_exit:
f0a71eb8 481 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4 482 FreeXid(xid);
3a9f462f 483 return rc;
1da177e4
LT
484 }
485
b6b38f70
JP
486 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
487 inode, file->f_flags, full_path);
1da177e4
LT
488
489 if (oplockEnabled)
490 oplock = REQ_OPLOCK;
491 else
4b18f2a9 492 oplock = 0;
1da177e4 493
7fc8f4e9
SF
494 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
495 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
496 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
497 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
498 /* can not refresh inode info since size could be stale */
2422f676 499 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
fa588e0c
SF
500 cifs_sb->mnt_file_mode /* ignored */,
501 oflags, &oplock, &netfid, xid);
7fc8f4e9 502 if (rc == 0) {
b6b38f70 503 cFYI(1, "posix reopen succeeded");
7fc8f4e9
SF
504 goto reopen_success;
505 }
506 /* fallthrough to retry open the old way on errors, especially
507 in the reconnect path it is important to retry hard */
508 }
509
510 desiredAccess = cifs_convert_flags(file->f_flags);
511
1da177e4 512 /* Can not refresh inode by passing in file_info buf to be returned
fb8c4b14
SF
513 by SMBOpen and then calling get_inode_info with returned buf
514 since file might have write behind data that needs to be flushed
1da177e4
LT
515 and server version of file size can be stale. If we knew for sure
516 that inode was not dirty locally we could do this */
517
7fc8f4e9 518 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
1da177e4 519 CREATE_NOT_DIR, &netfid, &oplock, NULL,
fb8c4b14 520 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
737b758c 521 CIFS_MOUNT_MAP_SPECIAL_CHR);
1da177e4 522 if (rc) {
f0a71eb8 523 mutex_unlock(&pCifsFile->fh_mutex);
b6b38f70
JP
524 cFYI(1, "cifs_open returned 0x%x", rc);
525 cFYI(1, "oplock: %d", oplock);
1da177e4 526 } else {
7fc8f4e9 527reopen_success:
1da177e4 528 pCifsFile->netfid = netfid;
4b18f2a9 529 pCifsFile->invalidHandle = false;
f0a71eb8 530 mutex_unlock(&pCifsFile->fh_mutex);
1da177e4
LT
531 pCifsInode = CIFS_I(inode);
532 if (pCifsInode) {
533 if (can_flush) {
cea21805
JL
534 rc = filemap_write_and_wait(inode->i_mapping);
535 if (rc != 0)
536 CIFS_I(inode)->write_behind_rc = rc;
1da177e4
LT
537 /* temporarily disable caching while we
538 go to server to get inode info */
4b18f2a9
SF
539 pCifsInode->clientCanCacheAll = false;
540 pCifsInode->clientCanCacheRead = false;
7fc8f4e9 541 if (tcon->unix_ext)
1da177e4
LT
542 rc = cifs_get_inode_info_unix(&inode,
543 full_path, inode->i_sb, xid);
544 else
545 rc = cifs_get_inode_info(&inode,
546 full_path, NULL, inode->i_sb,
8b1327f6 547 xid, NULL);
1da177e4
LT
548 } /* else we are writing out data to server already
549 and could deadlock if we tried to flush data, and
550 since we do not know if we have data that would
551 invalidate the current end of file on the server
552 we can not go to the server to get the new inod
553 info */
554 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
4b18f2a9
SF
555 pCifsInode->clientCanCacheAll = true;
556 pCifsInode->clientCanCacheRead = true;
b6b38f70
JP
557 cFYI(1, "Exclusive Oplock granted on inode %p",
558 file->f_path.dentry->d_inode);
1da177e4 559 } else if ((oplock & 0xF) == OPLOCK_READ) {
4b18f2a9
SF
560 pCifsInode->clientCanCacheRead = true;
561 pCifsInode->clientCanCacheAll = false;
1da177e4 562 } else {
4b18f2a9
SF
563 pCifsInode->clientCanCacheRead = false;
564 pCifsInode->clientCanCacheAll = false;
1da177e4
LT
565 }
566 cifs_relock_file(pCifsFile);
567 }
568 }
1da177e4
LT
569 kfree(full_path);
570 FreeXid(xid);
571 return rc;
572}
573
574int cifs_close(struct inode *inode, struct file *file)
575{
576 int rc = 0;
15745320 577 int xid, timeout;
1da177e4
LT
578 struct cifs_sb_info *cifs_sb;
579 struct cifsTconInfo *pTcon;
c21dfb69 580 struct cifsFileInfo *pSMBFile = file->private_data;
1da177e4
LT
581
582 xid = GetXid();
583
584 cifs_sb = CIFS_SB(inode->i_sb);
13cfb733 585 pTcon = tlink_tcon(pSMBFile->tlink);
1da177e4 586 if (pSMBFile) {
7ee1af76 587 struct cifsLockInfo *li, *tmp;
ddb4cbfc 588 write_lock(&GlobalSMBSeslock);
4b18f2a9 589 pSMBFile->closePend = true;
1da177e4
LT
590 if (pTcon) {
591 /* no sense reconnecting to close a file that is
592 already closed */
3b795210 593 if (!pTcon->need_reconnect) {
ddb4cbfc 594 write_unlock(&GlobalSMBSeslock);
15745320 595 timeout = 2;
6ab409b5 596 while ((atomic_read(&pSMBFile->count) != 1)
15745320 597 && (timeout <= 2048)) {
23e7dd7d
SF
598 /* Give write a better chance to get to
599 server ahead of the close. We do not
600 want to add a wait_q here as it would
601 increase the memory utilization as
602 the struct would be in each open file,
fb8c4b14 603 but this should give enough time to
23e7dd7d 604 clear the socket */
b6b38f70 605 cFYI(DBG2, "close delay, write pending");
23e7dd7d
SF
606 msleep(timeout);
607 timeout *= 4;
4891d539 608 }
ddb4cbfc
SF
609 if (!pTcon->need_reconnect &&
610 !pSMBFile->invalidHandle)
611 rc = CIFSSMBClose(xid, pTcon,
1da177e4 612 pSMBFile->netfid);
ddb4cbfc
SF
613 } else
614 write_unlock(&GlobalSMBSeslock);
615 } else
616 write_unlock(&GlobalSMBSeslock);
7ee1af76
JA
617
618 /* Delete any outstanding lock records.
619 We'll lose them when the file is closed anyway. */
796e5661 620 mutex_lock(&pSMBFile->lock_mutex);
7ee1af76
JA
621 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
622 list_del(&li->llist);
623 kfree(li);
624 }
796e5661 625 mutex_unlock(&pSMBFile->lock_mutex);
7ee1af76 626
cbe0476f 627 write_lock(&GlobalSMBSeslock);
1da177e4
LT
628 list_del(&pSMBFile->flist);
629 list_del(&pSMBFile->tlist);
cbe0476f 630 write_unlock(&GlobalSMBSeslock);
6ab409b5 631 cifsFileInfo_put(file->private_data);
1da177e4
LT
632 file->private_data = NULL;
633 } else
634 rc = -EBADF;
635
4efa53f0 636 read_lock(&GlobalSMBSeslock);
1da177e4 637 if (list_empty(&(CIFS_I(inode)->openFileList))) {
b6b38f70 638 cFYI(1, "closing last open instance for inode %p", inode);
1da177e4
LT
639 /* if the file is not open we do not know if we can cache info
640 on this inode, much less write behind and read ahead */
4b18f2a9
SF
641 CIFS_I(inode)->clientCanCacheRead = false;
642 CIFS_I(inode)->clientCanCacheAll = false;
1da177e4 643 }
4efa53f0 644 read_unlock(&GlobalSMBSeslock);
fb8c4b14 645 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
1da177e4
LT
646 rc = CIFS_I(inode)->write_behind_rc;
647 FreeXid(xid);
648 return rc;
649}
650
651int cifs_closedir(struct inode *inode, struct file *file)
652{
653 int rc = 0;
654 int xid;
c21dfb69 655 struct cifsFileInfo *pCFileStruct = file->private_data;
1da177e4
LT
656 char *ptmp;
657
b6b38f70 658 cFYI(1, "Closedir inode = 0x%p", inode);
1da177e4
LT
659
660 xid = GetXid();
661
662 if (pCFileStruct) {
13cfb733 663 struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
1da177e4 664
b6b38f70 665 cFYI(1, "Freeing private data in close dir");
ddb4cbfc 666 write_lock(&GlobalSMBSeslock);
4b18f2a9
SF
667 if (!pCFileStruct->srch_inf.endOfSearch &&
668 !pCFileStruct->invalidHandle) {
669 pCFileStruct->invalidHandle = true;
ddb4cbfc 670 write_unlock(&GlobalSMBSeslock);
1da177e4 671 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
b6b38f70
JP
672 cFYI(1, "Closing uncompleted readdir with rc %d",
673 rc);
1da177e4
LT
674 /* not much we can do if it fails anyway, ignore rc */
675 rc = 0;
ddb4cbfc
SF
676 } else
677 write_unlock(&GlobalSMBSeslock);
1da177e4
LT
678 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
679 if (ptmp) {
b6b38f70 680 cFYI(1, "closedir free smb buf in srch struct");
1da177e4 681 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
fb8c4b14 682 if (pCFileStruct->srch_inf.smallBuf)
d47d7c1a
SF
683 cifs_small_buf_release(ptmp);
684 else
685 cifs_buf_release(ptmp);
1da177e4 686 }
13cfb733 687 cifs_put_tlink(pCFileStruct->tlink);
1da177e4
LT
688 kfree(file->private_data);
689 file->private_data = NULL;
690 }
691 /* BB can we lock the filestruct while this is going on? */
692 FreeXid(xid);
693 return rc;
694}
695
7ee1af76
JA
696static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
697 __u64 offset, __u8 lockType)
698{
fb8c4b14
SF
699 struct cifsLockInfo *li =
700 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
7ee1af76
JA
701 if (li == NULL)
702 return -ENOMEM;
703 li->offset = offset;
704 li->length = len;
705 li->type = lockType;
796e5661 706 mutex_lock(&fid->lock_mutex);
7ee1af76 707 list_add(&li->llist, &fid->llist);
796e5661 708 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
709 return 0;
710}
711
1da177e4
LT
712int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
713{
714 int rc, xid;
1da177e4
LT
715 __u32 numLock = 0;
716 __u32 numUnlock = 0;
717 __u64 length;
4b18f2a9 718 bool wait_flag = false;
1da177e4 719 struct cifs_sb_info *cifs_sb;
13a6e42a 720 struct cifsTconInfo *tcon;
08547b03
SF
721 __u16 netfid;
722 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
13a6e42a 723 bool posix_locking = 0;
1da177e4
LT
724
725 length = 1 + pfLock->fl_end - pfLock->fl_start;
726 rc = -EACCES;
727 xid = GetXid();
728
b6b38f70 729 cFYI(1, "Lock parm: 0x%x flockflags: "
1da177e4 730 "0x%x flocktype: 0x%x start: %lld end: %lld",
fb8c4b14 731 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
b6b38f70 732 pfLock->fl_end);
1da177e4
LT
733
734 if (pfLock->fl_flags & FL_POSIX)
b6b38f70 735 cFYI(1, "Posix");
1da177e4 736 if (pfLock->fl_flags & FL_FLOCK)
b6b38f70 737 cFYI(1, "Flock");
1da177e4 738 if (pfLock->fl_flags & FL_SLEEP) {
b6b38f70 739 cFYI(1, "Blocking lock");
4b18f2a9 740 wait_flag = true;
1da177e4
LT
741 }
742 if (pfLock->fl_flags & FL_ACCESS)
b6b38f70
JP
743 cFYI(1, "Process suspended by mandatory locking - "
744 "not implemented yet");
1da177e4 745 if (pfLock->fl_flags & FL_LEASE)
b6b38f70 746 cFYI(1, "Lease on file - not implemented yet");
fb8c4b14 747 if (pfLock->fl_flags &
1da177e4 748 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
b6b38f70 749 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);
1da177e4
LT
750
751 if (pfLock->fl_type == F_WRLCK) {
b6b38f70 752 cFYI(1, "F_WRLCK ");
1da177e4
LT
753 numLock = 1;
754 } else if (pfLock->fl_type == F_UNLCK) {
b6b38f70 755 cFYI(1, "F_UNLCK");
1da177e4 756 numUnlock = 1;
d47d7c1a
SF
757 /* Check if unlock includes more than
758 one lock range */
1da177e4 759 } else if (pfLock->fl_type == F_RDLCK) {
b6b38f70 760 cFYI(1, "F_RDLCK");
1da177e4
LT
761 lockType |= LOCKING_ANDX_SHARED_LOCK;
762 numLock = 1;
763 } else if (pfLock->fl_type == F_EXLCK) {
b6b38f70 764 cFYI(1, "F_EXLCK");
1da177e4
LT
765 numLock = 1;
766 } else if (pfLock->fl_type == F_SHLCK) {
b6b38f70 767 cFYI(1, "F_SHLCK");
1da177e4
LT
768 lockType |= LOCKING_ANDX_SHARED_LOCK;
769 numLock = 1;
770 } else
b6b38f70 771 cFYI(1, "Unknown type of lock");
1da177e4 772
e6a00296 773 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 774 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
1da177e4
LT
775
776 if (file->private_data == NULL) {
0f3bc09e 777 rc = -EBADF;
1da177e4 778 FreeXid(xid);
0f3bc09e 779 return rc;
1da177e4 780 }
08547b03
SF
781 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
782
13a6e42a
SF
783 if ((tcon->ses->capabilities & CAP_UNIX) &&
784 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
acc18aa1 785 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
13a6e42a 786 posix_locking = 1;
08547b03
SF
787 /* BB add code here to normalize offset and length to
788 account for negative length which we can not accept over the
789 wire */
1da177e4 790 if (IS_GETLK(cmd)) {
fb8c4b14 791 if (posix_locking) {
08547b03 792 int posix_lock_type;
fb8c4b14 793 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
794 posix_lock_type = CIFS_RDLCK;
795 else
796 posix_lock_type = CIFS_WRLCK;
13a6e42a 797 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
fc94cdb9 798 length, pfLock,
08547b03
SF
799 posix_lock_type, wait_flag);
800 FreeXid(xid);
801 return rc;
802 }
803
804 /* BB we could chain these into one lock request BB */
13a6e42a 805 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
08547b03 806 0, 1, lockType, 0 /* wait flag */ );
1da177e4 807 if (rc == 0) {
13a6e42a 808 rc = CIFSSMBLock(xid, tcon, netfid, length,
1da177e4
LT
809 pfLock->fl_start, 1 /* numUnlock */ ,
810 0 /* numLock */ , lockType,
811 0 /* wait flag */ );
812 pfLock->fl_type = F_UNLCK;
813 if (rc != 0)
b6b38f70
JP
814 cERROR(1, "Error unlocking previously locked "
815 "range %d during test of lock", rc);
1da177e4
LT
816 rc = 0;
817
818 } else {
819 /* if rc == ERR_SHARING_VIOLATION ? */
f05337c6
PS
820 rc = 0;
821
822 if (lockType & LOCKING_ANDX_SHARED_LOCK) {
823 pfLock->fl_type = F_WRLCK;
824 } else {
825 rc = CIFSSMBLock(xid, tcon, netfid, length,
826 pfLock->fl_start, 0, 1,
827 lockType | LOCKING_ANDX_SHARED_LOCK,
828 0 /* wait flag */);
829 if (rc == 0) {
830 rc = CIFSSMBLock(xid, tcon, netfid,
831 length, pfLock->fl_start, 1, 0,
832 lockType |
833 LOCKING_ANDX_SHARED_LOCK,
834 0 /* wait flag */);
835 pfLock->fl_type = F_RDLCK;
836 if (rc != 0)
f19159dc 837 cERROR(1, "Error unlocking "
f05337c6 838 "previously locked range %d "
f19159dc 839 "during test of lock", rc);
f05337c6
PS
840 rc = 0;
841 } else {
842 pfLock->fl_type = F_WRLCK;
843 rc = 0;
844 }
845 }
1da177e4
LT
846 }
847
848 FreeXid(xid);
849 return rc;
850 }
7ee1af76
JA
851
852 if (!numLock && !numUnlock) {
853 /* if no lock or unlock then nothing
854 to do since we do not know what it is */
855 FreeXid(xid);
856 return -EOPNOTSUPP;
857 }
858
859 if (posix_locking) {
08547b03 860 int posix_lock_type;
fb8c4b14 861 if (lockType & LOCKING_ANDX_SHARED_LOCK)
08547b03
SF
862 posix_lock_type = CIFS_RDLCK;
863 else
864 posix_lock_type = CIFS_WRLCK;
50c2f753 865
fb8c4b14 866 if (numUnlock == 1)
beb84dc8 867 posix_lock_type = CIFS_UNLCK;
7ee1af76 868
13a6e42a 869 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
fc94cdb9 870 length, pfLock,
08547b03 871 posix_lock_type, wait_flag);
7ee1af76 872 } else {
c21dfb69 873 struct cifsFileInfo *fid = file->private_data;
7ee1af76
JA
874
875 if (numLock) {
13a6e42a 876 rc = CIFSSMBLock(xid, tcon, netfid, length,
fb8c4b14 877 pfLock->fl_start,
7ee1af76
JA
878 0, numLock, lockType, wait_flag);
879
880 if (rc == 0) {
881 /* For Windows locks we must store them. */
882 rc = store_file_lock(fid, length,
883 pfLock->fl_start, lockType);
884 }
885 } else if (numUnlock) {
886 /* For each stored lock that this unlock overlaps
887 completely, unlock it. */
888 int stored_rc = 0;
889 struct cifsLockInfo *li, *tmp;
890
6b70c955 891 rc = 0;
796e5661 892 mutex_lock(&fid->lock_mutex);
7ee1af76
JA
893 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
894 if (pfLock->fl_start <= li->offset &&
c19eb710 895 (pfLock->fl_start + length) >=
39db810c 896 (li->offset + li->length)) {
13a6e42a 897 stored_rc = CIFSSMBLock(xid, tcon,
fb8c4b14 898 netfid,
7ee1af76 899 li->length, li->offset,
4b18f2a9 900 1, 0, li->type, false);
7ee1af76
JA
901 if (stored_rc)
902 rc = stored_rc;
2c964d1f
PS
903 else {
904 list_del(&li->llist);
905 kfree(li);
906 }
7ee1af76
JA
907 }
908 }
796e5661 909 mutex_unlock(&fid->lock_mutex);
7ee1af76
JA
910 }
911 }
912
d634cc15 913 if (pfLock->fl_flags & FL_POSIX)
1da177e4
LT
914 posix_lock_file_wait(file, pfLock);
915 FreeXid(xid);
916 return rc;
917}
918
fbec9ab9
JL
919/*
920 * Set the timeout on write requests past EOF. For some servers (Windows)
921 * these calls can be very long.
922 *
923 * If we're writing >10M past the EOF we give a 180s timeout. Anything less
924 * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
925 * The 10M cutoff is totally arbitrary. A better scheme for this would be
926 * welcome if someone wants to suggest one.
927 *
928 * We may be able to do a better job with this if there were some way to
929 * declare that a file should be sparse.
930 */
931static int
932cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
933{
934 if (offset <= cifsi->server_eof)
935 return CIFS_STD_OP;
936 else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
937 return CIFS_VLONG_OP;
938 else
939 return CIFS_LONG_OP;
940}
941
942/* update the file size (if needed) after a write */
943static void
944cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
945 unsigned int bytes_written)
946{
947 loff_t end_of_write = offset + bytes_written;
948
949 if (end_of_write > cifsi->server_eof)
950 cifsi->server_eof = end_of_write;
951}
952
1da177e4
LT
953ssize_t cifs_user_write(struct file *file, const char __user *write_data,
954 size_t write_size, loff_t *poffset)
955{
956 int rc = 0;
957 unsigned int bytes_written = 0;
958 unsigned int total_written;
959 struct cifs_sb_info *cifs_sb;
960 struct cifsTconInfo *pTcon;
961 int xid, long_op;
962 struct cifsFileInfo *open_file;
fbec9ab9 963 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 964
e6a00296 965 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 966
b6b38f70
JP
967 /* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
968 *poffset, file->f_path.dentry->d_name.name); */
1da177e4
LT
969
970 if (file->private_data == NULL)
971 return -EBADF;
ba00ba64 972
c21dfb69 973 open_file = file->private_data;
13cfb733 974 pTcon = tlink_tcon(open_file->tlink);
50c2f753 975
838726c4
JL
976 rc = generic_write_checks(file, poffset, &write_size, 0);
977 if (rc)
978 return rc;
979
1da177e4 980 xid = GetXid();
1da177e4 981
fbec9ab9 982 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
983 for (total_written = 0; write_size > total_written;
984 total_written += bytes_written) {
985 rc = -EAGAIN;
986 while (rc == -EAGAIN) {
987 if (file->private_data == NULL) {
988 /* file has been closed on us */
989 FreeXid(xid);
990 /* if we have gotten here we have written some data
991 and blocked, and the file has been freed on us while
992 we blocked so return what we managed to write */
993 return total_written;
fb8c4b14 994 }
1da177e4
LT
995 if (open_file->closePend) {
996 FreeXid(xid);
997 if (total_written)
998 return total_written;
999 else
1000 return -EBADF;
1001 }
1002 if (open_file->invalidHandle) {
1da177e4
LT
1003 /* we could deadlock if we called
1004 filemap_fdatawait from here so tell
1005 reopen_file not to flush data to server
1006 now */
4b18f2a9 1007 rc = cifs_reopen_file(file, false);
1da177e4
LT
1008 if (rc != 0)
1009 break;
1010 }
1011
1012 rc = CIFSSMBWrite(xid, pTcon,
1013 open_file->netfid,
1014 min_t(const int, cifs_sb->wsize,
1015 write_size - total_written),
1016 *poffset, &bytes_written,
1017 NULL, write_data + total_written, long_op);
1018 }
1019 if (rc || (bytes_written == 0)) {
1020 if (total_written)
1021 break;
1022 else {
1023 FreeXid(xid);
1024 return rc;
1025 }
fbec9ab9
JL
1026 } else {
1027 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1028 *poffset += bytes_written;
fbec9ab9 1029 }
133672ef 1030 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1031 15 seconds is plenty */
1032 }
1033
a4544347 1034 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1035
1036 /* since the write may have blocked check these pointers again */
3677db10
SF
1037 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1038 struct inode *inode = file->f_path.dentry->d_inode;
fb8c4b14
SF
1039/* Do not update local mtime - server will set its actual value on write
1040 * inode->i_ctime = inode->i_mtime =
3677db10
SF
1041 * current_fs_time(inode->i_sb);*/
1042 if (total_written > 0) {
1043 spin_lock(&inode->i_lock);
1044 if (*poffset > file->f_path.dentry->d_inode->i_size)
1045 i_size_write(file->f_path.dentry->d_inode,
1da177e4 1046 *poffset);
3677db10 1047 spin_unlock(&inode->i_lock);
1da177e4 1048 }
fb8c4b14 1049 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1050 }
1051 FreeXid(xid);
1052 return total_written;
1053}
1054
1055static ssize_t cifs_write(struct file *file, const char *write_data,
d9414774 1056 size_t write_size, loff_t *poffset)
1da177e4
LT
1057{
1058 int rc = 0;
1059 unsigned int bytes_written = 0;
1060 unsigned int total_written;
1061 struct cifs_sb_info *cifs_sb;
1062 struct cifsTconInfo *pTcon;
1063 int xid, long_op;
1064 struct cifsFileInfo *open_file;
fbec9ab9 1065 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
1da177e4 1066
e6a00296 1067 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4 1068
b6b38f70
JP
1069 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1070 *poffset, file->f_path.dentry->d_name.name);
1da177e4
LT
1071
1072 if (file->private_data == NULL)
1073 return -EBADF;
c21dfb69 1074 open_file = file->private_data;
13cfb733 1075 pTcon = tlink_tcon(open_file->tlink);
50c2f753 1076
1da177e4 1077 xid = GetXid();
1da177e4 1078
fbec9ab9 1079 long_op = cifs_write_timeout(cifsi, *poffset);
1da177e4
LT
1080 for (total_written = 0; write_size > total_written;
1081 total_written += bytes_written) {
1082 rc = -EAGAIN;
1083 while (rc == -EAGAIN) {
1084 if (file->private_data == NULL) {
1085 /* file has been closed on us */
1086 FreeXid(xid);
1087 /* if we have gotten here we have written some data
1088 and blocked, and the file has been freed on us
fb8c4b14 1089 while we blocked so return what we managed to
1da177e4
LT
1090 write */
1091 return total_written;
fb8c4b14 1092 }
1da177e4
LT
1093 if (open_file->closePend) {
1094 FreeXid(xid);
1095 if (total_written)
1096 return total_written;
1097 else
1098 return -EBADF;
1099 }
1100 if (open_file->invalidHandle) {
1da177e4
LT
1101 /* we could deadlock if we called
1102 filemap_fdatawait from here so tell
fb8c4b14 1103 reopen_file not to flush data to
1da177e4 1104 server now */
4b18f2a9 1105 rc = cifs_reopen_file(file, false);
1da177e4
LT
1106 if (rc != 0)
1107 break;
1108 }
fb8c4b14
SF
1109 if (experimEnabled || (pTcon->ses->server &&
1110 ((pTcon->ses->server->secMode &
08775834 1111 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
c01f36a8 1112 == 0))) {
3e84469d
SF
1113 struct kvec iov[2];
1114 unsigned int len;
1115
0ae0efad 1116 len = min((size_t)cifs_sb->wsize,
3e84469d
SF
1117 write_size - total_written);
1118 /* iov[0] is reserved for smb header */
1119 iov[1].iov_base = (char *)write_data +
1120 total_written;
1121 iov[1].iov_len = len;
d6e04ae6 1122 rc = CIFSSMBWrite2(xid, pTcon,
3e84469d 1123 open_file->netfid, len,
d6e04ae6 1124 *poffset, &bytes_written,
3e84469d 1125 iov, 1, long_op);
d6e04ae6 1126 } else
60808233
SF
1127 rc = CIFSSMBWrite(xid, pTcon,
1128 open_file->netfid,
1129 min_t(const int, cifs_sb->wsize,
1130 write_size - total_written),
1131 *poffset, &bytes_written,
1132 write_data + total_written,
1133 NULL, long_op);
1da177e4
LT
1134 }
1135 if (rc || (bytes_written == 0)) {
1136 if (total_written)
1137 break;
1138 else {
1139 FreeXid(xid);
1140 return rc;
1141 }
fbec9ab9
JL
1142 } else {
1143 cifs_update_eof(cifsi, *poffset, bytes_written);
1da177e4 1144 *poffset += bytes_written;
fbec9ab9 1145 }
133672ef 1146 long_op = CIFS_STD_OP; /* subsequent writes fast -
1da177e4
LT
1147 15 seconds is plenty */
1148 }
1149
a4544347 1150 cifs_stats_bytes_written(pTcon, total_written);
1da177e4
LT
1151
1152 /* since the write may have blocked check these pointers again */
3677db10 1153 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
004c46b9 1154/*BB We could make this contingent on superblock ATIME flag too */
3677db10
SF
1155/* file->f_path.dentry->d_inode->i_ctime =
1156 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1157 if (total_written > 0) {
1158 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1159 if (*poffset > file->f_path.dentry->d_inode->i_size)
1160 i_size_write(file->f_path.dentry->d_inode,
1161 *poffset);
1162 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1da177e4 1163 }
3677db10 1164 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1da177e4
LT
1165 }
1166 FreeXid(xid);
1167 return total_written;
1168}
1169
630f3f0c 1170#ifdef CONFIG_CIFS_EXPERIMENTAL
6508d904
JL
1171struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1172 bool fsuid_only)
630f3f0c
SF
1173{
1174 struct cifsFileInfo *open_file = NULL;
6508d904
JL
1175 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1176
1177 /* only filter by fsuid on multiuser mounts */
1178 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1179 fsuid_only = false;
630f3f0c
SF
1180
1181 read_lock(&GlobalSMBSeslock);
1182 /* we could simply get the first_list_entry since write-only entries
1183 are always at the end of the list but since the first entry might
1184 have a close pending, we go through the whole list */
1185 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1186 if (open_file->closePend)
1187 continue;
6508d904
JL
1188 if (fsuid_only && open_file->uid != current_fsuid())
1189 continue;
630f3f0c
SF
1190 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1191 (open_file->pfile->f_flags & O_RDONLY))) {
1192 if (!open_file->invalidHandle) {
1193 /* found a good file */
1194 /* lock it so it will not be closed on us */
6ab409b5 1195 cifsFileInfo_get(open_file);
630f3f0c
SF
1196 read_unlock(&GlobalSMBSeslock);
1197 return open_file;
1198 } /* else might as well continue, and look for
1199 another, or simply have the caller reopen it
1200 again rather than trying to fix this handle */
1201 } else /* write only file */
1202 break; /* write only files are last so must be done */
1203 }
1204 read_unlock(&GlobalSMBSeslock);
1205 return NULL;
1206}
1207#endif
1208
6508d904
JL
1209struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1210 bool fsuid_only)
6148a742
SF
1211{
1212 struct cifsFileInfo *open_file;
6508d904 1213 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
2846d386 1214 bool any_available = false;
dd99cd80 1215 int rc;
6148a742 1216
60808233
SF
1217 /* Having a null inode here (because mapping->host was set to zero by
1218 the VFS or MM) should not happen but we had reports of on oops (due to
1219 it being zero) during stress testcases so we need to check for it */
1220
fb8c4b14 1221 if (cifs_inode == NULL) {
b6b38f70 1222 cERROR(1, "Null inode passed to cifs_writeable_file");
60808233
SF
1223 dump_stack();
1224 return NULL;
1225 }
1226
6508d904
JL
1227 /* only filter by fsuid on multiuser mounts */
1228 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1229 fsuid_only = false;
1230
6148a742 1231 read_lock(&GlobalSMBSeslock);
9b22b0b7 1232refind_writable:
6148a742 1233 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
1234 if (open_file->closePend)
1235 continue;
1236 if (!any_available && open_file->pid != current->tgid)
1237 continue;
1238 if (fsuid_only && open_file->uid != current_fsuid())
6148a742
SF
1239 continue;
1240 if (open_file->pfile &&
1241 ((open_file->pfile->f_flags & O_RDWR) ||
1242 (open_file->pfile->f_flags & O_WRONLY))) {
6ab409b5 1243 cifsFileInfo_get(open_file);
9b22b0b7
SF
1244
1245 if (!open_file->invalidHandle) {
1246 /* found a good writable file */
1247 read_unlock(&GlobalSMBSeslock);
1248 return open_file;
1249 }
8840dee9 1250
6148a742 1251 read_unlock(&GlobalSMBSeslock);
9b22b0b7 1252 /* Had to unlock since following call can block */
4b18f2a9 1253 rc = cifs_reopen_file(open_file->pfile, false);
8840dee9 1254 if (!rc) {
9b22b0b7
SF
1255 if (!open_file->closePend)
1256 return open_file;
1257 else { /* start over in case this was deleted */
1258 /* since the list could be modified */
37c0eb46 1259 read_lock(&GlobalSMBSeslock);
6ab409b5 1260 cifsFileInfo_put(open_file);
9b22b0b7 1261 goto refind_writable;
37c0eb46
SF
1262 }
1263 }
9b22b0b7
SF
1264
1265 /* if it fails, try another handle if possible -
1266 (we can not do this if closePending since
1267 loop could be modified - in which case we
1268 have to start at the beginning of the list
1269 again. Note that it would be bad
1270 to hold up writepages here (rather than
1271 in caller) with continuous retries */
b6b38f70 1272 cFYI(1, "wp failed on reopen file");
9b22b0b7
SF
1273 read_lock(&GlobalSMBSeslock);
1274 /* can not use this handle, no write
1275 pending on this one after all */
6ab409b5 1276 cifsFileInfo_put(open_file);
8840dee9 1277
9b22b0b7
SF
1278 if (open_file->closePend) /* list could have changed */
1279 goto refind_writable;
1280 /* else we simply continue to the next entry. Thus
1281 we do not loop on reopen errors. If we
1282 can not reopen the file, for example if we
1283 reconnected to a server with another client
1284 racing to delete or lock the file we would not
1285 make progress if we restarted before the beginning
1286 of the loop here. */
6148a742
SF
1287 }
1288 }
2846d386
JL
1289 /* couldn't find useable FH with same pid, try any available */
1290 if (!any_available) {
1291 any_available = true;
1292 goto refind_writable;
1293 }
6148a742
SF
1294 read_unlock(&GlobalSMBSeslock);
1295 return NULL;
1296}
1297
1da177e4
LT
1298static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1299{
1300 struct address_space *mapping = page->mapping;
1301 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1302 char *write_data;
1303 int rc = -EFAULT;
1304 int bytes_written = 0;
1305 struct cifs_sb_info *cifs_sb;
1da177e4 1306 struct inode *inode;
6148a742 1307 struct cifsFileInfo *open_file;
1da177e4
LT
1308
1309 if (!mapping || !mapping->host)
1310 return -EFAULT;
1311
1312 inode = page->mapping->host;
1313 cifs_sb = CIFS_SB(inode->i_sb);
1da177e4
LT
1314
1315 offset += (loff_t)from;
1316 write_data = kmap(page);
1317 write_data += from;
1318
1319 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1320 kunmap(page);
1321 return -EIO;
1322 }
1323
1324 /* racing with truncate? */
1325 if (offset > mapping->host->i_size) {
1326 kunmap(page);
1327 return 0; /* don't care */
1328 }
1329
1330 /* check to make sure that we are not extending the file */
1331 if (mapping->host->i_size - offset < (loff_t)to)
fb8c4b14 1332 to = (unsigned)(mapping->host->i_size - offset);
1da177e4 1333
6508d904 1334 open_file = find_writable_file(CIFS_I(mapping->host), false);
6148a742
SF
1335 if (open_file) {
1336 bytes_written = cifs_write(open_file->pfile, write_data,
1337 to-from, &offset);
6ab409b5 1338 cifsFileInfo_put(open_file);
1da177e4 1339 /* Does mm or vfs already set times? */
6148a742 1340 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
bb5a9a04 1341 if ((bytes_written > 0) && (offset))
6148a742 1342 rc = 0;
bb5a9a04
SF
1343 else if (bytes_written < 0)
1344 rc = bytes_written;
6148a742 1345 } else {
b6b38f70 1346 cFYI(1, "No writeable filehandles for inode");
1da177e4
LT
1347 rc = -EIO;
1348 }
1349
1350 kunmap(page);
1351 return rc;
1352}
1353
1da177e4 1354static int cifs_writepages(struct address_space *mapping,
37c0eb46 1355 struct writeback_control *wbc)
1da177e4 1356{
37c0eb46
SF
1357 struct backing_dev_info *bdi = mapping->backing_dev_info;
1358 unsigned int bytes_to_write;
1359 unsigned int bytes_written;
1360 struct cifs_sb_info *cifs_sb;
1361 int done = 0;
111ebb6e 1362 pgoff_t end;
37c0eb46 1363 pgoff_t index;
fb8c4b14
SF
1364 int range_whole = 0;
1365 struct kvec *iov;
84d2f07e 1366 int len;
37c0eb46
SF
1367 int n_iov = 0;
1368 pgoff_t next;
1369 int nr_pages;
1370 __u64 offset = 0;
23e7dd7d 1371 struct cifsFileInfo *open_file;
ba00ba64 1372 struct cifsTconInfo *tcon;
fbec9ab9 1373 struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
37c0eb46
SF
1374 struct page *page;
1375 struct pagevec pvec;
1376 int rc = 0;
1377 int scanned = 0;
fbec9ab9 1378 int xid, long_op;
1da177e4 1379
f3983c21
JL
1380 /*
1381 * BB: Is this meaningful for a non-block-device file system?
1382 * If it is, we should test it again after we do I/O
1383 */
1384 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1385 wbc->encountered_congestion = 1;
1386 return 0;
1387 }
1388
37c0eb46 1389 cifs_sb = CIFS_SB(mapping->host->i_sb);
50c2f753 1390
37c0eb46
SF
1391 /*
1392 * If wsize is smaller that the page cache size, default to writing
1393 * one page at a time via cifs_writepage
1394 */
1395 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1396 return generic_writepages(mapping, wbc);
1397
9a0c8230 1398 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
fb8c4b14 1399 if (iov == NULL)
9a0c8230
SF
1400 return generic_writepages(mapping, wbc);
1401
37c0eb46 1402 /*
f3983c21
JL
1403 * if there's no open file, then this is likely to fail too,
1404 * but it'll at least handle the return. Maybe it should be
1405 * a BUG() instead?
37c0eb46 1406 */
6508d904 1407 open_file = find_writable_file(CIFS_I(mapping->host), false);
f3983c21 1408 if (!open_file) {
9a0c8230 1409 kfree(iov);
f3983c21
JL
1410 return generic_writepages(mapping, wbc);
1411 }
1412
13cfb733 1413 tcon = tlink_tcon(open_file->tlink);
f3983c21
JL
1414 if (!experimEnabled && tcon->ses->server->secMode &
1415 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1416 cifsFileInfo_put(open_file);
1417 return generic_writepages(mapping, wbc);
37c0eb46 1418 }
f3983c21 1419 cifsFileInfo_put(open_file);
37c0eb46 1420
1da177e4
LT
1421 xid = GetXid();
1422
37c0eb46 1423 pagevec_init(&pvec, 0);
111ebb6e 1424 if (wbc->range_cyclic) {
37c0eb46 1425 index = mapping->writeback_index; /* Start from prev offset */
111ebb6e
OH
1426 end = -1;
1427 } else {
1428 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1429 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1430 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1431 range_whole = 1;
37c0eb46
SF
1432 scanned = 1;
1433 }
1434retry:
1435 while (!done && (index <= end) &&
1436 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1437 PAGECACHE_TAG_DIRTY,
1438 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1439 int first;
1440 unsigned int i;
1441
37c0eb46
SF
1442 first = -1;
1443 next = 0;
1444 n_iov = 0;
1445 bytes_to_write = 0;
1446
1447 for (i = 0; i < nr_pages; i++) {
1448 page = pvec.pages[i];
1449 /*
1450 * At this point we hold neither mapping->tree_lock nor
1451 * lock on the page itself: the page may be truncated or
1452 * invalidated (changing page->mapping to NULL), or even
1453 * swizzled back from swapper_space to tmpfs file
1454 * mapping
1455 */
1456
1457 if (first < 0)
1458 lock_page(page);
529ae9aa 1459 else if (!trylock_page(page))
37c0eb46
SF
1460 break;
1461
1462 if (unlikely(page->mapping != mapping)) {
1463 unlock_page(page);
1464 break;
1465 }
1466
111ebb6e 1467 if (!wbc->range_cyclic && page->index > end) {
37c0eb46
SF
1468 done = 1;
1469 unlock_page(page);
1470 break;
1471 }
1472
1473 if (next && (page->index != next)) {
1474 /* Not next consecutive page */
1475 unlock_page(page);
1476 break;
1477 }
1478
1479 if (wbc->sync_mode != WB_SYNC_NONE)
1480 wait_on_page_writeback(page);
1481
1482 if (PageWriteback(page) ||
cb876f45 1483 !clear_page_dirty_for_io(page)) {
37c0eb46
SF
1484 unlock_page(page);
1485 break;
1486 }
84d2f07e 1487
cb876f45
LT
1488 /*
1489 * This actually clears the dirty bit in the radix tree.
1490 * See cifs_writepage() for more commentary.
1491 */
1492 set_page_writeback(page);
1493
84d2f07e
SF
1494 if (page_offset(page) >= mapping->host->i_size) {
1495 done = 1;
1496 unlock_page(page);
cb876f45 1497 end_page_writeback(page);
84d2f07e
SF
1498 break;
1499 }
1500
37c0eb46
SF
1501 /*
1502 * BB can we get rid of this? pages are held by pvec
1503 */
1504 page_cache_get(page);
1505
84d2f07e
SF
1506 len = min(mapping->host->i_size - page_offset(page),
1507 (loff_t)PAGE_CACHE_SIZE);
1508
37c0eb46
SF
1509 /* reserve iov[0] for the smb header */
1510 n_iov++;
1511 iov[n_iov].iov_base = kmap(page);
84d2f07e
SF
1512 iov[n_iov].iov_len = len;
1513 bytes_to_write += len;
37c0eb46
SF
1514
1515 if (first < 0) {
1516 first = i;
1517 offset = page_offset(page);
1518 }
1519 next = page->index + 1;
1520 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1521 break;
1522 }
1523 if (n_iov) {
6508d904
JL
1524 open_file = find_writable_file(CIFS_I(mapping->host),
1525 false);
23e7dd7d 1526 if (!open_file) {
b6b38f70 1527 cERROR(1, "No writable handles for inode");
23e7dd7d 1528 rc = -EBADF;
1047abc1 1529 } else {
fbec9ab9 1530 long_op = cifs_write_timeout(cifsi, offset);
f3983c21 1531 rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
23e7dd7d
SF
1532 bytes_to_write, offset,
1533 &bytes_written, iov, n_iov,
fbec9ab9 1534 long_op);
6ab409b5 1535 cifsFileInfo_put(open_file);
fbec9ab9 1536 cifs_update_eof(cifsi, offset, bytes_written);
f3983c21 1537 }
fbec9ab9 1538
f3983c21
JL
1539 if (rc || bytes_written < bytes_to_write) {
1540 cERROR(1, "Write2 ret %d, wrote %d",
1541 rc, bytes_written);
1542 /* BB what if continued retry is
1543 requested via mount flags? */
1544 if (rc == -ENOSPC)
1545 set_bit(AS_ENOSPC, &mapping->flags);
1546 else
1547 set_bit(AS_EIO, &mapping->flags);
1548 } else {
1549 cifs_stats_bytes_written(tcon, bytes_written);
37c0eb46 1550 }
f3983c21 1551
37c0eb46
SF
1552 for (i = 0; i < n_iov; i++) {
1553 page = pvec.pages[first + i];
eb9bdaa3
SF
1554 /* Should we also set page error on
1555 success rc but too little data written? */
1556 /* BB investigate retry logic on temporary
1557 server crash cases and how recovery works
fb8c4b14
SF
1558 when page marked as error */
1559 if (rc)
eb9bdaa3 1560 SetPageError(page);
37c0eb46
SF
1561 kunmap(page);
1562 unlock_page(page);
cb876f45 1563 end_page_writeback(page);
37c0eb46
SF
1564 page_cache_release(page);
1565 }
1566 if ((wbc->nr_to_write -= n_iov) <= 0)
1567 done = 1;
1568 index = next;
b066a48c
DK
1569 } else
1570 /* Need to re-find the pages we skipped */
1571 index = pvec.pages[0]->index + 1;
1572
37c0eb46
SF
1573 pagevec_release(&pvec);
1574 }
1575 if (!scanned && !done) {
1576 /*
1577 * We hit the last page and there is more work to be done: wrap
1578 * back to the start of the file
1579 */
1580 scanned = 1;
1581 index = 0;
1582 goto retry;
1583 }
111ebb6e 1584 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
37c0eb46
SF
1585 mapping->writeback_index = index;
1586
1da177e4 1587 FreeXid(xid);
9a0c8230 1588 kfree(iov);
1da177e4
LT
1589 return rc;
1590}
1da177e4 1591
fb8c4b14 1592static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1da177e4
LT
1593{
1594 int rc = -EFAULT;
1595 int xid;
1596
1597 xid = GetXid();
1598/* BB add check for wbc flags */
1599 page_cache_get(page);
ad7a2926 1600 if (!PageUptodate(page))
b6b38f70 1601 cFYI(1, "ppw - page not up to date");
cb876f45
LT
1602
1603 /*
1604 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1605 *
1606 * A writepage() implementation always needs to do either this,
1607 * or re-dirty the page with "redirty_page_for_writepage()" in
1608 * the case of a failure.
1609 *
1610 * Just unlocking the page will cause the radix tree tag-bits
1611 * to fail to update with the state of the page correctly.
1612 */
fb8c4b14 1613 set_page_writeback(page);
1da177e4
LT
1614 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1615 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1616 unlock_page(page);
cb876f45
LT
1617 end_page_writeback(page);
1618 page_cache_release(page);
1da177e4
LT
1619 FreeXid(xid);
1620 return rc;
1621}
1622
d9414774
NP
1623static int cifs_write_end(struct file *file, struct address_space *mapping,
1624 loff_t pos, unsigned len, unsigned copied,
1625 struct page *page, void *fsdata)
1da177e4 1626{
d9414774
NP
1627 int rc;
1628 struct inode *inode = mapping->host;
1da177e4 1629
b6b38f70
JP
1630 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1631 page, pos, copied);
d9414774 1632
a98ee8c1
JL
1633 if (PageChecked(page)) {
1634 if (copied == len)
1635 SetPageUptodate(page);
1636 ClearPageChecked(page);
1637 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
d9414774 1638 SetPageUptodate(page);
ad7a2926 1639
1da177e4 1640 if (!PageUptodate(page)) {
d9414774
NP
1641 char *page_data;
1642 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1643 int xid;
1644
1645 xid = GetXid();
1da177e4
LT
 1646 /* this is probably better than calling partialpage_write
 1647 directly, since here the file handle is already known and
 1648 we might as well use it */
 1649 /* BB check if anything else is missing from ppw,
 1650 such as updating the last write time */
1651 page_data = kmap(page);
d9414774
NP
1652 rc = cifs_write(file, page_data + offset, copied, &pos);
1653 /* if (rc < 0) should we set writebehind rc? */
1da177e4 1654 kunmap(page);
d9414774
NP
1655
1656 FreeXid(xid);
fb8c4b14 1657 } else {
d9414774
NP
1658 rc = copied;
1659 pos += copied;
1da177e4
LT
1660 set_page_dirty(page);
1661 }
1662
d9414774
NP
1663 if (rc > 0) {
1664 spin_lock(&inode->i_lock);
1665 if (pos > inode->i_size)
1666 i_size_write(inode, pos);
1667 spin_unlock(&inode->i_lock);
1668 }
1669
1670 unlock_page(page);
1671 page_cache_release(page);
1672
1da177e4
LT
1673 return rc;
1674}
1675
7ea80859 1676int cifs_fsync(struct file *file, int datasync)
1da177e4
LT
1677{
1678 int xid;
1679 int rc = 0;
b298f223 1680 struct cifsTconInfo *tcon;
c21dfb69 1681 struct cifsFileInfo *smbfile = file->private_data;
e6a00296 1682 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1683
1684 xid = GetXid();
1685
b6b38f70 1686 cFYI(1, "Sync file - name: %s datasync: 0x%x",
7ea80859 1687 file->f_path.dentry->d_name.name, datasync);
50c2f753 1688
cea21805
JL
1689 rc = filemap_write_and_wait(inode->i_mapping);
1690 if (rc == 0) {
1691 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1692 CIFS_I(inode)->write_behind_rc = 0;
13cfb733 1693 tcon = tlink_tcon(smbfile->tlink);
be652445 1694 if (!rc && tcon && smbfile &&
4717bed6 1695 !(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
b298f223 1696 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
cea21805 1697 }
b298f223 1698
1da177e4
LT
1699 FreeXid(xid);
1700 return rc;
1701}
1702
3978d717 1703/* static void cifs_sync_page(struct page *page)
1da177e4
LT
1704{
1705 struct address_space *mapping;
1706 struct inode *inode;
1707 unsigned long index = page->index;
1708 unsigned int rpages = 0;
1709 int rc = 0;
1710
f19159dc 1711 cFYI(1, "sync page %p", page);
1da177e4
LT
1712 mapping = page->mapping;
1713 if (!mapping)
1714 return 0;
1715 inode = mapping->host;
1716 if (!inode)
3978d717 1717 return; */
1da177e4 1718
fb8c4b14 1719/* fill in rpages then
1da177e4
LT
1720 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1721
b6b38f70 1722/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
1da177e4 1723
3978d717 1724#if 0
1da177e4
LT
1725 if (rc < 0)
1726 return rc;
1727 return 0;
3978d717 1728#endif
1da177e4
LT
1729} */
1730
1731/*
1732 * As file closes, flush all cached write data for this inode checking
1733 * for write behind errors.
1734 */
75e1fcc0 1735int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 1736{
fb8c4b14 1737 struct inode *inode = file->f_path.dentry->d_inode;
1da177e4
LT
1738 int rc = 0;
1739
1740 /* Rather than do the steps manually:
1741 lock the inode for writing
1742 loop through pages looking for write behind data (dirty pages)
1743 coalesce into contiguous 16K (or smaller) chunks to write to server
1744 send to server (prefer in parallel)
1745 deal with writebehind errors
1746 unlock inode for writing
 1747 filemap_fdatawrite appears easier for the time being */
1748
1749 rc = filemap_fdatawrite(inode->i_mapping);
cea21805
JL
1750 /* reset wb rc if we were able to write out dirty pages */
1751 if (!rc) {
1752 rc = CIFS_I(inode)->write_behind_rc;
1da177e4 1753 CIFS_I(inode)->write_behind_rc = 0;
cea21805 1754 }
50c2f753 1755
b6b38f70 1756 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1da177e4
LT
1757
1758 return rc;
1759}
1760
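/*
 * Editor's note (not in the original file): a concrete trace of the
 * write-behind error handling in cifs_flush() above. If an earlier
 * background writeout (e.g. from writepages) failed with -ENOSPC, that
 * value was stashed in write_behind_rc. At close time cifs_flush() runs,
 * the final filemap_fdatawrite() may well succeed, but rc is then replaced
 * by the stored -ENOSPC (and write_behind_rc is reset), so the application
 * still sees the earlier failure reported from its close/flush.
 */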
1761ssize_t cifs_user_read(struct file *file, char __user *read_data,
1762 size_t read_size, loff_t *poffset)
1763{
1764 int rc = -EACCES;
1765 unsigned int bytes_read = 0;
1766 unsigned int total_read = 0;
1767 unsigned int current_read_size;
1768 struct cifs_sb_info *cifs_sb;
1769 struct cifsTconInfo *pTcon;
1770 int xid;
1771 struct cifsFileInfo *open_file;
1772 char *smb_read_data;
1773 char __user *current_offset;
1774 struct smb_com_read_rsp *pSMBr;
1775
1776 xid = GetXid();
e6a00296 1777 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1778
1779 if (file->private_data == NULL) {
0f3bc09e 1780 rc = -EBADF;
1da177e4 1781 FreeXid(xid);
0f3bc09e 1782 return rc;
1da177e4 1783 }
c21dfb69 1784 open_file = file->private_data;
13cfb733 1785 pTcon = tlink_tcon(open_file->tlink);
1da177e4 1786
ad7a2926 1787 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1788 cFYI(1, "attempting read on write only file instance");
ad7a2926 1789
1da177e4
LT
1790 for (total_read = 0, current_offset = read_data;
1791 read_size > total_read;
1792 total_read += bytes_read, current_offset += bytes_read) {
fb8c4b14 1793 current_read_size = min_t(const int, read_size - total_read,
1da177e4
LT
1794 cifs_sb->rsize);
1795 rc = -EAGAIN;
1796 smb_read_data = NULL;
1797 while (rc == -EAGAIN) {
ec637e3f 1798 int buf_type = CIFS_NO_BUFFER;
fb8c4b14 1799 if ((open_file->invalidHandle) &&
1da177e4 1800 (!open_file->closePend)) {
4b18f2a9 1801 rc = cifs_reopen_file(file, true);
1da177e4
LT
1802 if (rc != 0)
1803 break;
1804 }
bfa0d75a 1805 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1806 open_file->netfid,
1807 current_read_size, *poffset,
1808 &bytes_read, &smb_read_data,
1809 &buf_type);
1da177e4 1810 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1da177e4 1811 if (smb_read_data) {
93544cc6
SF
1812 if (copy_to_user(current_offset,
1813 smb_read_data +
1814 4 /* RFC1001 length field */ +
1815 le16_to_cpu(pSMBr->DataOffset),
ad7a2926 1816 bytes_read))
93544cc6 1817 rc = -EFAULT;
93544cc6 1818
fb8c4b14 1819 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 1820 cifs_small_buf_release(smb_read_data);
fb8c4b14 1821 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 1822 cifs_buf_release(smb_read_data);
1da177e4
LT
1823 smb_read_data = NULL;
1824 }
1825 }
1826 if (rc || (bytes_read == 0)) {
1827 if (total_read) {
1828 break;
1829 } else {
1830 FreeXid(xid);
1831 return rc;
1832 }
1833 } else {
a4544347 1834 cifs_stats_bytes_read(pTcon, bytes_read);
1da177e4
LT
1835 *poffset += bytes_read;
1836 }
1837 }
1838 FreeXid(xid);
1839 return total_read;
1840}
1841
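/*
 * Editor's note (not in the original file): why the copy_to_user() above
 * starts at smb_read_data + 4 + DataOffset. The receive buffer begins with
 * the 4-byte RFC1001 length field, followed by the SMB packet itself, and
 * DataOffset in the read response gives the offset of the payload from the
 * start of the SMB header. So, for example, if the server were to report
 * DataOffset = 60, the first byte of file data would sit at offset 64 of
 * the received buffer.
 */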
1842
1843static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1844 loff_t *poffset)
1845{
1846 int rc = -EACCES;
1847 unsigned int bytes_read = 0;
1848 unsigned int total_read;
1849 unsigned int current_read_size;
1850 struct cifs_sb_info *cifs_sb;
1851 struct cifsTconInfo *pTcon;
1852 int xid;
1853 char *current_offset;
1854 struct cifsFileInfo *open_file;
ec637e3f 1855 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1856
1857 xid = GetXid();
e6a00296 1858 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1da177e4
LT
1859
1860 if (file->private_data == NULL) {
0f3bc09e 1861 rc = -EBADF;
1da177e4 1862 FreeXid(xid);
0f3bc09e 1863 return rc;
1da177e4 1864 }
c21dfb69 1865 open_file = file->private_data;
13cfb733 1866 pTcon = tlink_tcon(open_file->tlink);
1da177e4
LT
1867
1868 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
b6b38f70 1869 cFYI(1, "attempting read on write only file instance");
1da177e4 1870
fb8c4b14 1871 for (total_read = 0, current_offset = read_data;
1da177e4
LT
1872 read_size > total_read;
1873 total_read += bytes_read, current_offset += bytes_read) {
1874 current_read_size = min_t(const int, read_size - total_read,
1875 cifs_sb->rsize);
f9f5c817
SF
 1876 /* For Windows ME and 9x we do not want to request more
 1877 than the server negotiated, since it would then refuse the read */
fb8c4b14 1878 if ((pTcon->ses) &&
f9f5c817
SF
1879 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1880 current_read_size = min_t(const int, current_read_size,
1881 pTcon->ses->server->maxBuf - 128);
1882 }
1da177e4
LT
1883 rc = -EAGAIN;
1884 while (rc == -EAGAIN) {
fb8c4b14 1885 if ((open_file->invalidHandle) &&
1da177e4 1886 (!open_file->closePend)) {
4b18f2a9 1887 rc = cifs_reopen_file(file, true);
1da177e4
LT
1888 if (rc != 0)
1889 break;
1890 }
bfa0d75a 1891 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
1892 open_file->netfid,
1893 current_read_size, *poffset,
1894 &bytes_read, &current_offset,
1895 &buf_type);
1da177e4
LT
1896 }
1897 if (rc || (bytes_read == 0)) {
1898 if (total_read) {
1899 break;
1900 } else {
1901 FreeXid(xid);
1902 return rc;
1903 }
1904 } else {
a4544347 1905 cifs_stats_bytes_read(pTcon, total_read);
1da177e4
LT
1906 *poffset += bytes_read;
1907 }
1908 }
1909 FreeXid(xid);
1910 return total_read;
1911}
1912
1913int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1914{
1da177e4
LT
1915 int rc, xid;
1916
1917 xid = GetXid();
abab095d 1918 rc = cifs_revalidate_file(file);
1da177e4 1919 if (rc) {
b6b38f70 1920 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
1da177e4
LT
1921 FreeXid(xid);
1922 return rc;
1923 }
1924 rc = generic_file_mmap(file, vma);
1925 FreeXid(xid);
1926 return rc;
1927}
1928
1929
fb8c4b14 1930static void cifs_copy_cache_pages(struct address_space *mapping,
315e995c 1931 struct list_head *pages, int bytes_read, char *data)
1da177e4
LT
1932{
1933 struct page *page;
1934 char *target;
1935
1936 while (bytes_read > 0) {
1937 if (list_empty(pages))
1938 break;
1939
1940 page = list_entry(pages->prev, struct page, lru);
1941 list_del(&page->lru);
1942
315e995c 1943 if (add_to_page_cache_lru(page, mapping, page->index,
1da177e4
LT
1944 GFP_KERNEL)) {
1945 page_cache_release(page);
b6b38f70 1946 cFYI(1, "Add page cache failed");
3079ca62
SF
1947 data += PAGE_CACHE_SIZE;
1948 bytes_read -= PAGE_CACHE_SIZE;
1da177e4
LT
1949 continue;
1950 }
06b43672 1951 page_cache_release(page);
1da177e4 1952
fb8c4b14 1953 target = kmap_atomic(page, KM_USER0);
1da177e4
LT
1954
1955 if (PAGE_CACHE_SIZE > bytes_read) {
1956 memcpy(target, data, bytes_read);
1957 /* zero the tail end of this partial page */
fb8c4b14 1958 memset(target + bytes_read, 0,
1da177e4
LT
1959 PAGE_CACHE_SIZE - bytes_read);
1960 bytes_read = 0;
1961 } else {
1962 memcpy(target, data, PAGE_CACHE_SIZE);
1963 bytes_read -= PAGE_CACHE_SIZE;
1964 }
1965 kunmap_atomic(target, KM_USER0);
1966
1967 flush_dcache_page(page);
1968 SetPageUptodate(page);
1969 unlock_page(page);
1da177e4 1970 data += PAGE_CACHE_SIZE;
9dc06558
SJ
1971
1972 /* add page to FS-Cache */
1973 cifs_readpage_to_fscache(mapping->host, page);
1da177e4
LT
1974 }
1975 return;
1976}
1977
1978static int cifs_readpages(struct file *file, struct address_space *mapping,
1979 struct list_head *page_list, unsigned num_pages)
1980{
1981 int rc = -EACCES;
1982 int xid;
1983 loff_t offset;
1984 struct page *page;
1985 struct cifs_sb_info *cifs_sb;
1986 struct cifsTconInfo *pTcon;
2c2130e1 1987 unsigned int bytes_read = 0;
fb8c4b14 1988 unsigned int read_size, i;
1da177e4
LT
1989 char *smb_read_data = NULL;
1990 struct smb_com_read_rsp *pSMBr;
1da177e4 1991 struct cifsFileInfo *open_file;
ec637e3f 1992 int buf_type = CIFS_NO_BUFFER;
1da177e4
LT
1993
1994 xid = GetXid();
1995 if (file->private_data == NULL) {
0f3bc09e 1996 rc = -EBADF;
1da177e4 1997 FreeXid(xid);
0f3bc09e 1998 return rc;
1da177e4 1999 }
c21dfb69 2000 open_file = file->private_data;
e6a00296 2001 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
13cfb733 2002 pTcon = tlink_tcon(open_file->tlink);
bfa0d75a 2003
56698236
SJ
2004 /*
2005 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2006 * immediately if the cookie is negative
2007 */
2008 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2009 &num_pages);
2010 if (rc == 0)
2011 goto read_complete;
2012
f19159dc 2013 cFYI(DBG2, "rpages: num pages %d", num_pages);
1da177e4
LT
2014 for (i = 0; i < num_pages; ) {
2015 unsigned contig_pages;
2016 struct page *tmp_page;
2017 unsigned long expected_index;
2018
2019 if (list_empty(page_list))
2020 break;
2021
2022 page = list_entry(page_list->prev, struct page, lru);
2023 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2024
2025 /* count adjacent pages that we will read into */
2026 contig_pages = 0;
fb8c4b14 2027 expected_index =
1da177e4 2028 list_entry(page_list->prev, struct page, lru)->index;
fb8c4b14 2029 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1da177e4
LT
2030 if (tmp_page->index == expected_index) {
2031 contig_pages++;
2032 expected_index++;
2033 } else
fb8c4b14 2034 break;
1da177e4
LT
2035 }
2036 if (contig_pages + i > num_pages)
2037 contig_pages = num_pages - i;
2038
2039 /* for reads over a certain size could initiate async
2040 read ahead */
2041
2042 read_size = contig_pages * PAGE_CACHE_SIZE;
2043 /* Read size needs to be in multiples of one page */
2044 read_size = min_t(const unsigned int, read_size,
2045 cifs_sb->rsize & PAGE_CACHE_MASK);
b6b38f70
JP
2046 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
2047 read_size, contig_pages);
1da177e4
LT
2048 rc = -EAGAIN;
2049 while (rc == -EAGAIN) {
fb8c4b14 2050 if ((open_file->invalidHandle) &&
1da177e4 2051 (!open_file->closePend)) {
4b18f2a9 2052 rc = cifs_reopen_file(file, true);
1da177e4
LT
2053 if (rc != 0)
2054 break;
2055 }
2056
bfa0d75a 2057 rc = CIFSSMBRead(xid, pTcon,
ec637e3f
SF
2058 open_file->netfid,
2059 read_size, offset,
2060 &bytes_read, &smb_read_data,
2061 &buf_type);
a9d02ad4 2062 /* BB more RC checks ? */
fb8c4b14 2063 if (rc == -EAGAIN) {
1da177e4 2064 if (smb_read_data) {
fb8c4b14 2065 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2066 cifs_small_buf_release(smb_read_data);
fb8c4b14 2067 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2068 cifs_buf_release(smb_read_data);
1da177e4
LT
2069 smb_read_data = NULL;
2070 }
2071 }
2072 }
2073 if ((rc < 0) || (smb_read_data == NULL)) {
b6b38f70 2074 cFYI(1, "Read error in readpages: %d", rc);
1da177e4
LT
2075 break;
2076 } else if (bytes_read > 0) {
6f88cc2e 2077 task_io_account_read(bytes_read);
1da177e4
LT
2078 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2079 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2080 smb_read_data + 4 /* RFC1001 hdr */ +
315e995c 2081 le16_to_cpu(pSMBr->DataOffset));
1da177e4
LT
2082
2083 i += bytes_read >> PAGE_CACHE_SHIFT;
a4544347 2084 cifs_stats_bytes_read(pTcon, bytes_read);
2c2130e1 2085 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1da177e4
LT
2086 i++; /* account for partial page */
2087
fb8c4b14 2088 /* server copy of file can have smaller size
1da177e4 2089 than client */
fb8c4b14
SF
2090 /* BB do we need to verify this common case ?
2091 this case is ok - if we are at server EOF
1da177e4
LT
2092 we will hit it on next read */
2093
05ac9d4b 2094 /* break; */
1da177e4
LT
2095 }
2096 } else {
b6b38f70 2097 cFYI(1, "No bytes read (%d) at offset %lld. "
f19159dc 2098 "Cleaning remaining pages from readahead list",
b6b38f70 2099 bytes_read, offset);
fb8c4b14 2100 /* BB turn off caching and do new lookup on
1da177e4 2101 file size at server? */
1da177e4
LT
2102 break;
2103 }
2104 if (smb_read_data) {
fb8c4b14 2105 if (buf_type == CIFS_SMALL_BUFFER)
ec637e3f 2106 cifs_small_buf_release(smb_read_data);
fb8c4b14 2107 else if (buf_type == CIFS_LARGE_BUFFER)
ec637e3f 2108 cifs_buf_release(smb_read_data);
1da177e4
LT
2109 smb_read_data = NULL;
2110 }
2111 bytes_read = 0;
2112 }
2113
1da177e4
LT
2114/* need to free smb_read_data buf before exit */
2115 if (smb_read_data) {
fb8c4b14 2116 if (buf_type == CIFS_SMALL_BUFFER)
47c886b3 2117 cifs_small_buf_release(smb_read_data);
fb8c4b14 2118 else if (buf_type == CIFS_LARGE_BUFFER)
47c886b3 2119 cifs_buf_release(smb_read_data);
1da177e4 2120 smb_read_data = NULL;
fb8c4b14 2121 }
1da177e4 2122
56698236 2123read_complete:
1da177e4
LT
2124 FreeXid(xid);
2125 return rc;
2126}
2127
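/*
 * Editor's note (not in the original file): a worked example of the
 * read_size clamp in cifs_readpages() above. Suppose 7 contiguous
 * PAGE_CACHE_SIZE (4096-byte) pages are queued and the mount's rsize is
 * 16384: contig_pages * PAGE_CACHE_SIZE is 28672, but read_size is capped
 * at rsize rounded down to a page multiple, i.e. 16384, so each
 * CIFSSMBRead call pulls in at most 4 pages and the loop issues further
 * reads for the remainder.
 */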
2128static int cifs_readpage_worker(struct file *file, struct page *page,
2129 loff_t *poffset)
2130{
2131 char *read_data;
2132 int rc;
2133
56698236
SJ
2134 /* Is the page cached? */
2135 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2136 if (rc == 0)
2137 goto read_complete;
2138
1da177e4
LT
2139 page_cache_get(page);
2140 read_data = kmap(page);
2141 /* for reads over a certain size could initiate async read ahead */
fb8c4b14 2142
1da177e4 2143 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
fb8c4b14 2144
1da177e4
LT
2145 if (rc < 0)
2146 goto io_error;
2147 else
b6b38f70 2148 cFYI(1, "Bytes read %d", rc);
fb8c4b14 2149
e6a00296
JJS
2150 file->f_path.dentry->d_inode->i_atime =
2151 current_fs_time(file->f_path.dentry->d_inode->i_sb);
fb8c4b14 2152
1da177e4
LT
2153 if (PAGE_CACHE_SIZE > rc)
2154 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2155
2156 flush_dcache_page(page);
2157 SetPageUptodate(page);
9dc06558
SJ
2158
2159 /* send this page to the cache */
2160 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2161
1da177e4 2162 rc = 0;
fb8c4b14 2163
1da177e4 2164io_error:
fb8c4b14 2165 kunmap(page);
1da177e4 2166 page_cache_release(page);
56698236
SJ
2167
2168read_complete:
1da177e4
LT
2169 return rc;
2170}
2171
2172static int cifs_readpage(struct file *file, struct page *page)
2173{
2174 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2175 int rc = -EACCES;
2176 int xid;
2177
2178 xid = GetXid();
2179
2180 if (file->private_data == NULL) {
0f3bc09e 2181 rc = -EBADF;
1da177e4 2182 FreeXid(xid);
0f3bc09e 2183 return rc;
1da177e4
LT
2184 }
2185
b6b38f70
JP
2186 cFYI(1, "readpage %p at offset %d 0x%x\n",
2187 page, (int)offset, (int)offset);
1da177e4
LT
2188
2189 rc = cifs_readpage_worker(file, page, &offset);
2190
2191 unlock_page(page);
2192
2193 FreeXid(xid);
2194 return rc;
2195}
2196
a403a0a3
SF
2197static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2198{
2199 struct cifsFileInfo *open_file;
2200
2201 read_lock(&GlobalSMBSeslock);
2202 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2203 if (open_file->closePend)
2204 continue;
2205 if (open_file->pfile &&
2206 ((open_file->pfile->f_flags & O_RDWR) ||
2207 (open_file->pfile->f_flags & O_WRONLY))) {
2208 read_unlock(&GlobalSMBSeslock);
2209 return 1;
2210 }
2211 }
2212 read_unlock(&GlobalSMBSeslock);
2213 return 0;
2214}
2215
1da177e4
LT
 2216/* We do not want to update the file size from the server for inodes
 2217 open for write, to avoid races with writepage extending
 2218 the file. In the future we could consider allowing
fb8c4b14 2219 the inode to be refreshed only on increases in the file size,
1da177e4
LT
 2220 but this is tricky to do without racing with writebehind
 2221 page caching in the current Linux kernel design */
4b18f2a9 2222bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1da177e4 2223{
a403a0a3 2224 if (!cifsInode)
4b18f2a9 2225 return true;
50c2f753 2226
a403a0a3
SF
2227 if (is_inode_writable(cifsInode)) {
2228 /* This inode is open for write at least once */
c32a0b68
SF
2229 struct cifs_sb_info *cifs_sb;
2230
c32a0b68 2231 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
ad7a2926 2232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 2233 /* since no page cache to corrupt on directio
c32a0b68 2234 we can change size safely */
4b18f2a9 2235 return true;
c32a0b68
SF
2236 }
2237
fb8c4b14 2238 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4b18f2a9 2239 return true;
7ba52631 2240
4b18f2a9 2241 return false;
23e7dd7d 2242 } else
4b18f2a9 2243 return true;
1da177e4
LT
2244}
2245
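/*
 * Editor's sketch (not in the original file): a hedged example of how a
 * caller might apply the policy implemented by is_size_safe_to_change()
 * when the server reports a new end-of-file. The helper name is invented
 * for illustration; the real size updates are performed elsewhere
 * (fs/cifs/inode.c).
 */
static void cifs_update_size_sketch(struct inode *inode, __u64 end_of_file)
{
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	/* writers may be extending the file; trust the local size instead */
	if (!is_size_safe_to_change(cifsi, end_of_file))
		return;

	spin_lock(&inode->i_lock);
	i_size_write(inode, end_of_file);
	spin_unlock(&inode->i_lock);
}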
d9414774
NP
2246static int cifs_write_begin(struct file *file, struct address_space *mapping,
2247 loff_t pos, unsigned len, unsigned flags,
2248 struct page **pagep, void **fsdata)
1da177e4 2249{
d9414774
NP
2250 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2251 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
a98ee8c1
JL
2252 loff_t page_start = pos & PAGE_MASK;
2253 loff_t i_size;
2254 struct page *page;
2255 int rc = 0;
d9414774 2256
b6b38f70 2257 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
d9414774 2258
54566b2c 2259 page = grab_cache_page_write_begin(mapping, index, flags);
a98ee8c1
JL
2260 if (!page) {
2261 rc = -ENOMEM;
2262 goto out;
2263 }
8a236264 2264
a98ee8c1
JL
2265 if (PageUptodate(page))
2266 goto out;
8a236264 2267
a98ee8c1
JL
2268 /*
2269 * If we write a full page it will be up to date, no need to read from
2270 * the server. If the write is short, we'll end up doing a sync write
2271 * instead.
2272 */
2273 if (len == PAGE_CACHE_SIZE)
2274 goto out;
8a236264 2275
a98ee8c1
JL
2276 /*
2277 * optimize away the read when we have an oplock, and we're not
2278 * expecting to use any of the data we'd be reading in. That
2279 * is, when the page lies beyond the EOF, or straddles the EOF
2280 * and the write will cover all of the existing data.
2281 */
2282 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2283 i_size = i_size_read(mapping->host);
2284 if (page_start >= i_size ||
2285 (offset == 0 && (pos + len) >= i_size)) {
2286 zero_user_segments(page, 0, offset,
2287 offset + len,
2288 PAGE_CACHE_SIZE);
2289 /*
2290 * PageChecked means that the parts of the page
2291 * to which we're not writing are considered up
2292 * to date. Once the data is copied to the
2293 * page, it can be set uptodate.
2294 */
2295 SetPageChecked(page);
2296 goto out;
2297 }
2298 }
d9414774 2299
a98ee8c1
JL
2300 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2301 /*
2302 * might as well read a page, it is fast enough. If we get
2303 * an error, we don't need to return it. cifs_write_end will
2304 * do a sync write instead since PG_uptodate isn't set.
2305 */
2306 cifs_readpage_worker(file, page, &page_start);
8a236264
SF
2307 } else {
2308 /* we could try using another file handle if there is one -
2309 but how would we lock it to prevent close of that handle
2310 racing with this read? In any case
d9414774 2311 this will be written out by write_end so is fine */
1da177e4 2312 }
a98ee8c1
JL
2313out:
2314 *pagep = page;
2315 return rc;
1da177e4
LT
2316}
2317
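/*
 * Editor's note (not in the original file): a worked example of the
 * PageChecked handshake between cifs_write_begin() above and
 * cifs_write_end(). Suppose the client holds a read oplock and a write of
 * len 512 at page offset 100 targets a page lying entirely beyond the
 * current EOF: write_begin zeroes bytes [0,100) and [612,PAGE_CACHE_SIZE)
 * and sets PageChecked instead of reading from the server. If write_end
 * then sees copied == len, the whole page is known good and is marked
 * uptodate; if the copy was short, PageChecked is cleared and the data is
 * written synchronously via cifs_write() because PG_uptodate was never set.
 */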
85f2d6b4
SJ
2318static int cifs_release_page(struct page *page, gfp_t gfp)
2319{
2320 if (PagePrivate(page))
2321 return 0;
2322
2323 return cifs_fscache_release_page(page, gfp);
2324}
2325
2326static void cifs_invalidate_page(struct page *page, unsigned long offset)
2327{
2328 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2329
2330 if (offset == 0)
2331 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2332}
2333
9b646972 2334void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
2335{
2336 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2337 oplock_break);
2338 struct inode *inode = cfile->pInode;
2339 struct cifsInodeInfo *cinode = CIFS_I(inode);
3bc303c2
JL
2340 int rc, waitrc = 0;
2341
2342 if (inode && S_ISREG(inode->i_mode)) {
d54ff732 2343 if (cinode->clientCanCacheRead)
8737c930 2344 break_lease(inode, O_RDONLY);
d54ff732 2345 else
8737c930 2346 break_lease(inode, O_WRONLY);
3bc303c2
JL
2347 rc = filemap_fdatawrite(inode->i_mapping);
2348 if (cinode->clientCanCacheRead == 0) {
2349 waitrc = filemap_fdatawait(inode->i_mapping);
2350 invalidate_remote_inode(inode);
2351 }
2352 if (!rc)
2353 rc = waitrc;
2354 if (rc)
2355 cinode->write_behind_rc = rc;
b6b38f70 2356 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3bc303c2
JL
2357 }
2358
2359 /*
 2360 * Releasing a stale oplock after a recent reconnect of the smb session,
 2361 * using a now incorrect file handle, is not a data integrity issue. But
 2362 * do not bother sending an oplock release if the session to the server
 2363 * is still disconnected, since the server has already released the oplock.
2364 */
2365 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
13cfb733
JL
2366 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
2367 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
b6b38f70 2368 cFYI(1, "Oplock release rc = %d", rc);
3bc303c2 2369 }
9b646972
TH
2370
2371 /*
2372 * We might have kicked in before is_valid_oplock_break()
 2373 * finished grabbing a reference for us. Make sure it's done by
 2374 * waiting for GlobalSMBSeslock.
2375 */
2376 write_lock(&GlobalSMBSeslock);
2377 write_unlock(&GlobalSMBSeslock);
2378
2379 cifs_oplock_break_put(cfile);
3bc303c2
JL
2380}
2381
9b646972 2382void cifs_oplock_break_get(struct cifsFileInfo *cfile)
3bc303c2 2383{
3bc303c2
JL
2384 mntget(cfile->mnt);
2385 cifsFileInfo_get(cfile);
3bc303c2
JL
2386}
2387
9b646972 2388void cifs_oplock_break_put(struct cifsFileInfo *cfile)
3bc303c2 2389{
3bc303c2
JL
2390 mntput(cfile->mnt);
2391 cifsFileInfo_put(cfile);
2392}
2393
f5e54d6e 2394const struct address_space_operations cifs_addr_ops = {
1da177e4
LT
2395 .readpage = cifs_readpage,
2396 .readpages = cifs_readpages,
2397 .writepage = cifs_writepage,
37c0eb46 2398 .writepages = cifs_writepages,
d9414774
NP
2399 .write_begin = cifs_write_begin,
2400 .write_end = cifs_write_end,
1da177e4 2401 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2402 .releasepage = cifs_release_page,
2403 .invalidatepage = cifs_invalidate_page,
1da177e4
LT
2404 /* .sync_page = cifs_sync_page, */
2405 /* .direct_IO = */
2406};
273d81d6
DK
2407
2408/*
2409 * cifs_readpages requires the server to support a buffer large enough to
2410 * contain the header plus one complete page of data. Otherwise, we need
2411 * to leave cifs_readpages out of the address space operations.
2412 */
f5e54d6e 2413const struct address_space_operations cifs_addr_ops_smallbuf = {
273d81d6
DK
2414 .readpage = cifs_readpage,
2415 .writepage = cifs_writepage,
2416 .writepages = cifs_writepages,
d9414774
NP
2417 .write_begin = cifs_write_begin,
2418 .write_end = cifs_write_end,
273d81d6 2419 .set_page_dirty = __set_page_dirty_nobuffers,
85f2d6b4
SJ
2420 .releasepage = cifs_release_page,
2421 .invalidatepage = cifs_invalidate_page,
273d81d6
DK
2422 /* .sync_page = cifs_sync_page, */
2423 /* .direct_IO = */
2424};