Merge commit 'v2.6.27-rc3' into x86/urgent
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / cifs / cifsfs.c
1 /*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
25
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include "cifsfs.h"
39 #include "cifspdu.h"
40 #define DECLARE_GLOBALS_HERE
41 #include "cifsglob.h"
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
45 #include <linux/mm.h>
46 #include <linux/key-type.h>
47 #include "dns_resolve.h"
48 #include "cifs_spnego.h"
49 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
50
51 #ifdef CONFIG_CIFS_QUOTA
52 static struct quotactl_ops cifs_quotactl_ops;
53 #endif /* QUOTA */
54
/* Debug/tunable globals; most are toggled via /proc/fs/cifs entries */
int cifsFYI = 0;		/* informational debug messages off by default */
int cifsERROR = 1;		/* error messages on by default */
int traceSMB = 0;		/* dump raw SMB frames when set */
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;	/* CIFS Unix extensions */
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
/* Module load-time tunables; ranges are enforced in the init paths below */
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
				   "Default: 50 Range: 2 to 256");

/* mempools/caches defined later in this file */
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
93
/*
 * cifs_read_super - fill in a superblock for a new cifs mount.
 * @sb:      superblock allocated by the VFS (via cifs_get_sb)
 * @data:    mount options string
 * @devname: UNC name being mounted (e.g. //server/share)
 * @silent:  suppress the error message on mount failure
 *
 * Allocates the per-superblock cifs_sb_info, performs the protocol mount
 * (session setup + tree connect) via cifs_mount(), then instantiates the
 * root inode and dentry.  On failure everything allocated here is torn
 * down before returning a negative errno.
 */
static int
cifs_read_super(struct super_block *sb, void *data,
	       const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

#ifdef CONFIG_CIFS_DFS_UPCALL
	/* copy mount params to sb for use in submounts */
	/* BB: should we move this after the mount so we
	 * do not have to do the copy on failed mounts?
	 * BB: May be it is better to do simple copy before
	 * complex operation (mount), and in case of fail
	 * just exit instead of doing mount and attempting
	 * undo it if this copy fails?*/
	if (data) {
		int len = strlen(data);
		cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
		if (cifs_sb->mountdata == NULL) {
			kfree(sb->s_fs_info);
			sb->s_fs_info = NULL;
			return -ENOMEM;
		}
		/* copy len+1 bytes then force termination (belt and braces) */
		strncpy(cifs_sb->mountdata, data, len + 1);
		cifs_sb->mountdata[len] = '\0';
	}
#endif

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize =
		cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_iget(sb, ROOT_I);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		inode = NULL;	/* so the error path skips iput() */
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, ("export ops supported"));
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* EXPERIMENTAL */

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (cifs_sb->mountdata) {
			kfree(cifs_sb->mountdata);
			cifs_sb->mountdata = NULL;
		}
#endif
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}
192
/*
 * cifs_put_super - tear down a cifs superblock at unmount.
 *
 * Disconnects the tree connect / session via cifs_umount() and frees
 * the per-superblock state (saved mount data, nls table, cifs_sb).
 * Failure of cifs_umount() is only logged; unmount proceeds regardless.
 */
static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc)
		cERROR(1, ("cifs_umount failed with return code %d", rc));
#ifdef CONFIG_CIFS_DFS_UPCALL
	if (cifs_sb->mountdata) {
		kfree(cifs_sb->mountdata);
		cifs_sb->mountdata = NULL;
	}
#endif

	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}
219
/*
 * cifs_statfs - fill in filesystem statistics for statfs(2).
 *
 * Tries progressively older server queries until one succeeds:
 *   1. POSIX QFS info (if the share advertises the Unix extensions)
 *   2. NT-level QFS info (CAP_NT_SMBS)
 *   3. legacy (LANMAN/OS2-era) QFS info
 *
 * NOTE(review): always returns 0, even if every query failed, so statfs
 * never errors out to userspace — presumably deliberate best-effort
 * behavior; the pre-filled defaults below are what the caller then sees.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *tcon = cifs_sb->tcon;
	int rc = -EOPNOTSUPP;
	int xid;

	xid = GetXid();

	buf->f_type = CIFS_MAGIC_NUMBER;

	/*
	 * PATH_MAX may be too long - it would presumably be total path,
	 * but note that some servers (includinng Samba 3) have a shorter
	 * maximum path.
	 *
	 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
	 */
	buf->f_namelen = PATH_MAX;
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	/*
	 * We could add a second check for a QFS Unix capability bit
	 */
	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);

	/*
	 * Only need to call the old QFSInfo if failed on newer one,
	 * e.g. by OS/2.
	 **/
	if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
		rc = CIFSSMBQFSInfo(xid, tcon, buf);

	/*
	 * Some old Windows servers also do not support level 103, retry with
	 * older level one if old server failed the previous call or we
	 * bypassed it because we detected that this was an older LANMAN sess
	 */
	if (rc)
		rc = SMBOldQFSInfo(xid, tcon, buf);

	FreeXid(xid);
	return 0;
}
269
270 static int cifs_permission(struct inode *inode, int mask)
271 {
272 struct cifs_sb_info *cifs_sb;
273
274 cifs_sb = CIFS_SB(inode->i_sb);
275
276 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
277 return 0;
278 else /* file mode might have been restricted at mount time
279 on the client (above and beyond ACL on servers) for
280 servers which do not support setting and viewing mode bits,
281 so allowing client to check permissions is useful */
282 return generic_permission(inode, mask, NULL);
283 }
284
/* slab caches and mempools backing inode, request and mid allocations;
   created in the cifs_init_* functions below, destroyed at module exit */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
293
/*
 * cifs_alloc_inode - allocate a cifs inode (super_operations.alloc_inode).
 *
 * Allocates a cifsInodeInfo from the inode slab cache, initializes the
 * cifs-specific fields to safe defaults and returns the embedded VFS
 * inode.  Returns NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;	/* attribute cache timestamp: not yet valid */
	cifs_inode->write_behind_rc = 0;
	/* Until the file is open and we have gotten oplock
	info back from the server, can not assume caching of
	file data or metadata */
	cifs_inode->clientCanCacheRead = false;
	cifs_inode->clientCanCacheAll = false;
	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */

	/* Can not set i_flags here - they get immediately overwritten
	   to zero by the VFS */
/*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}
318
/* cifs_destroy_inode - return a cifs inode to the slab cache
   (counterpart of cifs_alloc_inode) */
static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
324
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 *
 * Each option is appended to @s as ",name" or ",name=value"; the tcon
 * and session sub-options are only emitted when those structures exist.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
/* BB add prepath to mount options displayed */
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
					   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
					   cifs_sb->tcon->ses->domainName);
			}
			/* uid/gid shown when overridden at mount time or
			   when the server cannot supply them (no unix ext) */
			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
			   !(cifs_sb->tcon->unix_ext))
				seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
			   !(cifs_sb->tcon->unix_ext))
				seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
			if (!cifs_sb->tcon->unix_ext) {
				seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
					   cifs_sb->mnt_file_mode,
					   cifs_sb->mnt_dir_mode);
			}
			if (cifs_sb->tcon->seal)
				seq_printf(s, ",seal");
			if (cifs_sb->tcon->nocase)
				seq_printf(s, ",nocase");
			if (cifs_sb->tcon->retry)
				seq_printf(s, ",hard");
		}
		/* boolean flags from mnt_cifs_flags */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
			seq_printf(s, ",posixpaths");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
			seq_printf(s, ",setuids");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
			seq_printf(s, ",serverino");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
			seq_printf(s, ",directio");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			seq_printf(s, ",nouser_xattr");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
			seq_printf(s, ",mapchars");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
			seq_printf(s, ",sfu");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			seq_printf(s, ",nobrl");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
			seq_printf(s, ",cifsacl");
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
			seq_printf(s, ",dynperm");
		if (m->mnt_sb->s_flags & MS_POSIXACL)
			seq_printf(s, ",acl");

		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}
395
#ifdef CONFIG_CIFS_QUOTA
/*
 * cifs_xquota_set - set quota for an id (quotactl_ops.set_xquota).
 *
 * Stub: only validates that a tree connection exists and logs the
 * request; no quota SMB is sent yet.  Returns 0 or -EIO.
 */
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;


	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}
421
422 int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
423 struct fs_disk_quota *pdquota)
424 {
425 int xid;
426 int rc = 0;
427 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
428 struct cifsTconInfo *pTcon;
429
430 if (cifs_sb)
431 pTcon = cifs_sb->tcon;
432 else
433 return -EIO;
434
435 xid = GetXid();
436 if (pTcon) {
437 cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
438 } else {
439 rc = -EIO;
440 }
441
442 FreeXid(xid);
443 return rc;
444 }
445
/*
 * cifs_xstate_set - change quota enforcement state
 * (quotactl_ops.set_xstate).  Stub: logs the request only.
 */
int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}
468
469 int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
470 {
471 int xid;
472 int rc = 0;
473 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
474 struct cifsTconInfo *pTcon;
475
476 if (cifs_sb) {
477 pTcon = cifs_sb->tcon;
478 } else {
479 return -EIO;
480 }
481 xid = GetXid();
482 if (pTcon) {
483 cFYI(1, ("pqstats %p", qstats));
484 } else {
485 rc = -EIO;
486 }
487
488 FreeXid(xid);
489 return rc;
490 }
491
/* XFS-style quota operations wired into sb->s_qcop in cifs_read_super */
static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif
499
/*
 * cifs_umount_begin - start a forced unmount (umount -f).
 *
 * If this superblock is the last user of the tree connection, marks the
 * tcon as exiting so in-flight operations error out, then wakes all
 * waiters on the transport queues (twice, with a yield in between, to
 * catch requests queued while the first wakeup ran).
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
/* BB FIXME - finish add checks for tidStatus BB */

	return;
}
531
#ifdef CONFIG_CIFS_STATS2
/* per-mount statistics for /proc/mounts — not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif
539
540 static int cifs_remount(struct super_block *sb, int *flags, char *data)
541 {
542 *flags |= MS_NODIRATIME;
543 return 0;
544 }
545
/* superblock callbacks installed on every cifs mount (cifs_read_super) */
static const struct super_operations cifs_super_ops = {
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode	    = generic_delete_inode,
	.delete_inode	= cifs_delete_inode,  */  /* Do not need above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
563
/*
 * cifs_get_sb - file_system_type.get_sb entry point for mount(2).
 *
 * Always allocates a fresh anonymous superblock (no superblock sharing
 * between mounts), fills it via cifs_read_super(), and attaches it to
 * @mnt.  On failure the half-initialized superblock is released.
 */
static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}
587
/*
 * cifs_file_aio_write - aio_write wrapper around generic_file_aio_write.
 *
 * After the generic write, if the server has not granted an exclusive
 * oplock (clientCanCacheAll false) the dirty pages are pushed to the
 * server immediately rather than being cached on the client.
 */
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}
599
/*
 * cifs_llseek - llseek that revalidates the file size for SEEK_END.
 *
 * Another client may have extended the file on the server, so the
 * cached length cannot be trusted for end-relative seeks.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval;

		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return generic_file_llseek_unlocked(file, offset, origin);
}
617
/* registered with the VFS in init_cifs() */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};
/* inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
646
/* inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

/* inode operations for symlinks */
const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:   cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
676
/* default file operations (page cache + byte-range locks) */
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

/* variant selected for direct-I/O mounts: reads/writes bypass the
   page cache, hence no mmap or aio methods */
const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

/* variant without the .lock method — used when byte-range locks are
   disabled (nobrl) */
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

/* direct-I/O variant without byte-range locks */
const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.unlocked_ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

/* operations for open directories (readdir etc.) */
const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.unlocked_ioctl  = cifs_ioctl,
};
767
768 static void
769 cifs_init_once(void *inode)
770 {
771 struct cifsInodeInfo *cifsi = inode;
772
773 inode_init_once(&cifsi->vfs_inode);
774 INIT_LIST_HEAD(&cifsi->lockList);
775 }
776
/*
 * cifs_init_inodecache - create the slab cache backing cifs inodes.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/* counterpart of cifs_init_inodecache, called at module exit */
static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}
796
/*
 * cifs_init_request_bufs - create the large and small SMB request
 * buffer caches and their backing mempools.
 *
 * Clamps the CIFSMaxBufSize and pool-size module parameters into their
 * documented ranges first.  On any failure everything created so far is
 * destroyed and -ENOMEM is returned.
 */
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
867
/* counterpart of cifs_init_request_bufs: each mempool is destroyed
   before the slab cache that backs it */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
876
/*
 * cifs_init_mids - create caches for multiplex-id (mid) tracking
 * structures and oplock queue entries, plus the mid mempool.
 * Unwinds fully and returns -ENOMEM on any failure.
 */
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					sizeof(struct oplock_q_entry), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

/* counterpart of cifs_init_mids, called at module exit */
static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}
912
/*
 * cifs_oplock_thread - kernel thread servicing oplock break requests.
 *
 * Drains GlobalOplock_Q: for each queued entry it flushes (and, when
 * read caching must also be given up, waits on and invalidates) the
 * inode's page cache, then sends the oplock release to the server.
 * Sleeps up to 39s when the queue is empty; exits on kthread_stop().
 */
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16  netfid;
	int rc, waitrc = 0;

	set_freezable();
	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						struct oplock_q_entry, qhead);
			cFYI(1, ("found oplock item to write out"));
			pTcon = oplock_item->tcon;
			inode = oplock_item->pinode;
			netfid = oplock_item->netfid;
			spin_unlock(&GlobalMid_Lock);
			DeleteOplockQEntry(oplock_item);
			/* can not grab inode sem here since it would
				deadlock when oplock received on delete
				since vfs_unlink holds the i_mutex across
				the call */
			/* mutex_lock(&inode->i_mutex);*/
			if (S_ISREG(inode->i_mode)) {
				rc = filemap_fdatawrite(inode->i_mapping);
				if (CIFS_I(inode)->clientCanCacheRead == 0) {
					waitrc = filemap_fdatawait(
							      inode->i_mapping);
					invalidate_remote_inode(inode);
				}
				if (rc == 0)
					rc = waitrc;
			} else
				rc = 0;
			/* mutex_unlock(&inode->i_mutex);*/
			if (rc)
				CIFS_I(inode)->write_behind_rc = rc;
			cFYI(1, ("Oplock flush inode %p rc %d",
				inode, rc));

			/* releasing stale oplock after recent reconnect
			of smb session using a now incorrect file
			handle is not a data integrity issue but do
			not bother sending an oplock release if session
			to server still is disconnected since oplock
			already released by the server in that case */
			if (pTcon->tidStatus != CifsNeedReconnect) {
				rc = CIFSSMBLock(0, pTcon, netfid,
					0 /* len */ , 0 /* offset */, 0,
					0, LOCKING_ANDX_OPLOCK_RELEASE,
					false /* wait flag */);
				cFYI(1, ("Oplock release rc = %d", rc));
			}
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
982
983 static int cifs_dnotify_thread(void *dummyarg)
984 {
985 struct list_head *tmp;
986 struct cifsSesInfo *ses;
987
988 do {
989 if (try_to_freeze())
990 continue;
991 set_current_state(TASK_INTERRUPTIBLE);
992 schedule_timeout(15*HZ);
993 read_lock(&GlobalSMBSeslock);
994 /* check if any stuck requests that need
995 to be woken up and wakeq so the
996 thread can wake up and error out */
997 list_for_each(tmp, &GlobalSMBSessionList) {
998 ses = list_entry(tmp, struct cifsSesInfo,
999 cifsSessionList);
1000 if (ses->server && atomic_read(&ses->server->inFlight))
1001 wake_up_all(&ses->server->response_q);
1002 }
1003 read_unlock(&GlobalSMBSeslock);
1004 } while (!kthread_should_stop());
1005
1006 return 0;
1007 }
1008
/*
 * init_cifs - module entry point.
 *
 * Initializes global lists, counters and locks, clamps module
 * parameters, creates the slab caches/mempools, registers the
 * filesystem and (optionally) key types, and starts the oplock and
 * dnotify kernel threads.  The goto chain unwinds each step in
 * reverse order on failure.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
/*  INIT_LIST_HEAD(&GlobalServerList);*/	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	/* clamp module parameter into its documented 2..256 range */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_unregister_filesystem;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = register_key_type(&key_type_dns_resolver);
	if (rc)
		goto out_unregister_key_type;
#endif
	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_dfs_key_type;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_dfs_key_type:
#ifdef CONFIG_CIFS_DFS_UPCALL
	unregister_key_type(&key_type_dns_resolver);
 out_unregister_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:
#endif
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
	cifs_proc_clean();
	return rc;
}
1117
/*
 * exit_cifs - module exit: undo everything init_cifs() set up,
 * in reverse order, and stop the two kernel threads.
 */
static void __exit
exit_cifs(void)
{
	cFYI(DBG2, ("exit_cifs"));
	cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
	cifs_dfs_release_automount_timer();
	unregister_key_type(&key_type_dns_resolver);
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}
1137
1138 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1139 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1140 MODULE_DESCRIPTION
1141 ("VFS to access servers complying with the SNIA CIFS Specification "
1142 "e.g. Samba and Windows");
1143 MODULE_VERSION(CIFS_VERSION);
1144 module_init(init_cifs)
1145 module_exit(exit_cifs)