[PATCH] Vectorize aio_read/aio_write fileop methods
[GitHub/mt8127/android_kernel_alcatel_ttab.git] fs/cifs/cifsfs.c
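As the file below shows, the CIFS file_operations now point the vectored aio_read/aio_write methods, which take an array of iovec segments rather than a single buffer, at generic_file_aio_read and a CIFS wrapper (cifs_file_aio_write); ordinary read/write are serviced through do_sync_read/do_sync_write, and the write wrappers force writeback when the client cannot cache the file. Below is a minimal userspace sketch (illustrative only, not part of the patch) that exercises the vectored write path via writev(); the mount point /mnt/cifs and the file name are hypothetical.

/*
 * Hypothetical example: two iovec segments submitted as one vectored
 * write. On a CIFS mount this is handled by the vectored write fileops
 * defined in this file (cifs_file_writev / cifs_file_aio_write,
 * depending on kernel version).
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	int fd = open("/mnt/cifs/testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	char a[] = "hello ";
	char b[] = "world\n";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};

	/* One writev() call submits both segments as a single vectored write */
	if (writev(fd, iov, 2) < 0)
		perror("writev");

	close(fd);
	return 0;
}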
1 /*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2004
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
25
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include "cifsfs.h"
38 #include "cifspdu.h"
39 #define DECLARE_GLOBALS_HERE
40 #include "cifsglob.h"
41 #include "cifsproto.h"
42 #include "cifs_debug.h"
43 #include "cifs_fs_sb.h"
44 #include <linux/mm.h>
45 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
46
47 #ifdef CONFIG_CIFS_QUOTA
48 static struct quotactl_ops cifs_quotactl_ops;
49 #endif
50
51 int cifsFYI = 0;
52 int cifsERROR = 1;
53 int traceSMB = 0;
54 unsigned int oplockEnabled = 1;
55 unsigned int experimEnabled = 0;
56 unsigned int linuxExtEnabled = 1;
57 unsigned int lookupCacheEnabled = 1;
58 unsigned int multiuser_mount = 0;
59 unsigned int extended_security = CIFSSEC_DEF;
60 /* unsigned int ntlmv2_support = 0; */
61 unsigned int sign_CIFS_PDUs = 1;
62 extern struct task_struct * oplockThread; /* remove sparse warning */
63 struct task_struct * oplockThread = NULL;
64 extern struct task_struct * dnotifyThread; /* remove sparse warning */
65 struct task_struct * dnotifyThread = NULL;
66 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67 module_param(CIFSMaxBufSize, int, 0);
68 MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
69 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
70 module_param(cifs_min_rcv, int, 0);
71 MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
72 unsigned int cifs_min_small = 30;
73 module_param(cifs_min_small, int, 0);
74 MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
75 unsigned int cifs_max_pending = CIFS_MAX_REQ;
76 module_param(cifs_max_pending, int, 0);
77 MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");
78
79 extern mempool_t *cifs_sm_req_poolp;
80 extern mempool_t *cifs_req_poolp;
81 extern mempool_t *cifs_mid_poolp;
82
83 extern kmem_cache_t *cifs_oplock_cachep;
84
85 static int
86 cifs_read_super(struct super_block *sb, void *data,
87 const char *devname, int silent)
88 {
89 struct inode *inode;
90 struct cifs_sb_info *cifs_sb;
91 int rc = 0;
92
93 sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
94 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
95 cifs_sb = CIFS_SB(sb);
96 if(cifs_sb == NULL)
97 return -ENOMEM;
98
99 rc = cifs_mount(sb, cifs_sb, data, devname);
100
101 if (rc) {
102 if (!silent)
103 cERROR(1,
104 ("cifs_mount failed w/return code = %d", rc));
105 goto out_mount_failed;
106 }
107
108 sb->s_magic = CIFS_MAGIC_NUMBER;
109 sb->s_op = &cifs_super_ops;
110 /* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
111 sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
112 #ifdef CONFIG_CIFS_QUOTA
113 sb->s_qcop = &cifs_quotactl_ops;
114 #endif
115 sb->s_blocksize = CIFS_MAX_MSGSIZE;
116 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
117 inode = iget(sb, ROOT_I);
118
119 if (!inode) {
120 rc = -ENOMEM;
121 goto out_no_root;
122 }
123
124 sb->s_root = d_alloc_root(inode);
125
126 if (!sb->s_root) {
127 rc = -ENOMEM;
128 goto out_no_root;
129 }
130
131 return 0;
132
133 out_no_root:
134 cERROR(1, ("cifs_read_super: get root inode failed"));
135 if (inode)
136 iput(inode);
137
138 out_mount_failed:
139 if(cifs_sb) {
140 if(cifs_sb->local_nls)
141 unload_nls(cifs_sb->local_nls);
142 kfree(cifs_sb);
143 }
144 return rc;
145 }
146
147 static void
148 cifs_put_super(struct super_block *sb)
149 {
150 int rc = 0;
151 struct cifs_sb_info *cifs_sb;
152
153 cFYI(1, ("In cifs_put_super"));
154 cifs_sb = CIFS_SB(sb);
155 if(cifs_sb == NULL) {
156 cFYI(1,("Empty cifs superblock info passed to unmount"));
157 return;
158 }
159 rc = cifs_umount(sb, cifs_sb);
160 if (rc) {
161 cERROR(1, ("cifs_umount failed with return code %d", rc));
162 }
163 unload_nls(cifs_sb->local_nls);
164 kfree(cifs_sb);
165 return;
166 }
167
168 static int
169 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
170 {
171 struct super_block *sb = dentry->d_sb;
172 int xid;
173 int rc = -EOPNOTSUPP;
174 struct cifs_sb_info *cifs_sb;
175 struct cifsTconInfo *pTcon;
176
177 xid = GetXid();
178
179 cifs_sb = CIFS_SB(sb);
180 pTcon = cifs_sb->tcon;
181
182 buf->f_type = CIFS_MAGIC_NUMBER;
183
184 /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
185 buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
186 presumably be total path, but note
187 that some servers (including Samba 3)
188 have a shorter maximum path */
189 buf->f_files = 0; /* undefined */
190 buf->f_ffree = 0; /* unlimited */
191
192 /* BB we could add a second check for a QFS Unix capability bit */
193 /* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
194 if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
195 le64_to_cpu(pTcon->fsUnixInfo.Capability)))
196 rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);
197
198 /* Only need to call the old QFSInfo if failed
199 on newer one */
200 if(rc)
201 rc = CIFSSMBQFSInfo(xid, pTcon, buf);
202
203 /* Old Windows servers do not support level 103, retry with level
204 one if old server failed the previous call */
205 if(rc)
206 rc = SMBOldQFSInfo(xid, pTcon, buf);
207 /*
208 int f_type;
209 __fsid_t f_fsid;
210 int f_namelen; */
211 /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
212 FreeXid(xid);
213 return 0; /* always return success? what if volume is no
214 longer available? */
215 }
216
217 static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
218 {
219 struct cifs_sb_info *cifs_sb;
220
221 cifs_sb = CIFS_SB(inode->i_sb);
222
223 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
224 return 0;
225 } else /* file mode might have been restricted at mount time
226 on the client (above and beyond ACL on servers) for
227 servers which do not support setting and viewing mode bits,
228 so allowing client to check permissions is useful */
229 return generic_permission(inode, mask, NULL);
230 }
231
232 static kmem_cache_t *cifs_inode_cachep;
233 static kmem_cache_t *cifs_req_cachep;
234 static kmem_cache_t *cifs_mid_cachep;
235 kmem_cache_t *cifs_oplock_cachep;
236 static kmem_cache_t *cifs_sm_req_cachep;
237 mempool_t *cifs_sm_req_poolp;
238 mempool_t *cifs_req_poolp;
239 mempool_t *cifs_mid_poolp;
240
241 static struct inode *
242 cifs_alloc_inode(struct super_block *sb)
243 {
244 struct cifsInodeInfo *cifs_inode;
245 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
246 if (!cifs_inode)
247 return NULL;
248 cifs_inode->cifsAttrs = 0x20; /* default */
249 atomic_set(&cifs_inode->inUse, 0);
250 cifs_inode->time = 0;
251 /* Until the file is open and we have gotten oplock
252 info back from the server, can not assume caching of
253 file data or metadata */
254 cifs_inode->clientCanCacheRead = FALSE;
255 cifs_inode->clientCanCacheAll = FALSE;
256 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
257 cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
258 INIT_LIST_HEAD(&cifs_inode->openFileList);
259 return &cifs_inode->vfs_inode;
260 }
261
262 static void
263 cifs_destroy_inode(struct inode *inode)
264 {
265 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
266 }
267
268 /*
269 * cifs_show_options() is for displaying mount options in /proc/mounts.
270 * Not all settable options are displayed but most of the important
271 * ones are.
272 */
273 static int
274 cifs_show_options(struct seq_file *s, struct vfsmount *m)
275 {
276 struct cifs_sb_info *cifs_sb;
277
278 cifs_sb = CIFS_SB(m->mnt_sb);
279
280 if (cifs_sb) {
281 if (cifs_sb->tcon) {
282 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
283 if (cifs_sb->tcon->ses) {
284 if (cifs_sb->tcon->ses->userName)
285 seq_printf(s, ",username=%s",
286 cifs_sb->tcon->ses->userName);
287 if(cifs_sb->tcon->ses->domainName)
288 seq_printf(s, ",domain=%s",
289 cifs_sb->tcon->ses->domainName);
290 }
291 }
292 seq_printf(s, ",rsize=%d",cifs_sb->rsize);
293 seq_printf(s, ",wsize=%d",cifs_sb->wsize);
294 }
295 return 0;
296 }
297
298 #ifdef CONFIG_CIFS_QUOTA
299 int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
300 struct fs_disk_quota * pdquota)
301 {
302 int xid;
303 int rc = 0;
304 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
305 struct cifsTconInfo *pTcon;
306
307 if(cifs_sb)
308 pTcon = cifs_sb->tcon;
309 else
310 return -EIO;
311
312
313 xid = GetXid();
314 if(pTcon) {
315 cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
316 } else {
317 return -EIO;
318 }
319
320 FreeXid(xid);
321 return rc;
322 }
323
324 int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
325 struct fs_disk_quota * pdquota)
326 {
327 int xid;
328 int rc = 0;
329 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
330 struct cifsTconInfo *pTcon;
331
332 if(cifs_sb)
333 pTcon = cifs_sb->tcon;
334 else
335 return -EIO;
336
337 xid = GetXid();
338 if(pTcon) {
339 cFYI(1,("get type: 0x%x id: %d",quota_type,qid));
340 } else {
341 rc = -EIO;
342 }
343
344 FreeXid(xid);
345 return rc;
346 }
347
348 int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
349 {
350 int xid;
351 int rc = 0;
352 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
353 struct cifsTconInfo *pTcon;
354
355 if(cifs_sb)
356 pTcon = cifs_sb->tcon;
357 else
358 return -EIO;
359
360 xid = GetXid();
361 if(pTcon) {
362 cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
363 } else {
364 rc = -EIO;
365 }
366
367 FreeXid(xid);
368 return rc;
369 }
370
371 int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
372 {
373 int xid;
374 int rc = 0;
375 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
376 struct cifsTconInfo *pTcon;
377
378 if(cifs_sb) {
379 pTcon = cifs_sb->tcon;
380 } else {
381 return -EIO;
382 }
383 xid = GetXid();
384 if(pTcon) {
385 cFYI(1,("pqstats %p",qstats));
386 } else {
387 rc = -EIO;
388 }
389
390 FreeXid(xid);
391 return rc;
392 }
393
394 static struct quotactl_ops cifs_quotactl_ops = {
395 .set_xquota = cifs_xquota_set,
396 .get_xquota = cifs_xquota_get,
397 .set_xstate = cifs_xstate_set,
398 .get_xstate = cifs_xstate_get,
399 };
400 #endif
401
402 static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
403 {
404 struct cifs_sb_info *cifs_sb;
405 struct cifsTconInfo * tcon;
406
407 if (!(flags & MNT_FORCE))
408 return;
409 cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
410 if(cifs_sb == NULL)
411 return;
412
413 tcon = cifs_sb->tcon;
414 if(tcon == NULL)
415 return;
416 down(&tcon->tconSem);
417 if (atomic_read(&tcon->useCount) == 1)
418 tcon->tidStatus = CifsExiting;
419 up(&tcon->tconSem);
420
421 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
422 /* cancel_notify_requests(tcon); */
423 if(tcon->ses && tcon->ses->server)
424 {
425 cFYI(1,("wake up tasks now - umount begin not complete"));
426 wake_up_all(&tcon->ses->server->request_q);
427 wake_up_all(&tcon->ses->server->response_q);
428 msleep(1); /* yield */
429 /* we have to kick the requests once more */
430 wake_up_all(&tcon->ses->server->response_q);
431 msleep(1);
432 }
433 /* BB FIXME - finish add checks for tidStatus BB */
434
435 return;
436 }
437
438 static int cifs_remount(struct super_block *sb, int *flags, char *data)
439 {
440 *flags |= MS_NODIRATIME;
441 return 0;
442 }
443
444 struct super_operations cifs_super_ops = {
445 .read_inode = cifs_read_inode,
446 .put_super = cifs_put_super,
447 .statfs = cifs_statfs,
448 .alloc_inode = cifs_alloc_inode,
449 .destroy_inode = cifs_destroy_inode,
450 /* .drop_inode = generic_delete_inode,
451 .delete_inode = cifs_delete_inode, *//* Do not need the above two functions
452 unless later we add lazy close of inodes or unless the kernel forgets to call
453 us with the same number of releases (closes) as opens */
454 .show_options = cifs_show_options,
455 .umount_begin = cifs_umount_begin,
456 .remount_fs = cifs_remount,
457 };
458
459 static int
460 cifs_get_sb(struct file_system_type *fs_type,
461 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
462 {
463 int rc;
464 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
465
466 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
467
468 if (IS_ERR(sb))
469 return PTR_ERR(sb);
470
471 sb->s_flags = flags;
472
473 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
474 if (rc) {
475 up_write(&sb->s_umount);
476 deactivate_super(sb);
477 return rc;
478 }
479 sb->s_flags |= MS_ACTIVE;
480 return simple_set_mnt(mnt, sb);
481 }
482
483 static ssize_t cifs_file_writev(struct file *file, const struct iovec *iov,
484 unsigned long nr_segs, loff_t *ppos)
485 {
486 struct inode *inode = file->f_dentry->d_inode;
487 ssize_t written;
488
489 written = generic_file_writev(file, iov, nr_segs, ppos);
490 if (!CIFS_I(inode)->clientCanCacheAll)
491 filemap_fdatawrite(inode->i_mapping);
492 return written;
493 }
494
495 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
496 unsigned long nr_segs, loff_t pos)
497 {
498 struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
499 ssize_t written;
500
501 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
502 if (!CIFS_I(inode)->clientCanCacheAll)
503 filemap_fdatawrite(inode->i_mapping);
504 return written;
505 }
506
507 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
508 {
509 /* origin == SEEK_END => we must revalidate the cached file length */
510 if (origin == 2) {
511 int retval = cifs_revalidate(file->f_dentry);
512 if (retval < 0)
513 return (loff_t)retval;
514 }
515 return remote_llseek(file, offset, origin);
516 }
517
518 static struct file_system_type cifs_fs_type = {
519 .owner = THIS_MODULE,
520 .name = "cifs",
521 .get_sb = cifs_get_sb,
522 .kill_sb = kill_anon_super,
523 /* .fs_flags */
524 };
525 struct inode_operations cifs_dir_inode_ops = {
526 .create = cifs_create,
527 .lookup = cifs_lookup,
528 .getattr = cifs_getattr,
529 .unlink = cifs_unlink,
530 .link = cifs_hardlink,
531 .mkdir = cifs_mkdir,
532 .rmdir = cifs_rmdir,
533 .rename = cifs_rename,
534 .permission = cifs_permission,
535 /* revalidate:cifs_revalidate, */
536 .setattr = cifs_setattr,
537 .symlink = cifs_symlink,
538 .mknod = cifs_mknod,
539 #ifdef CONFIG_CIFS_XATTR
540 .setxattr = cifs_setxattr,
541 .getxattr = cifs_getxattr,
542 .listxattr = cifs_listxattr,
543 .removexattr = cifs_removexattr,
544 #endif
545 };
546
547 struct inode_operations cifs_file_inode_ops = {
548 /* revalidate:cifs_revalidate, */
549 .setattr = cifs_setattr,
550 .getattr = cifs_getattr, /* do we need this anymore? */
551 .rename = cifs_rename,
552 .permission = cifs_permission,
553 #ifdef CONFIG_CIFS_XATTR
554 .setxattr = cifs_setxattr,
555 .getxattr = cifs_getxattr,
556 .listxattr = cifs_listxattr,
557 .removexattr = cifs_removexattr,
558 #endif
559 };
560
561 struct inode_operations cifs_symlink_inode_ops = {
562 .readlink = generic_readlink,
563 .follow_link = cifs_follow_link,
564 .put_link = cifs_put_link,
565 .permission = cifs_permission,
566 /* BB add the following two eventually */
567 /* revalidate: cifs_revalidate,
568 setattr: cifs_notify_change, *//* BB do we need notify change */
569 #ifdef CONFIG_CIFS_XATTR
570 .setxattr = cifs_setxattr,
571 .getxattr = cifs_getxattr,
572 .listxattr = cifs_listxattr,
573 .removexattr = cifs_removexattr,
574 #endif
575 };
576
577 const struct file_operations cifs_file_ops = {
578 .read = do_sync_read,
579 .write = do_sync_write,
580 .readv = generic_file_readv,
581 .writev = cifs_file_writev,
582 .aio_read = generic_file_aio_read,
583 .aio_write = cifs_file_aio_write,
584 .open = cifs_open,
585 .release = cifs_close,
586 .lock = cifs_lock,
587 .fsync = cifs_fsync,
588 .flush = cifs_flush,
589 .mmap = cifs_file_mmap,
590 .sendfile = generic_file_sendfile,
591 .llseek = cifs_llseek,
592 #ifdef CONFIG_CIFS_POSIX
593 .ioctl = cifs_ioctl,
594 #endif /* CONFIG_CIFS_POSIX */
595
596 #ifdef CONFIG_CIFS_EXPERIMENTAL
597 .dir_notify = cifs_dir_notify,
598 #endif /* CONFIG_CIFS_EXPERIMENTAL */
599 };
600
601 const struct file_operations cifs_file_direct_ops = {
602 /* no mmap, no aio, no readv -
603 BB reevaluate whether they can be done with directio, no cache */
604 .read = cifs_user_read,
605 .write = cifs_user_write,
606 .open = cifs_open,
607 .release = cifs_close,
608 .lock = cifs_lock,
609 .fsync = cifs_fsync,
610 .flush = cifs_flush,
611 .sendfile = generic_file_sendfile, /* BB removeme BB */
612 #ifdef CONFIG_CIFS_POSIX
613 .ioctl = cifs_ioctl,
614 #endif /* CONFIG_CIFS_POSIX */
615 .llseek = cifs_llseek,
616 #ifdef CONFIG_CIFS_EXPERIMENTAL
617 .dir_notify = cifs_dir_notify,
618 #endif /* CONFIG_CIFS_EXPERIMENTAL */
619 };
620 const struct file_operations cifs_file_nobrl_ops = {
621 .read = do_sync_read,
622 .write = do_sync_write,
623 .readv = generic_file_readv,
624 .writev = cifs_file_writev,
625 .aio_read = generic_file_aio_read,
626 .aio_write = cifs_file_aio_write,
627 .open = cifs_open,
628 .release = cifs_close,
629 .fsync = cifs_fsync,
630 .flush = cifs_flush,
631 .mmap = cifs_file_mmap,
632 .sendfile = generic_file_sendfile,
633 .llseek = cifs_llseek,
634 #ifdef CONFIG_CIFS_POSIX
635 .ioctl = cifs_ioctl,
636 #endif /* CONFIG_CIFS_POSIX */
637
638 #ifdef CONFIG_CIFS_EXPERIMENTAL
639 .dir_notify = cifs_dir_notify,
640 #endif /* CONFIG_CIFS_EXPERIMENTAL */
641 };
642
643 const struct file_operations cifs_file_direct_nobrl_ops = {
644 /* no mmap, no aio, no readv -
645 BB reevaluate whether they can be done with directio, no cache */
646 .read = cifs_user_read,
647 .write = cifs_user_write,
648 .open = cifs_open,
649 .release = cifs_close,
650 .fsync = cifs_fsync,
651 .flush = cifs_flush,
652 .sendfile = generic_file_sendfile, /* BB removeme BB */
653 #ifdef CONFIG_CIFS_POSIX
654 .ioctl = cifs_ioctl,
655 #endif /* CONFIG_CIFS_POSIX */
656 .llseek = cifs_llseek,
657 #ifdef CONFIG_CIFS_EXPERIMENTAL
658 .dir_notify = cifs_dir_notify,
659 #endif /* CONFIG_CIFS_EXPERIMENTAL */
660 };
661
662 const struct file_operations cifs_dir_ops = {
663 .readdir = cifs_readdir,
664 .release = cifs_closedir,
665 .read = generic_read_dir,
666 #ifdef CONFIG_CIFS_EXPERIMENTAL
667 .dir_notify = cifs_dir_notify,
668 #endif /* CONFIG_CIFS_EXPERIMENTAL */
669 .ioctl = cifs_ioctl,
670 };
671
672 static void
673 cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
674 {
675 struct cifsInodeInfo *cifsi = inode;
676
677 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
678 SLAB_CTOR_CONSTRUCTOR) {
679 inode_init_once(&cifsi->vfs_inode);
680 INIT_LIST_HEAD(&cifsi->lockList);
681 }
682 }
683
684 static int
685 cifs_init_inodecache(void)
686 {
687 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
688 sizeof (struct cifsInodeInfo),
689 0, (SLAB_RECLAIM_ACCOUNT|
690 SLAB_MEM_SPREAD),
691 cifs_init_once, NULL);
692 if (cifs_inode_cachep == NULL)
693 return -ENOMEM;
694
695 return 0;
696 }
697
698 static void
699 cifs_destroy_inodecache(void)
700 {
701 kmem_cache_destroy(cifs_inode_cachep);
702 }
703
704 static int
705 cifs_init_request_bufs(void)
706 {
707 if(CIFSMaxBufSize < 8192) {
708 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
709 Unicode path name has to fit in any SMB/CIFS path based frames */
710 CIFSMaxBufSize = 8192;
711 } else if (CIFSMaxBufSize > 1024*127) {
712 CIFSMaxBufSize = 1024 * 127;
713 } else {
714 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
715 }
716 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
717 cifs_req_cachep = kmem_cache_create("cifs_request",
718 CIFSMaxBufSize +
719 MAX_CIFS_HDR_SIZE, 0,
720 SLAB_HWCACHE_ALIGN, NULL, NULL);
721 if (cifs_req_cachep == NULL)
722 return -ENOMEM;
723
724 if(cifs_min_rcv < 1)
725 cifs_min_rcv = 1;
726 else if (cifs_min_rcv > 64) {
727 cifs_min_rcv = 64;
728 cERROR(1,("cifs_min_rcv set to maximum (64)"));
729 }
730
731 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
732 cifs_req_cachep);
733
734 if(cifs_req_poolp == NULL) {
735 kmem_cache_destroy(cifs_req_cachep);
736 return -ENOMEM;
737 }
738 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
739 almost all handle based requests (but not write response, nor is it
740 sufficient for path based requests). A smaller size would have
741 been more efficient (compacting multiple slab items on one 4k page)
742 for the case in which debug was on, but this larger size allows
743 more SMBs to use small buffer alloc and is still much more
744 efficient to alloc 1 per page off the slab compared to 17K (5page)
745 alloc of large cifs buffers even when page debugging is on */
746 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
747 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
748 NULL, NULL);
749 if (cifs_sm_req_cachep == NULL) {
750 mempool_destroy(cifs_req_poolp);
751 kmem_cache_destroy(cifs_req_cachep);
752 return -ENOMEM;
753 }
754
755 if(cifs_min_small < 2)
756 cifs_min_small = 2;
757 else if (cifs_min_small > 256) {
758 cifs_min_small = 256;
759 cFYI(1,("cifs_min_small set to maximum (256)"));
760 }
761
762 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
763 cifs_sm_req_cachep);
764
765 if(cifs_sm_req_poolp == NULL) {
766 mempool_destroy(cifs_req_poolp);
767 kmem_cache_destroy(cifs_req_cachep);
768 kmem_cache_destroy(cifs_sm_req_cachep);
769 return -ENOMEM;
770 }
771
772 return 0;
773 }
774
775 static void
776 cifs_destroy_request_bufs(void)
777 {
778 mempool_destroy(cifs_req_poolp);
779 kmem_cache_destroy(cifs_req_cachep);
780 mempool_destroy(cifs_sm_req_poolp);
781 kmem_cache_destroy(cifs_sm_req_cachep);
782 }
783
784 static int
785 cifs_init_mids(void)
786 {
787 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
788 sizeof (struct mid_q_entry), 0,
789 SLAB_HWCACHE_ALIGN, NULL, NULL);
790 if (cifs_mid_cachep == NULL)
791 return -ENOMEM;
792
793 /* 3 is a reasonable minimum number of simultaneous operations */
794 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
795 if(cifs_mid_poolp == NULL) {
796 kmem_cache_destroy(cifs_mid_cachep);
797 return -ENOMEM;
798 }
799
800 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
801 sizeof (struct oplock_q_entry), 0,
802 SLAB_HWCACHE_ALIGN, NULL, NULL);
803 if (cifs_oplock_cachep == NULL) {
804 kmem_cache_destroy(cifs_mid_cachep);
805 mempool_destroy(cifs_mid_poolp);
806 return -ENOMEM;
807 }
808
809 return 0;
810 }
811
812 static void
813 cifs_destroy_mids(void)
814 {
815 mempool_destroy(cifs_mid_poolp);
816 kmem_cache_destroy(cifs_mid_cachep);
817 kmem_cache_destroy(cifs_oplock_cachep);
818 }
819
820 static int cifs_oplock_thread(void * dummyarg)
821 {
822 struct oplock_q_entry * oplock_item;
823 struct cifsTconInfo *pTcon;
824 struct inode * inode;
825 __u16 netfid;
826 int rc;
827
828 do {
829 if (try_to_freeze())
830 continue;
831
832 spin_lock(&GlobalMid_Lock);
833 if(list_empty(&GlobalOplock_Q)) {
834 spin_unlock(&GlobalMid_Lock);
835 set_current_state(TASK_INTERRUPTIBLE);
836 schedule_timeout(39*HZ);
837 } else {
838 oplock_item = list_entry(GlobalOplock_Q.next,
839 struct oplock_q_entry, qhead);
840 if(oplock_item) {
841 cFYI(1,("found oplock item to write out"));
842 pTcon = oplock_item->tcon;
843 inode = oplock_item->pinode;
844 netfid = oplock_item->netfid;
845 spin_unlock(&GlobalMid_Lock);
846 DeleteOplockQEntry(oplock_item);
847 /* can not grab inode sem here since it would
848 deadlock when oplock received on delete
849 since vfs_unlink holds the i_mutex across
850 the call */
851 /* mutex_lock(&inode->i_mutex);*/
852 if (S_ISREG(inode->i_mode)) {
853 rc = filemap_fdatawrite(inode->i_mapping);
854 if(CIFS_I(inode)->clientCanCacheRead == 0) {
855 filemap_fdatawait(inode->i_mapping);
856 invalidate_remote_inode(inode);
857 }
858 } else
859 rc = 0;
860 /* mutex_unlock(&inode->i_mutex);*/
861 if (rc)
862 CIFS_I(inode)->write_behind_rc = rc;
863 cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
864
865 /* releasing a stale oplock after recent reconnection
866 of smb session using a now incorrect file
867 handle is not a data integrity issue but do
868 not bother sending an oplock release if session
869 to server still is disconnected since oplock
870 already released by the server in that case */
871 if(pTcon->tidStatus != CifsNeedReconnect) {
872 rc = CIFSSMBLock(0, pTcon, netfid,
873 0 /* len */ , 0 /* offset */, 0,
874 0, LOCKING_ANDX_OPLOCK_RELEASE,
875 0 /* wait flag */);
876 cFYI(1,("Oplock release rc = %d ",rc));
877 }
878 } else
879 spin_unlock(&GlobalMid_Lock);
880 set_current_state(TASK_INTERRUPTIBLE);
881 schedule_timeout(1); /* yield in case q were corrupt */
882 }
883 } while (!kthread_should_stop());
884
885 return 0;
886 }
887
888 static int cifs_dnotify_thread(void * dummyarg)
889 {
890 struct list_head *tmp;
891 struct cifsSesInfo *ses;
892
893 do {
894 if (try_to_freeze())
895 continue;
896 set_current_state(TASK_INTERRUPTIBLE);
897 schedule_timeout(15*HZ);
898 read_lock(&GlobalSMBSeslock);
899 /* check if there are any stuck requests that need
900 to be woken up and wake the response queue so the
901 thread can wake up and error out */
902 list_for_each(tmp, &GlobalSMBSessionList) {
903 ses = list_entry(tmp, struct cifsSesInfo,
904 cifsSessionList);
905 if(ses && ses->server &&
906 atomic_read(&ses->server->inFlight))
907 wake_up_all(&ses->server->response_q);
908 }
909 read_unlock(&GlobalSMBSeslock);
910 } while (!kthread_should_stop());
911
912 return 0;
913 }
914
915 static int __init
916 init_cifs(void)
917 {
918 int rc = 0;
919 #ifdef CONFIG_PROC_FS
920 cifs_proc_init();
921 #endif
922 INIT_LIST_HEAD(&GlobalServerList); /* BB not implemented yet */
923 INIT_LIST_HEAD(&GlobalSMBSessionList);
924 INIT_LIST_HEAD(&GlobalTreeConnectionList);
925 INIT_LIST_HEAD(&GlobalOplock_Q);
926 #ifdef CONFIG_CIFS_EXPERIMENTAL
927 INIT_LIST_HEAD(&GlobalDnotifyReqList);
928 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
929 #endif
930 /*
931 * Initialize Global counters
932 */
933 atomic_set(&sesInfoAllocCount, 0);
934 atomic_set(&tconInfoAllocCount, 0);
935 atomic_set(&tcpSesAllocCount,0);
936 atomic_set(&tcpSesReconnectCount, 0);
937 atomic_set(&tconInfoReconnectCount, 0);
938
939 atomic_set(&bufAllocCount, 0);
940 atomic_set(&smBufAllocCount, 0);
941 #ifdef CONFIG_CIFS_STATS2
942 atomic_set(&totBufAllocCount, 0);
943 atomic_set(&totSmBufAllocCount, 0);
944 #endif /* CONFIG_CIFS_STATS2 */
945
946 atomic_set(&midCount, 0);
947 GlobalCurrentXid = 0;
948 GlobalTotalActiveXid = 0;
949 GlobalMaxActiveXid = 0;
950 rwlock_init(&GlobalSMBSeslock);
951 spin_lock_init(&GlobalMid_Lock);
952
953 if(cifs_max_pending < 2) {
954 cifs_max_pending = 2;
955 cFYI(1,("cifs_max_pending set to min of 2"));
956 } else if(cifs_max_pending > 256) {
957 cifs_max_pending = 256;
958 cFYI(1,("cifs_max_pending set to max of 256"));
959 }
960
961 rc = cifs_init_inodecache();
962 if (rc)
963 goto out_clean_proc;
964
965 rc = cifs_init_mids();
966 if (rc)
967 goto out_destroy_inodecache;
968
969 rc = cifs_init_request_bufs();
970 if (rc)
971 goto out_destroy_mids;
972
973 rc = register_filesystem(&cifs_fs_type);
974 if (rc)
975 goto out_destroy_request_bufs;
976
977 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
978 if (IS_ERR(oplockThread)) {
979 rc = PTR_ERR(oplockThread);
980 cERROR(1,("error %d create oplock thread", rc));
981 goto out_unregister_filesystem;
982 }
983
984 dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
985 if (IS_ERR(dnotifyThread)) {
986 rc = PTR_ERR(dnotifyThread);
987 cERROR(1,("error %d create dnotify thread", rc));
988 goto out_stop_oplock_thread;
989 }
990
991 return 0;
992
993 out_stop_oplock_thread:
994 kthread_stop(oplockThread);
995 out_unregister_filesystem:
996 unregister_filesystem(&cifs_fs_type);
997 out_destroy_request_bufs:
998 cifs_destroy_request_bufs();
999 out_destroy_mids:
1000 cifs_destroy_mids();
1001 out_destroy_inodecache:
1002 cifs_destroy_inodecache();
1003 out_clean_proc:
1004 #ifdef CONFIG_PROC_FS
1005 cifs_proc_clean();
1006 #endif
1007 return rc;
1008 }
1009
1010 static void __exit
1011 exit_cifs(void)
1012 {
1013 cFYI(0, ("In unregister ie exit_cifs"));
1014 #ifdef CONFIG_PROC_FS
1015 cifs_proc_clean();
1016 #endif
1017 unregister_filesystem(&cifs_fs_type);
1018 cifs_destroy_inodecache();
1019 cifs_destroy_mids();
1020 cifs_destroy_request_bufs();
1021 kthread_stop(oplockThread);
1022 kthread_stop(dnotifyThread);
1023 }
1024
1025 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1026 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1027 MODULE_DESCRIPTION
1028 ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
1029 MODULE_VERSION(CIFS_VERSION);
1030 module_init(init_cifs)
1031 module_exit(exit_cifs)