[PATCH] move executable checking into ->permission()
1 /*
2 * fs/cifs/cifsfs.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
25
26 #include <linux/module.h>
27 #include <linux/fs.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include "cifsfs.h"
39 #include "cifspdu.h"
40 #define DECLARE_GLOBALS_HERE
41 #include "cifsglob.h"
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
45 #include <linux/mm.h>
46 #include <linux/key-type.h>
47 #include "dns_resolve.h"
48 #include "cifs_spnego.h"
49 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
50
51 #ifdef CONFIG_CIFS_QUOTA
52 static struct quotactl_ops cifs_quotactl_ops;
53 #endif /* QUOTA */
54
55 int cifsFYI = 0;
56 int cifsERROR = 1;
57 int traceSMB = 0;
58 unsigned int oplockEnabled = 1;
59 unsigned int experimEnabled = 0;
60 unsigned int linuxExtEnabled = 1;
61 unsigned int lookupCacheEnabled = 1;
62 unsigned int multiuser_mount = 0;
63 unsigned int extended_security = CIFSSEC_DEF;
64 /* unsigned int ntlmv2_support = 0; */
65 unsigned int sign_CIFS_PDUs = 1;
66 extern struct task_struct *oplockThread; /* remove sparse warning */
67 struct task_struct *oplockThread = NULL;
68 /* extern struct task_struct * dnotifyThread; remove sparse warning */
69 static struct task_struct *dnotifyThread = NULL;
70 static const struct super_operations cifs_super_ops;
71 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
72 module_param(CIFSMaxBufSize, int, 0);
73 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
74 "Default: 16384 Range: 8192 to 130048");
75 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
76 module_param(cifs_min_rcv, int, 0);
77 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
78 "1 to 64");
79 unsigned int cifs_min_small = 30;
80 module_param(cifs_min_small, int, 0);
81 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
82 "Range: 2 to 256");
83 unsigned int cifs_max_pending = CIFS_MAX_REQ;
84 module_param(cifs_max_pending, int, 0);
85 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
86 "Default: 50 Range: 2 to 256");
87
88 extern mempool_t *cifs_sm_req_poolp;
89 extern mempool_t *cifs_req_poolp;
90 extern mempool_t *cifs_mid_poolp;
91
92 extern struct kmem_cache *cifs_oplock_cachep;
93
94 static int
95 cifs_read_super(struct super_block *sb, void *data,
96 const char *devname, int silent)
97 {
98 struct inode *inode;
99 struct cifs_sb_info *cifs_sb;
100 int rc = 0;
101
102 /* BB should we make this contingent on mount parm? */
103 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
104 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
105 cifs_sb = CIFS_SB(sb);
106 if (cifs_sb == NULL)
107 return -ENOMEM;
108
109 #ifdef CONFIG_CIFS_DFS_UPCALL
110 /* copy mount params to sb for use in submounts */
111 /* BB: should we move this after the mount so we
112 * do not have to do the copy on failed mounts?
113	 * BB: Maybe it is better to do the simple copy before the
114	 * complex operation (mount), and in case of failure
115	 * just exit instead of doing the mount and attempting to
116	 * undo it if this copy fails? */
117 if (data) {
118 int len = strlen(data);
119 cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
120 if (cifs_sb->mountdata == NULL) {
121 kfree(sb->s_fs_info);
122 sb->s_fs_info = NULL;
123 return -ENOMEM;
124 }
125 strncpy(cifs_sb->mountdata, data, len + 1);
126 cifs_sb->mountdata[len] = '\0';
127 }
128 #endif
129
130 rc = cifs_mount(sb, cifs_sb, data, devname);
131
132 if (rc) {
133 if (!silent)
134 cERROR(1,
135 ("cifs_mount failed w/return code = %d", rc));
136 goto out_mount_failed;
137 }
138
139 sb->s_magic = CIFS_MAGIC_NUMBER;
140 sb->s_op = &cifs_super_ops;
141 /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
142 sb->s_blocksize =
143 cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
144 #ifdef CONFIG_CIFS_QUOTA
145 sb->s_qcop = &cifs_quotactl_ops;
146 #endif
147 sb->s_blocksize = CIFS_MAX_MSGSIZE;
148 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
149 inode = cifs_iget(sb, ROOT_I);
150
151 if (IS_ERR(inode)) {
152 rc = PTR_ERR(inode);
153 inode = NULL;
154 goto out_no_root;
155 }
156
157 sb->s_root = d_alloc_root(inode);
158
159 if (!sb->s_root) {
160 rc = -ENOMEM;
161 goto out_no_root;
162 }
163
164 #ifdef CONFIG_CIFS_EXPERIMENTAL
165 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
166 cFYI(1, ("export ops supported"));
167 sb->s_export_op = &cifs_export_ops;
168 }
169 #endif /* EXPERIMENTAL */
170
171 return 0;
172
173 out_no_root:
174 cERROR(1, ("cifs_read_super: get root inode failed"));
175 if (inode)
176 iput(inode);
177
178 cifs_umount(sb, cifs_sb);
179
180 out_mount_failed:
181 if (cifs_sb) {
182 #ifdef CONFIG_CIFS_DFS_UPCALL
183 if (cifs_sb->mountdata) {
184 kfree(cifs_sb->mountdata);
185 cifs_sb->mountdata = NULL;
186 }
187 #endif
188 if (cifs_sb->local_nls)
189 unload_nls(cifs_sb->local_nls);
190 kfree(cifs_sb);
191 }
192 return rc;
193 }
194
195 static void
196 cifs_put_super(struct super_block *sb)
197 {
198 int rc = 0;
199 struct cifs_sb_info *cifs_sb;
200
201 cFYI(1, ("In cifs_put_super"));
202 cifs_sb = CIFS_SB(sb);
203 if (cifs_sb == NULL) {
204 cFYI(1, ("Empty cifs superblock info passed to unmount"));
205 return;
206 }
207 rc = cifs_umount(sb, cifs_sb);
208 if (rc)
209 cERROR(1, ("cifs_umount failed with return code %d", rc));
210 #ifdef CONFIG_CIFS_DFS_UPCALL
211 if (cifs_sb->mountdata) {
212 kfree(cifs_sb->mountdata);
213 cifs_sb->mountdata = NULL;
214 }
215 #endif
216
217 unload_nls(cifs_sb->local_nls);
218 kfree(cifs_sb);
219 return;
220 }
221
222 static int
223 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
224 {
225 struct super_block *sb = dentry->d_sb;
226 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
227 struct cifsTconInfo *tcon = cifs_sb->tcon;
228 int rc = -EOPNOTSUPP;
229 int xid;
230
231 xid = GetXid();
232
233 buf->f_type = CIFS_MAGIC_NUMBER;
234
235 /*
236	 * PATH_MAX may be too long - it would presumably be the total path,
237	 * but note that some servers (including Samba 3) have a shorter
238	 * maximum path.
239 *
240 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
241 */
242 buf->f_namelen = PATH_MAX;
243 buf->f_files = 0; /* undefined */
244 buf->f_ffree = 0; /* unlimited */
245
246 /*
247 * We could add a second check for a QFS Unix capability bit
248 */
249 if ((tcon->ses->capabilities & CAP_UNIX) &&
250 (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
251 rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);
252
253 /*
254	 * Only need to call the old QFSInfo if the newer call failed,
255	 * e.g. on OS/2.
256	 */
257 if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
258 rc = CIFSSMBQFSInfo(xid, tcon, buf);
259
260 /*
261	 * Some old Windows servers also do not support level 103; retry with
262	 * the older info level if the server failed the previous call or we
263	 * bypassed it because we detected that this was an older LANMAN session
264 */
265 if (rc)
266 rc = SMBOldQFSInfo(xid, tcon, buf);
267
268 FreeXid(xid);
269 return 0;
270 }
271
272 static int cifs_permission(struct inode *inode, int mask)
273 {
274 struct cifs_sb_info *cifs_sb;
275
276 cifs_sb = CIFS_SB(inode->i_sb);
277
278 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
279 if ((mask & MAY_EXEC) && !execute_ok(inode))
280 return -EACCES;
281 else
282 return 0;
283 } else /* file mode might have been restricted at mount time
284 on the client (above and beyond ACL on servers) for
285 servers which do not support setting and viewing mode bits,
286 so allowing client to check permissions is useful */
287 return generic_permission(inode, mask, NULL);
288 }
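
/*
 * A minimal sketch (not compiled in) of what the execute_ok() helper used
 * above amounts to in kernels of this vintage; the authoritative definition
 * lives in include/linux/fs.h and may differ in detail. With "noperm" set,
 * the only client-side check left is whether MAY_EXEC is requested on an
 * inode that has no execute bit (directories count as executable).
 */
#if 0 /* illustrative only */
static inline int execute_ok_sketch(struct inode *inode)
{
	return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
#endif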
289
290 static struct kmem_cache *cifs_inode_cachep;
291 static struct kmem_cache *cifs_req_cachep;
292 static struct kmem_cache *cifs_mid_cachep;
293 struct kmem_cache *cifs_oplock_cachep;
294 static struct kmem_cache *cifs_sm_req_cachep;
295 mempool_t *cifs_sm_req_poolp;
296 mempool_t *cifs_req_poolp;
297 mempool_t *cifs_mid_poolp;
298
299 static struct inode *
300 cifs_alloc_inode(struct super_block *sb)
301 {
302 struct cifsInodeInfo *cifs_inode;
303 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
304 if (!cifs_inode)
305 return NULL;
306	cifs_inode->cifsAttrs = 0x20;	/* default: ATTR_ARCHIVE */
307 atomic_set(&cifs_inode->inUse, 0);
308 cifs_inode->time = 0;
309 cifs_inode->write_behind_rc = 0;
310 /* Until the file is open and we have gotten oplock
311 info back from the server, can not assume caching of
312 file data or metadata */
313 cifs_inode->clientCanCacheRead = false;
314 cifs_inode->clientCanCacheAll = false;
315 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
316
317 /* Can not set i_flags here - they get immediately overwritten
318 to zero by the VFS */
319 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
320 INIT_LIST_HEAD(&cifs_inode->openFileList);
321 return &cifs_inode->vfs_inode;
322 }
323
324 static void
325 cifs_destroy_inode(struct inode *inode)
326 {
327 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
328 }
329
330 /*
331 * cifs_show_options() is for displaying mount options in /proc/mounts.
332 * Not all settable options are displayed but most of the important
333 * ones are.
334 */
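/*
 * A hypothetical /proc/mounts entry (share name, user and option values
 * invented for illustration, wrapped here for readability) for a share
 * mounted without the Unix extensions might look like:
 *
 *   //srv/share /mnt/cifs cifs rw,unc=\\srv\share,username=guest,
 *   uid=0,gid=0,file_mode=0755,dir_mode=0755,rsize=16384,wsize=57344 0 0
 */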
335 static int
336 cifs_show_options(struct seq_file *s, struct vfsmount *m)
337 {
338 struct cifs_sb_info *cifs_sb;
339
340 cifs_sb = CIFS_SB(m->mnt_sb);
341
342 if (cifs_sb) {
343 if (cifs_sb->tcon) {
344 /* BB add prepath to mount options displayed */
345 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
346 if (cifs_sb->tcon->ses) {
347 if (cifs_sb->tcon->ses->userName)
348 seq_printf(s, ",username=%s",
349 cifs_sb->tcon->ses->userName);
350 if (cifs_sb->tcon->ses->domainName)
351 seq_printf(s, ",domain=%s",
352 cifs_sb->tcon->ses->domainName);
353 }
354 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
355 !(cifs_sb->tcon->unix_ext))
356 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
357 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
358 !(cifs_sb->tcon->unix_ext))
359 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
360 if (!cifs_sb->tcon->unix_ext) {
361 seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
362 cifs_sb->mnt_file_mode,
363 cifs_sb->mnt_dir_mode);
364 }
365 if (cifs_sb->tcon->seal)
366 seq_printf(s, ",seal");
367 if (cifs_sb->tcon->nocase)
368 seq_printf(s, ",nocase");
369 if (cifs_sb->tcon->retry)
370 seq_printf(s, ",hard");
371 }
372 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
373 seq_printf(s, ",posixpaths");
374 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
375 seq_printf(s, ",setuids");
376 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
377 seq_printf(s, ",serverino");
378 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
379 seq_printf(s, ",directio");
380 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
381 seq_printf(s, ",nouser_xattr");
382 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
383 seq_printf(s, ",mapchars");
384 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
385 seq_printf(s, ",sfu");
386 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
387 seq_printf(s, ",nobrl");
388 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
389 seq_printf(s, ",cifsacl");
390 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
391 seq_printf(s, ",dynperm");
392 if (m->mnt_sb->s_flags & MS_POSIXACL)
393 seq_printf(s, ",acl");
394
395 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
396 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
397 }
398 return 0;
399 }
400
401 #ifdef CONFIG_CIFS_QUOTA
402 int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
403 struct fs_disk_quota *pdquota)
404 {
405 int xid;
406 int rc = 0;
407 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
408 struct cifsTconInfo *pTcon;
409
410 if (cifs_sb)
411 pTcon = cifs_sb->tcon;
412 else
413 return -EIO;
414
415
416 xid = GetXid();
417 if (pTcon) {
418 cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
419 } else {
420 rc = -EIO;
421 }
422
423 FreeXid(xid);
424 return rc;
425 }
426
427 int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
428 struct fs_disk_quota *pdquota)
429 {
430 int xid;
431 int rc = 0;
432 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
433 struct cifsTconInfo *pTcon;
434
435 if (cifs_sb)
436 pTcon = cifs_sb->tcon;
437 else
438 return -EIO;
439
440 xid = GetXid();
441 if (pTcon) {
442	cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
443 } else {
444 rc = -EIO;
445 }
446
447 FreeXid(xid);
448 return rc;
449 }
450
451 int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
452 {
453 int xid;
454 int rc = 0;
455 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
456 struct cifsTconInfo *pTcon;
457
458 if (cifs_sb)
459 pTcon = cifs_sb->tcon;
460 else
461 return -EIO;
462
463 xid = GetXid();
464 if (pTcon) {
465 cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
466 } else {
467 rc = -EIO;
468 }
469
470 FreeXid(xid);
471 return rc;
472 }
473
474 int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
475 {
476 int xid;
477 int rc = 0;
478 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
479 struct cifsTconInfo *pTcon;
480
481 if (cifs_sb) {
482 pTcon = cifs_sb->tcon;
483 } else {
484 return -EIO;
485 }
486 xid = GetXid();
487 if (pTcon) {
488 cFYI(1, ("pqstats %p", qstats));
489 } else {
490 rc = -EIO;
491 }
492
493 FreeXid(xid);
494 return rc;
495 }
496
497 static struct quotactl_ops cifs_quotactl_ops = {
498 .set_xquota = cifs_xquota_set,
499 .get_xquota = cifs_xquota_get,
500 .set_xstate = cifs_xstate_set,
501 .get_xstate = cifs_xstate_get,
502 };
503 #endif
504
505 static void cifs_umount_begin(struct super_block *sb)
506 {
507 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
508 struct cifsTconInfo *tcon;
509
510 if (cifs_sb == NULL)
511 return;
512
513 tcon = cifs_sb->tcon;
514 if (tcon == NULL)
515 return;
516 down(&tcon->tconSem);
517 if (atomic_read(&tcon->useCount) == 1)
518 tcon->tidStatus = CifsExiting;
519 up(&tcon->tconSem);
520
521 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
522 /* cancel_notify_requests(tcon); */
523 if (tcon->ses && tcon->ses->server) {
524 cFYI(1, ("wake up tasks now - umount begin not complete"));
525 wake_up_all(&tcon->ses->server->request_q);
526 wake_up_all(&tcon->ses->server->response_q);
527 msleep(1); /* yield */
528 /* we have to kick the requests once more */
529 wake_up_all(&tcon->ses->server->response_q);
530 msleep(1);
531 }
532 /* BB FIXME - finish add checks for tidStatus BB */
533
534 return;
535 }
536
537 #ifdef CONFIG_CIFS_STATS2
538 static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
539 {
540 /* BB FIXME */
541 return 0;
542 }
543 #endif
544
545 static int cifs_remount(struct super_block *sb, int *flags, char *data)
546 {
547 *flags |= MS_NODIRATIME;
548 return 0;
549 }
550
551 static const struct super_operations cifs_super_ops = {
552 .put_super = cifs_put_super,
553 .statfs = cifs_statfs,
554 .alloc_inode = cifs_alloc_inode,
555 .destroy_inode = cifs_destroy_inode,
556 /* .drop_inode = generic_delete_inode,
557 .delete_inode = cifs_delete_inode, */ /* Do not need above two
558 functions unless later we add lazy close of inodes or unless the
559 kernel forgets to call us with the same number of releases (closes)
560 as opens */
561 .show_options = cifs_show_options,
562 .umount_begin = cifs_umount_begin,
563 .remount_fs = cifs_remount,
564 #ifdef CONFIG_CIFS_STATS2
565 .show_stats = cifs_show_stats,
566 #endif
567 };
568
569 static int
570 cifs_get_sb(struct file_system_type *fs_type,
571 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
572 {
573 int rc;
574 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
575
576 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
577
578 if (IS_ERR(sb))
579 return PTR_ERR(sb);
580
581 sb->s_flags = flags;
582
583 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
584 if (rc) {
585 up_write(&sb->s_umount);
586 deactivate_super(sb);
587 return rc;
588 }
589 sb->s_flags |= MS_ACTIVE;
590 return simple_set_mnt(mnt, sb);
591 }
592
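/*
 * Write through the page cache as usual, then, unless we hold an oplock
 * that lets us cache writes (clientCanCacheAll), kick off writeback
 * immediately so the data is pushed to the server without waiting for a
 * later flush.
 */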
593 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
594 unsigned long nr_segs, loff_t pos)
595 {
596 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
597 ssize_t written;
598
599 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
600 if (!CIFS_I(inode)->clientCanCacheAll)
601 filemap_fdatawrite(inode->i_mapping);
602 return written;
603 }
604
605 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
606 {
607 /* origin == SEEK_END => we must revalidate the cached file length */
608 if (origin == SEEK_END) {
609 int retval;
610
611	/* some applications poll for the file length in this strange
612	way, so before seeking to end on non-oplocked files we force
613	a revalidation by setting the revalidate time to zero */
614 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
615
616 retval = cifs_revalidate(file->f_path.dentry);
617 if (retval < 0)
618 return (loff_t)retval;
619 }
620 return generic_file_llseek_unlocked(file, offset, origin);
621 }
622
623 struct file_system_type cifs_fs_type = {
624 .owner = THIS_MODULE,
625 .name = "cifs",
626 .get_sb = cifs_get_sb,
627 .kill_sb = kill_anon_super,
628 /* .fs_flags */
629 };
630 const struct inode_operations cifs_dir_inode_ops = {
631 .create = cifs_create,
632 .lookup = cifs_lookup,
633 .getattr = cifs_getattr,
634 .unlink = cifs_unlink,
635 .link = cifs_hardlink,
636 .mkdir = cifs_mkdir,
637 .rmdir = cifs_rmdir,
638 .rename = cifs_rename,
639 .permission = cifs_permission,
640 /* revalidate:cifs_revalidate, */
641 .setattr = cifs_setattr,
642 .symlink = cifs_symlink,
643 .mknod = cifs_mknod,
644 #ifdef CONFIG_CIFS_XATTR
645 .setxattr = cifs_setxattr,
646 .getxattr = cifs_getxattr,
647 .listxattr = cifs_listxattr,
648 .removexattr = cifs_removexattr,
649 #endif
650 };
651
652 const struct inode_operations cifs_file_inode_ops = {
653 /* revalidate:cifs_revalidate, */
654 .setattr = cifs_setattr,
655 .getattr = cifs_getattr, /* do we need this anymore? */
656 .rename = cifs_rename,
657 .permission = cifs_permission,
658 #ifdef CONFIG_CIFS_XATTR
659 .setxattr = cifs_setxattr,
660 .getxattr = cifs_getxattr,
661 .listxattr = cifs_listxattr,
662 .removexattr = cifs_removexattr,
663 #endif
664 };
665
666 const struct inode_operations cifs_symlink_inode_ops = {
667 .readlink = generic_readlink,
668 .follow_link = cifs_follow_link,
669 .put_link = cifs_put_link,
670 .permission = cifs_permission,
671 /* BB add the following two eventually */
672 /* revalidate: cifs_revalidate,
673 setattr: cifs_notify_change, *//* BB do we need notify change */
674 #ifdef CONFIG_CIFS_XATTR
675 .setxattr = cifs_setxattr,
676 .getxattr = cifs_getxattr,
677 .listxattr = cifs_listxattr,
678 .removexattr = cifs_removexattr,
679 #endif
680 };
681
682 const struct file_operations cifs_file_ops = {
683 .read = do_sync_read,
684 .write = do_sync_write,
685 .aio_read = generic_file_aio_read,
686 .aio_write = cifs_file_aio_write,
687 .open = cifs_open,
688 .release = cifs_close,
689 .lock = cifs_lock,
690 .fsync = cifs_fsync,
691 .flush = cifs_flush,
692 .mmap = cifs_file_mmap,
693 .splice_read = generic_file_splice_read,
694 .llseek = cifs_llseek,
695 #ifdef CONFIG_CIFS_POSIX
696 .unlocked_ioctl = cifs_ioctl,
697 #endif /* CONFIG_CIFS_POSIX */
698
699 #ifdef CONFIG_CIFS_EXPERIMENTAL
700 .dir_notify = cifs_dir_notify,
701 #endif /* CONFIG_CIFS_EXPERIMENTAL */
702 };
703
704 const struct file_operations cifs_file_direct_ops = {
705 /* no mmap, no aio, no readv -
706 BB reevaluate whether they can be done with directio, no cache */
707 .read = cifs_user_read,
708 .write = cifs_user_write,
709 .open = cifs_open,
710 .release = cifs_close,
711 .lock = cifs_lock,
712 .fsync = cifs_fsync,
713 .flush = cifs_flush,
714 .splice_read = generic_file_splice_read,
715 #ifdef CONFIG_CIFS_POSIX
716 .unlocked_ioctl = cifs_ioctl,
717 #endif /* CONFIG_CIFS_POSIX */
718 .llseek = cifs_llseek,
719 #ifdef CONFIG_CIFS_EXPERIMENTAL
720 .dir_notify = cifs_dir_notify,
721 #endif /* CONFIG_CIFS_EXPERIMENTAL */
722 };
723 const struct file_operations cifs_file_nobrl_ops = {
724 .read = do_sync_read,
725 .write = do_sync_write,
726 .aio_read = generic_file_aio_read,
727 .aio_write = cifs_file_aio_write,
728 .open = cifs_open,
729 .release = cifs_close,
730 .fsync = cifs_fsync,
731 .flush = cifs_flush,
732 .mmap = cifs_file_mmap,
733 .splice_read = generic_file_splice_read,
734 .llseek = cifs_llseek,
735 #ifdef CONFIG_CIFS_POSIX
736 .unlocked_ioctl = cifs_ioctl,
737 #endif /* CONFIG_CIFS_POSIX */
738
739 #ifdef CONFIG_CIFS_EXPERIMENTAL
740 .dir_notify = cifs_dir_notify,
741 #endif /* CONFIG_CIFS_EXPERIMENTAL */
742 };
743
744 const struct file_operations cifs_file_direct_nobrl_ops = {
745 /* no mmap, no aio, no readv -
746 BB reevaluate whether they can be done with directio, no cache */
747 .read = cifs_user_read,
748 .write = cifs_user_write,
749 .open = cifs_open,
750 .release = cifs_close,
751 .fsync = cifs_fsync,
752 .flush = cifs_flush,
753 .splice_read = generic_file_splice_read,
754 #ifdef CONFIG_CIFS_POSIX
755 .unlocked_ioctl = cifs_ioctl,
756 #endif /* CONFIG_CIFS_POSIX */
757 .llseek = cifs_llseek,
758 #ifdef CONFIG_CIFS_EXPERIMENTAL
759 .dir_notify = cifs_dir_notify,
760 #endif /* CONFIG_CIFS_EXPERIMENTAL */
761 };
762
763 const struct file_operations cifs_dir_ops = {
764 .readdir = cifs_readdir,
765 .release = cifs_closedir,
766 .read = generic_read_dir,
767 #ifdef CONFIG_CIFS_EXPERIMENTAL
768 .dir_notify = cifs_dir_notify,
769 #endif /* CONFIG_CIFS_EXPERIMENTAL */
770 .unlocked_ioctl = cifs_ioctl,
771 .llseek = generic_file_llseek,
772 };
773
774 static void
775 cifs_init_once(void *inode)
776 {
777 struct cifsInodeInfo *cifsi = inode;
778
779 inode_init_once(&cifsi->vfs_inode);
780 INIT_LIST_HEAD(&cifsi->lockList);
781 }
782
783 static int
784 cifs_init_inodecache(void)
785 {
786 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
787 sizeof(struct cifsInodeInfo),
788 0, (SLAB_RECLAIM_ACCOUNT|
789 SLAB_MEM_SPREAD),
790 cifs_init_once);
791 if (cifs_inode_cachep == NULL)
792 return -ENOMEM;
793
794 return 0;
795 }
796
797 static void
798 cifs_destroy_inodecache(void)
799 {
800 kmem_cache_destroy(cifs_inode_cachep);
801 }
802
803 static int
804 cifs_init_request_bufs(void)
805 {
806 if (CIFSMaxBufSize < 8192) {
807 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
808 Unicode path name has to fit in any SMB/CIFS path based frames */
809 CIFSMaxBufSize = 8192;
810 } else if (CIFSMaxBufSize > 1024*127) {
811 CIFSMaxBufSize = 1024 * 127;
812 } else {
813 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
814 }
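	/*
	 * Worked example of the rounding above: the mask 0x1FE00 clears the
	 * low nine bits, so a module parameter of, say, 17000 (0x4268)
	 * becomes 0x4200 = 16896, i.e. the value is rounded down to the next
	 * lower multiple of 512 within the 8192..130048 window enforced just
	 * above.
	 */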
815 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
816 cifs_req_cachep = kmem_cache_create("cifs_request",
817 CIFSMaxBufSize +
818 MAX_CIFS_HDR_SIZE, 0,
819 SLAB_HWCACHE_ALIGN, NULL);
820 if (cifs_req_cachep == NULL)
821 return -ENOMEM;
822
823 if (cifs_min_rcv < 1)
824 cifs_min_rcv = 1;
825 else if (cifs_min_rcv > 64) {
826 cifs_min_rcv = 64;
827 cERROR(1, ("cifs_min_rcv set to maximum (64)"));
828 }
829
830 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
831 cifs_req_cachep);
832
833 if (cifs_req_poolp == NULL) {
834 kmem_cache_destroy(cifs_req_cachep);
835 return -ENOMEM;
836 }
837 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
838 almost all handle based requests (but not write response, nor is it
839 sufficient for path based requests). A smaller size would have
840 been more efficient (compacting multiple slab items on one 4k page)
841 for the case in which debug was on, but this larger size allows
842 more SMBs to use small buffer alloc and is still much more
843	efficient to alloc 1 per page off the slab compared to the 17K (5 page)
844 alloc of large cifs buffers even when page debugging is on */
845 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
846 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
847 NULL);
848 if (cifs_sm_req_cachep == NULL) {
849 mempool_destroy(cifs_req_poolp);
850 kmem_cache_destroy(cifs_req_cachep);
851 return -ENOMEM;
852 }
853
854 if (cifs_min_small < 2)
855 cifs_min_small = 2;
856 else if (cifs_min_small > 256) {
857 cifs_min_small = 256;
858 cFYI(1, ("cifs_min_small set to maximum (256)"));
859 }
860
861 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
862 cifs_sm_req_cachep);
863
864 if (cifs_sm_req_poolp == NULL) {
865 mempool_destroy(cifs_req_poolp);
866 kmem_cache_destroy(cifs_req_cachep);
867 kmem_cache_destroy(cifs_sm_req_cachep);
868 return -ENOMEM;
869 }
870
871 return 0;
872 }
873
874 static void
875 cifs_destroy_request_bufs(void)
876 {
877 mempool_destroy(cifs_req_poolp);
878 kmem_cache_destroy(cifs_req_cachep);
879 mempool_destroy(cifs_sm_req_poolp);
880 kmem_cache_destroy(cifs_sm_req_cachep);
881 }
882
883 static int
884 cifs_init_mids(void)
885 {
886 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
887 sizeof(struct mid_q_entry), 0,
888 SLAB_HWCACHE_ALIGN, NULL);
889 if (cifs_mid_cachep == NULL)
890 return -ENOMEM;
891
892 /* 3 is a reasonable minimum number of simultaneous operations */
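	/*
	 * mempool_create_slab_pool() pre-allocates that minimum number of
	 * mid_q_entry objects from the slab cache, so MID allocation can
	 * still make forward progress when the system is under memory
	 * pressure.
	 */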
893 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
894 if (cifs_mid_poolp == NULL) {
895 kmem_cache_destroy(cifs_mid_cachep);
896 return -ENOMEM;
897 }
898
899 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
900 sizeof(struct oplock_q_entry), 0,
901 SLAB_HWCACHE_ALIGN, NULL);
902 if (cifs_oplock_cachep == NULL) {
903 mempool_destroy(cifs_mid_poolp);
904 kmem_cache_destroy(cifs_mid_cachep);
905 return -ENOMEM;
906 }
907
908 return 0;
909 }
910
911 static void
912 cifs_destroy_mids(void)
913 {
914 mempool_destroy(cifs_mid_poolp);
915 kmem_cache_destroy(cifs_mid_cachep);
916 kmem_cache_destroy(cifs_oplock_cachep);
917 }
918
919 static int cifs_oplock_thread(void *dummyarg)
920 {
921 struct oplock_q_entry *oplock_item;
922 struct cifsTconInfo *pTcon;
923 struct inode *inode;
924 __u16 netfid;
925 int rc, waitrc = 0;
926
927 set_freezable();
928 do {
929 if (try_to_freeze())
930 continue;
931
932 spin_lock(&GlobalMid_Lock);
933 if (list_empty(&GlobalOplock_Q)) {
934 spin_unlock(&GlobalMid_Lock);
935 set_current_state(TASK_INTERRUPTIBLE);
936 schedule_timeout(39*HZ);
937 } else {
938 oplock_item = list_entry(GlobalOplock_Q.next,
939 struct oplock_q_entry, qhead);
940 cFYI(1, ("found oplock item to write out"));
941 pTcon = oplock_item->tcon;
942 inode = oplock_item->pinode;
943 netfid = oplock_item->netfid;
944 spin_unlock(&GlobalMid_Lock);
945 DeleteOplockQEntry(oplock_item);
946 /* can not grab inode sem here since it would
947 deadlock when oplock received on delete
948 since vfs_unlink holds the i_mutex across
949 the call */
950 /* mutex_lock(&inode->i_mutex);*/
951 if (S_ISREG(inode->i_mode)) {
952 rc = filemap_fdatawrite(inode->i_mapping);
953 if (CIFS_I(inode)->clientCanCacheRead == 0) {
954 waitrc = filemap_fdatawait(
955 inode->i_mapping);
956 invalidate_remote_inode(inode);
957 }
958 if (rc == 0)
959 rc = waitrc;
960 } else
961 rc = 0;
962 /* mutex_unlock(&inode->i_mutex);*/
963 if (rc)
964 CIFS_I(inode)->write_behind_rc = rc;
965 cFYI(1, ("Oplock flush inode %p rc %d",
966 inode, rc));
967
968	/* releasing a stale oplock after a recent reconnect
969	of the smb session, using a now incorrect file
970	handle, is not a data integrity issue, but do
971	not bother sending an oplock release if the session
972	to the server is still disconnected, since the oplock
973	was already released by the server in that case */
974 if (pTcon->tidStatus != CifsNeedReconnect) {
975 rc = CIFSSMBLock(0, pTcon, netfid,
976 0 /* len */ , 0 /* offset */, 0,
977 0, LOCKING_ANDX_OPLOCK_RELEASE,
978 false /* wait flag */);
979 cFYI(1, ("Oplock release rc = %d", rc));
980 }
981 set_current_state(TASK_INTERRUPTIBLE);
982 schedule_timeout(1); /* yield in case q were corrupt */
983 }
984 } while (!kthread_should_stop());
985
986 return 0;
987 }
988
989 static int cifs_dnotify_thread(void *dummyarg)
990 {
991 struct list_head *tmp;
992 struct cifsSesInfo *ses;
993
994 do {
995 if (try_to_freeze())
996 continue;
997 set_current_state(TASK_INTERRUPTIBLE);
998 schedule_timeout(15*HZ);
999 read_lock(&GlobalSMBSeslock);
1000	/* check for any stuck requests that need
1001	to be woken up, and wake the queue so the
1002	thread can wake up and error out */
1003 list_for_each(tmp, &GlobalSMBSessionList) {
1004 ses = list_entry(tmp, struct cifsSesInfo,
1005 cifsSessionList);
1006 if (ses->server && atomic_read(&ses->server->inFlight))
1007 wake_up_all(&ses->server->response_q);
1008 }
1009 read_unlock(&GlobalSMBSeslock);
1010 } while (!kthread_should_stop());
1011
1012 return 0;
1013 }
1014
1015 static int __init
1016 init_cifs(void)
1017 {
1018 int rc = 0;
1019 cifs_proc_init();
1020 /* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */
1021 INIT_LIST_HEAD(&GlobalSMBSessionList);
1022 INIT_LIST_HEAD(&GlobalTreeConnectionList);
1023 INIT_LIST_HEAD(&GlobalOplock_Q);
1024 #ifdef CONFIG_CIFS_EXPERIMENTAL
1025 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1026 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1027 #endif
1028 /*
1029 * Initialize Global counters
1030 */
1031 atomic_set(&sesInfoAllocCount, 0);
1032 atomic_set(&tconInfoAllocCount, 0);
1033 atomic_set(&tcpSesAllocCount, 0);
1034 atomic_set(&tcpSesReconnectCount, 0);
1035 atomic_set(&tconInfoReconnectCount, 0);
1036
1037 atomic_set(&bufAllocCount, 0);
1038 atomic_set(&smBufAllocCount, 0);
1039 #ifdef CONFIG_CIFS_STATS2
1040 atomic_set(&totBufAllocCount, 0);
1041 atomic_set(&totSmBufAllocCount, 0);
1042 #endif /* CONFIG_CIFS_STATS2 */
1043
1044 atomic_set(&midCount, 0);
1045 GlobalCurrentXid = 0;
1046 GlobalTotalActiveXid = 0;
1047 GlobalMaxActiveXid = 0;
1048 memset(Local_System_Name, 0, 15);
1049 rwlock_init(&GlobalSMBSeslock);
1050 spin_lock_init(&GlobalMid_Lock);
1051
1052 if (cifs_max_pending < 2) {
1053 cifs_max_pending = 2;
1054 cFYI(1, ("cifs_max_pending set to min of 2"));
1055 } else if (cifs_max_pending > 256) {
1056 cifs_max_pending = 256;
1057 cFYI(1, ("cifs_max_pending set to max of 256"));
1058 }
1059
1060 rc = cifs_init_inodecache();
1061 if (rc)
1062 goto out_clean_proc;
1063
1064 rc = cifs_init_mids();
1065 if (rc)
1066 goto out_destroy_inodecache;
1067
1068 rc = cifs_init_request_bufs();
1069 if (rc)
1070 goto out_destroy_mids;
1071
1072 rc = register_filesystem(&cifs_fs_type);
1073 if (rc)
1074 goto out_destroy_request_bufs;
1075 #ifdef CONFIG_CIFS_UPCALL
1076 rc = register_key_type(&cifs_spnego_key_type);
1077 if (rc)
1078 goto out_unregister_filesystem;
1079 #endif
1080 #ifdef CONFIG_CIFS_DFS_UPCALL
1081 rc = register_key_type(&key_type_dns_resolver);
1082 if (rc)
1083 goto out_unregister_key_type;
1084 #endif
1085 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
1086 if (IS_ERR(oplockThread)) {
1087 rc = PTR_ERR(oplockThread);
1088 cERROR(1, ("error %d create oplock thread", rc));
1089 goto out_unregister_dfs_key_type;
1090 }
1091
1092 dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
1093 if (IS_ERR(dnotifyThread)) {
1094 rc = PTR_ERR(dnotifyThread);
1095 cERROR(1, ("error %d create dnotify thread", rc));
1096 goto out_stop_oplock_thread;
1097 }
1098
1099 return 0;
1100
1101 out_stop_oplock_thread:
1102 kthread_stop(oplockThread);
1103 out_unregister_dfs_key_type:
1104 #ifdef CONFIG_CIFS_DFS_UPCALL
1105 unregister_key_type(&key_type_dns_resolver);
1106 out_unregister_key_type:
1107 #endif
1108 #ifdef CONFIG_CIFS_UPCALL
1109 unregister_key_type(&cifs_spnego_key_type);
1110 out_unregister_filesystem:
1111 #endif
1112 unregister_filesystem(&cifs_fs_type);
1113 out_destroy_request_bufs:
1114 cifs_destroy_request_bufs();
1115 out_destroy_mids:
1116 cifs_destroy_mids();
1117 out_destroy_inodecache:
1118 cifs_destroy_inodecache();
1119 out_clean_proc:
1120 cifs_proc_clean();
1121 return rc;
1122 }
1123
1124 static void __exit
1125 exit_cifs(void)
1126 {
1127 cFYI(DBG2, ("exit_cifs"));
1128 cifs_proc_clean();
1129 #ifdef CONFIG_CIFS_DFS_UPCALL
1130 cifs_dfs_release_automount_timer();
1131 unregister_key_type(&key_type_dns_resolver);
1132 #endif
1133 #ifdef CONFIG_CIFS_UPCALL
1134 unregister_key_type(&cifs_spnego_key_type);
1135 #endif
1136 unregister_filesystem(&cifs_fs_type);
1137 cifs_destroy_inodecache();
1138 cifs_destroy_mids();
1139 cifs_destroy_request_bufs();
1140 kthread_stop(oplockThread);
1141 kthread_stop(dnotifyThread);
1142 }
1143
1144 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1145 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1146 MODULE_DESCRIPTION
1147 ("VFS to access servers complying with the SNIA CIFS Specification "
1148 "e.g. Samba and Windows");
1149 MODULE_VERSION(CIFS_VERSION);
1150 module_init(init_cifs)
1151 module_exit(exit_cifs)