fs/fuse/dev.c (Linux 3.10.73, android_kernel_alcatel_ttab)
1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10 #include "fuse.h"
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/poll.h>
15 #include <linux/uio.h>
16 #include <linux/miscdevice.h>
17 #include <linux/pagemap.h>
18 #include <linux/file.h>
19 #include <linux/slab.h>
20 #include <linux/pipe_fs_i.h>
21 #include <linux/swap.h>
22 #include <linux/splice.h>
23 #include <linux/aio.h>
24 #include <linux/freezer.h>
25
26 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
27 MODULE_ALIAS("devname:fuse");
28
29 static struct kmem_cache *fuse_req_cachep;
30
31 static struct fuse_conn *fuse_get_conn(struct file *file)
32 {
33 /*
34 * Lockless access is OK, because file->private_data is set
35 * once during mount and is valid until the file is released.
36 */
37 return file->private_data;
38 }
39
40 static void fuse_request_init(struct fuse_req *req, struct page **pages,
41 struct fuse_page_desc *page_descs,
42 unsigned npages)
43 {
44 memset(req, 0, sizeof(*req));
45 memset(pages, 0, sizeof(*pages) * npages);
46 memset(page_descs, 0, sizeof(*page_descs) * npages);
47 INIT_LIST_HEAD(&req->list);
48 INIT_LIST_HEAD(&req->intr_entry);
49 init_waitqueue_head(&req->waitq);
50 atomic_set(&req->count, 1);
51 req->pages = pages;
52 req->page_descs = page_descs;
53 req->max_pages = npages;
54 }
55
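/*
 * Allocate a request together with its page vector.  Small requests
 * (up to FUSE_REQ_INLINE_PAGES pages) reuse the arrays embedded in
 * struct fuse_req; larger ones get separately allocated page and
 * page_desc arrays, which fuse_request_free() releases again.
 */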
56 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
57 {
58 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
59 if (req) {
60 struct page **pages;
61 struct fuse_page_desc *page_descs;
62
63 if (npages <= FUSE_REQ_INLINE_PAGES) {
64 pages = req->inline_pages;
65 page_descs = req->inline_page_descs;
66 } else {
67 pages = kmalloc(sizeof(struct page *) * npages, flags);
68 page_descs = kmalloc(sizeof(struct fuse_page_desc) *
69 npages, flags);
70 }
71
72 if (!pages || !page_descs) {
73 kfree(pages);
74 kfree(page_descs);
75 kmem_cache_free(fuse_req_cachep, req);
76 return NULL;
77 }
78
79 fuse_request_init(req, pages, page_descs, npages);
80 }
81 return req;
82 }
83
84 struct fuse_req *fuse_request_alloc(unsigned npages)
85 {
86 return __fuse_request_alloc(npages, GFP_KERNEL);
87 }
88 EXPORT_SYMBOL_GPL(fuse_request_alloc);
89
90 struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
91 {
92 return __fuse_request_alloc(npages, GFP_NOFS);
93 }
94
95 void fuse_request_free(struct fuse_req *req)
96 {
97 if (req->pages != req->inline_pages) {
98 kfree(req->pages);
99 kfree(req->page_descs);
100 }
101 kmem_cache_free(fuse_req_cachep, req);
102 }
103
104 static void block_sigs(sigset_t *oldset)
105 {
106 sigset_t mask;
107
108 siginitsetinv(&mask, sigmask(SIGKILL));
109 sigprocmask(SIG_BLOCK, &mask, oldset);
110 }
111
112 static void restore_sigs(sigset_t *oldset)
113 {
114 sigprocmask(SIG_SETMASK, oldset, NULL);
115 }
116
117 void __fuse_get_request(struct fuse_req *req)
118 {
119 atomic_inc(&req->count);
120 }
121
122 /* Must be called with > 1 refcount */
123 static void __fuse_put_request(struct fuse_req *req)
124 {
125 BUG_ON(atomic_read(&req->count) < 2);
126 atomic_dec(&req->count);
127 }
128
129 static void fuse_req_init_context(struct fuse_req *req)
130 {
131 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
132 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
133 req->in.h.pid = current->pid;
134 }
135
136 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
137 {
138 return !fc->initialized || (for_background && fc->blocked);
139 }
140
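/*
 * Allocate and initialize a request.  The caller is accounted in
 * fc->num_waiting until the request is released.  Before the INIT
 * reply has arrived (fc->initialized), and for background requests
 * while fc->blocked is set, this sleeps on fc->blocked_waitq; only
 * SIGKILL can interrupt that sleep.
 */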
141 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
142 bool for_background)
143 {
144 struct fuse_req *req;
145 int err;
146 atomic_inc(&fc->num_waiting);
147
148 if (fuse_block_alloc(fc, for_background)) {
149 sigset_t oldset;
150 int intr;
151
152 block_sigs(&oldset);
153 intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
154 !fuse_block_alloc(fc, for_background));
155 restore_sigs(&oldset);
156 err = -EINTR;
157 if (intr)
158 goto out;
159 }
160
161 err = -ENOTCONN;
162 if (!fc->connected)
163 goto out;
164
165 req = fuse_request_alloc(npages);
166 err = -ENOMEM;
167 if (!req) {
168 if (for_background)
169 wake_up(&fc->blocked_waitq);
170 goto out;
171 }
172
173 fuse_req_init_context(req);
174 req->waiting = 1;
175 req->background = for_background;
176 return req;
177
178 out:
179 atomic_dec(&fc->num_waiting);
180 return ERR_PTR(err);
181 }
182
183 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
184 {
185 return __fuse_get_req(fc, npages, false);
186 }
187 EXPORT_SYMBOL_GPL(fuse_get_req);
188
189 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
190 unsigned npages)
191 {
192 return __fuse_get_req(fc, npages, true);
193 }
194 EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
195
196 /*
197 * Return request in fuse_file->reserved_req. However that may
198 * currently be in use. If that is the case, wait for it to become
199 * available.
200 */
201 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
202 struct file *file)
203 {
204 struct fuse_req *req = NULL;
205 struct fuse_file *ff = file->private_data;
206
207 do {
208 wait_event(fc->reserved_req_waitq, ff->reserved_req);
209 spin_lock(&fc->lock);
210 if (ff->reserved_req) {
211 req = ff->reserved_req;
212 ff->reserved_req = NULL;
213 req->stolen_file = get_file(file);
214 }
215 spin_unlock(&fc->lock);
216 } while (!req);
217
218 return req;
219 }
220
221 /*
222 * Put stolen request back into fuse_file->reserved_req
223 */
224 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
225 {
226 struct file *file = req->stolen_file;
227 struct fuse_file *ff = file->private_data;
228
229 spin_lock(&fc->lock);
230 fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
231 BUG_ON(ff->reserved_req);
232 ff->reserved_req = req;
233 wake_up_all(&fc->reserved_req_waitq);
234 spin_unlock(&fc->lock);
235 fput(file);
236 }
237
238 /*
239 * Gets a request for a file operation, always succeeds
240 *
241 * This is used for sending the FLUSH request, which must get to
242 * userspace, due to POSIX locks which may need to be unlocked.
243 *
244 * If allocation fails due to OOM, use the reserved request in
245 * fuse_file.
246 *
247 * This is very unlikely to deadlock accidentally, since the
248 * filesystem should not have its own file open. If deadlock is
249 * intentional, it can still be broken by "aborting" the filesystem.
250 */
251 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
252 struct file *file)
253 {
254 struct fuse_req *req;
255
256 atomic_inc(&fc->num_waiting);
257 wait_event(fc->blocked_waitq, fc->initialized);
258 req = fuse_request_alloc(0);
259 if (!req)
260 req = get_reserved_req(fc, file);
261
262 fuse_req_init_context(req);
263 req->waiting = 1;
264 req->background = 0;
265 return req;
266 }
267
268 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
269 {
270 if (atomic_dec_and_test(&req->count)) {
271 if (unlikely(req->background)) {
272 /*
273 * We get here in the unlikely case that a background
274 * request was allocated but not sent
275 */
276 spin_lock(&fc->lock);
277 if (!fc->blocked)
278 wake_up(&fc->blocked_waitq);
279 spin_unlock(&fc->lock);
280 }
281
282 if (req->waiting)
283 atomic_dec(&fc->num_waiting);
284
285 if (req->stolen_file)
286 put_reserved_req(fc, req);
287 else
288 fuse_request_free(req);
289 }
290 }
291 EXPORT_SYMBOL_GPL(fuse_put_request);
292
293 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
294 {
295 unsigned nbytes = 0;
296 unsigned i;
297
298 for (i = 0; i < numargs; i++)
299 nbytes += args[i].size;
300
301 return nbytes;
302 }
303
304 static u64 fuse_get_unique(struct fuse_conn *fc)
305 {
306 fc->reqctr++;
307 /* zero is special */
308 if (fc->reqctr == 0)
309 fc->reqctr = 1;
310
311 return fc->reqctr;
312 }
313
314 static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
315 {
316 req->in.h.len = sizeof(struct fuse_in_header) +
317 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
318 list_add_tail(&req->list, &fc->pending);
319 req->state = FUSE_REQ_PENDING;
320 if (!req->waiting) {
321 req->waiting = 1;
322 atomic_inc(&fc->num_waiting);
323 }
324 wake_up(&fc->waitq);
325 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
326 }
327
328 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
329 u64 nodeid, u64 nlookup)
330 {
331 forget->forget_one.nodeid = nodeid;
332 forget->forget_one.nlookup = nlookup;
333
334 spin_lock(&fc->lock);
335 if (fc->connected) {
336 fc->forget_list_tail->next = forget;
337 fc->forget_list_tail = forget;
338 wake_up(&fc->waitq);
339 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
340 } else {
341 kfree(forget);
342 }
343 spin_unlock(&fc->lock);
344 }
345
346 static void flush_bg_queue(struct fuse_conn *fc)
347 {
348 while (fc->active_background < fc->max_background &&
349 !list_empty(&fc->bg_queue)) {
350 struct fuse_req *req;
351
352 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
353 list_del(&req->list);
354 fc->active_background++;
355 req->in.h.unique = fuse_get_unique(fc);
356 queue_request(fc, req);
357 }
358 }
359
360 /*
361 * This function is called when a request is finished. Either a reply
362 * has arrived or it was aborted (and not yet sent) or some error
363 * occurred during communication with userspace, or the device file
364 * was closed. The requester thread is woken up (if still waiting),
365 * the 'end' callback is called if given, else the reference to the
366 * request is released
367 *
368 * Called with fc->lock, unlocks it
369 */
370 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
371 __releases(fc->lock)
372 {
373 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
374 req->end = NULL;
375 list_del(&req->list);
376 list_del(&req->intr_entry);
377 req->state = FUSE_REQ_FINISHED;
378 if (req->background) {
379 req->background = 0;
380
381 if (fc->num_background == fc->max_background)
382 fc->blocked = 0;
383
384 /* Wake up next waiter, if any */
385 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
386 wake_up(&fc->blocked_waitq);
387
388 if (fc->num_background == fc->congestion_threshold &&
389 fc->connected && fc->bdi_initialized) {
390 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
391 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
392 }
393 fc->num_background--;
394 fc->active_background--;
395 flush_bg_queue(fc);
396 }
397 spin_unlock(&fc->lock);
398 wake_up(&req->waitq);
399 if (end)
400 end(fc, req);
401 fuse_put_request(fc, req);
402 }
403
404 static void wait_answer_interruptible(struct fuse_conn *fc,
405 struct fuse_req *req)
406 __releases(fc->lock)
407 __acquires(fc->lock)
408 {
409 if (signal_pending(current))
410 return;
411
412 spin_unlock(&fc->lock);
413 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
414 spin_lock(&fc->lock);
415 }
416
417 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
418 {
419 list_add_tail(&req->intr_entry, &fc->interrupts);
420 wake_up(&fc->waitq);
421 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
422 }
423
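/*
 * Wait for the request to be answered.  The wait is done in stages:
 * first interruptible by any signal (so that a FUSE_INTERRUPT can be
 * queued), then interruptible only by SIGKILL (a still-pending
 * request is dequeued and fails with -EINTR), and finally, once the
 * request has reached userspace or was forced, uninterruptibly until
 * the reply arrives.
 */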
424 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
425 __releases(fc->lock)
426 __acquires(fc->lock)
427 {
428 if (!fc->no_interrupt) {
429 /* Any signal may interrupt this */
430 wait_answer_interruptible(fc, req);
431
432 if (req->aborted)
433 goto aborted;
434 if (req->state == FUSE_REQ_FINISHED)
435 return;
436
437 req->interrupted = 1;
438 if (req->state == FUSE_REQ_SENT)
439 queue_interrupt(fc, req);
440 }
441
442 if (!req->force) {
443 sigset_t oldset;
444
445 /* Only fatal signals may interrupt this */
446 block_sigs(&oldset);
447 wait_answer_interruptible(fc, req);
448 restore_sigs(&oldset);
449
450 if (req->aborted)
451 goto aborted;
452 if (req->state == FUSE_REQ_FINISHED)
453 return;
454
455 /* Request is not yet in userspace, bail out */
456 if (req->state == FUSE_REQ_PENDING) {
457 list_del(&req->list);
458 __fuse_put_request(req);
459 req->out.h.error = -EINTR;
460 return;
461 }
462 }
463
464 /*
465 * Either request is already in userspace, or it was forced.
466 * Wait it out.
467 */
468 spin_unlock(&fc->lock);
469
470 while (req->state != FUSE_REQ_FINISHED)
471 wait_event_freezable(req->waitq,
472 req->state == FUSE_REQ_FINISHED);
473 spin_lock(&fc->lock);
474
475 if (!req->aborted)
476 return;
477
478 aborted:
479 BUG_ON(req->state != FUSE_REQ_FINISHED);
480 if (req->locked) {
481 /* This is uninterruptible sleep, because data is
482 being copied to/from the buffers of req. During
483 locked state, there mustn't be any filesystem
484 operation (e.g. page fault), since that could lead
485 to deadlock */
486 spin_unlock(&fc->lock);
487 wait_event(req->waitq, !req->locked);
488 spin_lock(&fc->lock);
489 }
490 }
491
492 #define CREATE_TRACE_POINTS
493 #include <linux/met_ftrace_fuse.h>
494
495 void met_fuse(int t_pid, char *t_name, unsigned int op, unsigned int size, struct timespec s_time, struct timespec e_time)
496 {
497 MET_FTRACE_PRINTK(met_fuse, t_pid, t_name, op, size, s_time, e_time);
498 }
499
500 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
501 {
502 BUG_ON(req->background);
503 spin_lock(&fc->lock);
504 if (!fc->connected)
505 req->out.h.error = -ENOTCONN;
506 else if (fc->conn_error)
507 req->out.h.error = -ECONNREFUSED;
508 else {
509 req->in.h.unique = fuse_get_unique(fc);
510 queue_request(fc, req);
511 /* acquire extra reference, since request is still needed
512 after request_end() */
513 __fuse_get_request(req);
514
515 request_wait_answer(fc, req);
516 }
517 spin_unlock(&fc->lock);
518 }
519
520 void fuse_request_send_ex(struct fuse_conn *fc, struct fuse_req *req,
521 __u32 size)
522 {
523 #ifdef MET_FUSEIO_TRACE
524 char name[TASK_COMM_LEN];
525 #endif
526
527 MET_FUSE_IOLOG_INIT();
528 FUSE_IOLOG_INIT();
529 req->isreply = 1;
530 FUSE_IOLOG_START();
531 MET_FUSE_IOLOG_START();
532 __fuse_request_send(fc, req);
533 MET_FUSE_IOLOG_END();
534 FUSE_IOLOG_END();
535 FUSE_IOLOG_PRINT(size, req->in.h.opcode);
536
537 #ifdef MET_FUSEIO_TRACE
538 met_fuse(task_pid_nr(current), get_task_comm(name, current), req->in.h.opcode, size, met_fuse_start_time, met_fuse_end_time);
539 #endif
540 }
541 EXPORT_SYMBOL_GPL(fuse_request_send_ex);
542
543 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
544 {
545 fuse_request_send_ex(fc, req, 0);
546 }
547 EXPORT_SYMBOL_GPL(fuse_request_send);
548
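/*
 * Queue a background request.  num_background is accounted here;
 * reaching max_background sets fc->blocked, and reaching
 * congestion_threshold marks the bdi congested.  The request sits on
 * bg_queue until flush_bg_queue() moves it to the pending list.
 */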
549 static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
550 struct fuse_req *req)
551 {
552 BUG_ON(!req->background);
553 fc->num_background++;
554 if (fc->num_background == fc->max_background)
555 fc->blocked = 1;
556 if (fc->num_background == fc->congestion_threshold &&
557 fc->bdi_initialized) {
558 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
559 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
560 }
561 list_add_tail(&req->list, &fc->bg_queue);
562 flush_bg_queue(fc);
563 }
564
565 static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
566 {
567 spin_lock(&fc->lock);
568 if (fc->connected) {
569 fuse_request_send_nowait_locked(fc, req);
570 spin_unlock(&fc->lock);
571 } else {
572 req->out.h.error = -ENOTCONN;
573 request_end(fc, req);
574 }
575 }
576
577 void fuse_request_send_background_ex(struct fuse_conn *fc, struct fuse_req *req,
578 __u32 size)
579 {
580 FUSE_IOLOG_INIT();
581 FUSE_IOLOG_START();
582 req->isreply = 1;
583 fuse_request_send_nowait(fc, req);
584 FUSE_IOLOG_END();
585 FUSE_IOLOG_PRINT(size, req->in.h.opcode);
586 }
587 EXPORT_SYMBOL_GPL(fuse_request_send_background_ex);
588
589 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
590 {
591 fuse_request_send_background_ex(fc, req, 0);
592 }
593 EXPORT_SYMBOL_GPL(fuse_request_send_background);
594
595 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
596 struct fuse_req *req, u64 unique)
597 {
598 int err = -ENODEV;
599
600 req->isreply = 0;
601 req->in.h.unique = unique;
602 spin_lock(&fc->lock);
603 if (fc->connected) {
604 queue_request(fc, req);
605 err = 0;
606 }
607 spin_unlock(&fc->lock);
608
609 return err;
610 }
611
612 /*
613 * Called under fc->lock
614 *
615 * fc->connected must have been checked previously
616 */
617 void fuse_request_send_background_locked(struct fuse_conn *fc,
618 struct fuse_req *req)
619 {
620 req->isreply = 1;
621 fuse_request_send_nowait_locked(fc, req);
622 }
623
624 void fuse_force_forget(struct file *file, u64 nodeid)
625 {
626 struct inode *inode = file_inode(file);
627 struct fuse_conn *fc = get_fuse_conn(inode);
628 struct fuse_req *req;
629 struct fuse_forget_in inarg;
630
631 memset(&inarg, 0, sizeof(inarg));
632 inarg.nlookup = 1;
633 req = fuse_get_req_nofail_nopages(fc, file);
634 req->in.h.opcode = FUSE_FORGET;
635 req->in.h.nodeid = nodeid;
636 req->in.numargs = 1;
637 req->in.args[0].size = sizeof(inarg);
638 req->in.args[0].value = &inarg;
639 req->isreply = 0;
640 __fuse_request_send(fc, req);
641 /* ignore errors */
642 fuse_put_request(fc, req);
643 }
644
645 /*
646 * Lock the request. Up to the next unlock_request() there mustn't be
647 * anything that could cause a page-fault. If the request was already
648 * aborted bail out.
649 */
650 static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
651 {
652 int err = 0;
653 if (req) {
654 spin_lock(&fc->lock);
655 if (req->aborted)
656 err = -ENOENT;
657 else
658 req->locked = 1;
659 spin_unlock(&fc->lock);
660 }
661 return err;
662 }
663
664 /*
665 * Unlock request. If it was aborted during being locked, the
666 * requester thread is currently waiting for it to be unlocked, so
667 * wake it up.
668 */
669 static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
670 {
671 if (req) {
672 spin_lock(&fc->lock);
673 req->locked = 0;
674 if (req->aborted)
675 wake_up(&req->waitq);
676 spin_unlock(&fc->lock);
677 }
678 }
679
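/*
 * State of an ongoing copy between a request and userspace.  The
 * source/destination is either an iovec (plain read/write on the
 * device) or an array of pipe buffers (splice); 'write' is nonzero
 * when data flows from the request towards userspace.  buf/len
 * describe the currently mapped chunk of the current page.
 */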
680 struct fuse_copy_state {
681 struct fuse_conn *fc;
682 int write;
683 struct fuse_req *req;
684 const struct iovec *iov;
685 struct pipe_buffer *pipebufs;
686 struct pipe_buffer *currbuf;
687 struct pipe_inode_info *pipe;
688 unsigned long nr_segs;
689 unsigned long seglen;
690 unsigned long addr;
691 struct page *pg;
692 void *mapaddr;
693 void *buf;
694 unsigned len;
695 unsigned move_pages:1;
696 };
697
698 static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
699 int write,
700 const struct iovec *iov, unsigned long nr_segs)
701 {
702 memset(cs, 0, sizeof(*cs));
703 cs->fc = fc;
704 cs->write = write;
705 cs->iov = iov;
706 cs->nr_segs = nr_segs;
707 }
708
709 /* Unmap and put previous page of userspace buffer */
710 static void fuse_copy_finish(struct fuse_copy_state *cs)
711 {
712 if (cs->currbuf) {
713 struct pipe_buffer *buf = cs->currbuf;
714
715 if (!cs->write) {
716 buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
717 } else {
718 kunmap(buf->page);
719 buf->len = PAGE_SIZE - cs->len;
720 }
721 cs->currbuf = NULL;
722 cs->mapaddr = NULL;
723 } else if (cs->mapaddr) {
724 kunmap(cs->pg);
725 if (cs->write) {
726 flush_dcache_page(cs->pg);
727 set_page_dirty_lock(cs->pg);
728 }
729 put_page(cs->pg);
730 cs->mapaddr = NULL;
731 }
732 }
733
734 /*
735 * Get another pageful of the userspace buffer, map it into kernel
736 * address space and lock the request
737 */
738 static int fuse_copy_fill(struct fuse_copy_state *cs)
739 {
740 unsigned long offset;
741 int err;
742
743 unlock_request(cs->fc, cs->req);
744 fuse_copy_finish(cs);
745 if (cs->pipebufs) {
746 struct pipe_buffer *buf = cs->pipebufs;
747
748 if (!cs->write) {
749 err = buf->ops->confirm(cs->pipe, buf);
750 if (err)
751 return err;
752
753 BUG_ON(!cs->nr_segs);
754 cs->currbuf = buf;
755 cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
756 cs->len = buf->len;
757 cs->buf = cs->mapaddr + buf->offset;
758 cs->pipebufs++;
759 cs->nr_segs--;
760 } else {
761 struct page *page;
762
763 if (cs->nr_segs == cs->pipe->buffers)
764 return -EIO;
765
766 page = alloc_page(GFP_HIGHUSER);
767 if (!page)
768 return -ENOMEM;
769
770 buf->page = page;
771 buf->offset = 0;
772 buf->len = 0;
773
774 cs->currbuf = buf;
775 cs->mapaddr = kmap(page);
776 cs->buf = cs->mapaddr;
777 cs->len = PAGE_SIZE;
778 cs->pipebufs++;
779 cs->nr_segs++;
780 }
781 } else {
782 if (!cs->seglen) {
783 BUG_ON(!cs->nr_segs);
784 cs->seglen = cs->iov[0].iov_len;
785 cs->addr = (unsigned long) cs->iov[0].iov_base;
786 cs->iov++;
787 cs->nr_segs--;
788 }
789 err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
790 if (err < 0)
791 return err;
792 BUG_ON(err != 1);
793 offset = cs->addr % PAGE_SIZE;
794 cs->mapaddr = kmap(cs->pg);
795 cs->buf = cs->mapaddr + offset;
796 cs->len = min(PAGE_SIZE - offset, cs->seglen);
797 cs->seglen -= cs->len;
798 cs->addr += cs->len;
799 }
800
801 return lock_request(cs->fc, cs->req);
802 }
803
804 /* Do as much copy to/from userspace buffer as we can */
805 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
806 {
807 unsigned ncpy = min(*size, cs->len);
808 if (val) {
809 if (cs->write)
810 memcpy(cs->buf, *val, ncpy);
811 else
812 memcpy(*val, cs->buf, ncpy);
813 *val += ncpy;
814 }
815 *size -= ncpy;
816 cs->len -= ncpy;
817 cs->buf += ncpy;
818 return ncpy;
819 }
820
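/*
 * A stolen page may only be installed in the page cache if nobody
 * else maps or references it and it carries no unexpected flags.
 */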
821 static int fuse_check_page(struct page *page)
822 {
823 if (page_mapcount(page) ||
824 page->mapping != NULL ||
825 page_count(page) != 1 ||
826 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
827 ~(1 << PG_locked |
828 1 << PG_referenced |
829 1 << PG_uptodate |
830 1 << PG_lru |
831 1 << PG_active |
832 1 << PG_reclaim))) {
833 printk(KERN_WARNING "fuse: trying to steal weird page\n");
834 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
835 return 1;
836 }
837 return 0;
838 }
839
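/*
 * Zero-copy path for SPLICE_F_MOVE: try to steal the full page from
 * the pipe buffer and install it in the page cache in place of the
 * request's page via replace_page_cache_page().  Returns 1 (with the
 * buffer mapped) to make the caller fall back to an ordinary copy if
 * the page cannot be moved.
 */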
840 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
841 {
842 int err;
843 struct page *oldpage = *pagep;
844 struct page *newpage;
845 struct pipe_buffer *buf = cs->pipebufs;
846
847 unlock_request(cs->fc, cs->req);
848 fuse_copy_finish(cs);
849
850 err = buf->ops->confirm(cs->pipe, buf);
851 if (err)
852 return err;
853
854 BUG_ON(!cs->nr_segs);
855 cs->currbuf = buf;
856 cs->len = buf->len;
857 cs->pipebufs++;
858 cs->nr_segs--;
859
860 if (cs->len != PAGE_SIZE)
861 goto out_fallback;
862
863 if (buf->ops->steal(cs->pipe, buf) != 0)
864 goto out_fallback;
865
866 newpage = buf->page;
867
868 if (!PageUptodate(newpage))
869 SetPageUptodate(newpage);
870
871 ClearPageMappedToDisk(newpage);
872
873 if (fuse_check_page(newpage) != 0)
874 goto out_fallback_unlock;
875
876 /*
877 * This is a new and locked page; it shouldn't be mapped or
878 * have any special flags on it
879 */
880 if (WARN_ON(page_mapped(oldpage)))
881 goto out_fallback_unlock;
882 if (WARN_ON(page_has_private(oldpage)))
883 goto out_fallback_unlock;
884 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
885 goto out_fallback_unlock;
886 if (WARN_ON(PageMlocked(oldpage)))
887 goto out_fallback_unlock;
888
889 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
890 if (err) {
891 unlock_page(newpage);
892 return err;
893 }
894
895 page_cache_get(newpage);
896
897 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
898 lru_cache_add_file(newpage);
899
900 err = 0;
901 spin_lock(&cs->fc->lock);
902 if (cs->req->aborted)
903 err = -ENOENT;
904 else
905 *pagep = newpage;
906 spin_unlock(&cs->fc->lock);
907
908 if (err) {
909 unlock_page(newpage);
910 page_cache_release(newpage);
911 return err;
912 }
913
914 unlock_page(oldpage);
915 page_cache_release(oldpage);
916 cs->len = 0;
917
918 return 0;
919
920 out_fallback_unlock:
921 unlock_page(newpage);
922 out_fallback:
923 cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
924 cs->buf = cs->mapaddr + buf->offset;
925
926 err = lock_request(cs->fc, cs->req);
927 if (err)
928 return err;
929
930 return 1;
931 }
932
933 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
934 unsigned offset, unsigned count)
935 {
936 struct pipe_buffer *buf;
937
938 if (cs->nr_segs == cs->pipe->buffers)
939 return -EIO;
940
941 unlock_request(cs->fc, cs->req);
942 fuse_copy_finish(cs);
943
944 buf = cs->pipebufs;
945 page_cache_get(page);
946 buf->page = page;
947 buf->offset = offset;
948 buf->len = count;
949
950 cs->pipebufs++;
951 cs->nr_segs++;
952 cs->len = 0;
953
954 return 0;
955 }
956
957 /*
958 * Copy a page in the request to/from the userspace buffer. Must be
959 * done atomically
960 */
961 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
962 unsigned offset, unsigned count, int zeroing)
963 {
964 int err;
965 struct page *page = *pagep;
966
967 if (page && zeroing && count < PAGE_SIZE)
968 clear_highpage(page);
969
970 while (count) {
971 if (cs->write && cs->pipebufs && page) {
972 return fuse_ref_page(cs, page, offset, count);
973 } else if (!cs->len) {
974 if (cs->move_pages && page &&
975 offset == 0 && count == PAGE_SIZE) {
976 err = fuse_try_move_page(cs, pagep);
977 if (err <= 0)
978 return err;
979 } else {
980 err = fuse_copy_fill(cs);
981 if (err)
982 return err;
983 }
984 }
985 if (page) {
986 void *mapaddr = kmap_atomic(page);
987 void *buf = mapaddr + offset;
988 offset += fuse_copy_do(cs, &buf, &count);
989 kunmap_atomic(mapaddr);
990 } else
991 offset += fuse_copy_do(cs, NULL, &count);
992 }
993 if (page && !cs->write)
994 flush_dcache_page(page);
995 return 0;
996 }
997
998 /* Copy pages in the request to/from userspace buffer */
999 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1000 int zeroing)
1001 {
1002 unsigned i;
1003 struct fuse_req *req = cs->req;
1004
1005 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
1006 int err;
1007 unsigned offset = req->page_descs[i].offset;
1008 unsigned count = min(nbytes, req->page_descs[i].length);
1009
1010 err = fuse_copy_page(cs, &req->pages[i], offset, count,
1011 zeroing);
1012 if (err)
1013 return err;
1014
1015 nbytes -= count;
1016 }
1017 return 0;
1018 }
1019
1020 /* Copy a single argument in the request to/from userspace buffer */
1021 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1022 {
1023 while (size) {
1024 if (!cs->len) {
1025 int err = fuse_copy_fill(cs);
1026 if (err)
1027 return err;
1028 }
1029 fuse_copy_do(cs, &val, &size);
1030 }
1031 return 0;
1032 }
1033
1034 /* Copy request arguments to/from userspace buffer */
1035 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1036 unsigned argpages, struct fuse_arg *args,
1037 int zeroing)
1038 {
1039 int err = 0;
1040 unsigned i;
1041
1042 for (i = 0; !err && i < numargs; i++) {
1043 struct fuse_arg *arg = &args[i];
1044 if (i == numargs - 1 && argpages)
1045 err = fuse_copy_pages(cs, arg->size, zeroing);
1046 else
1047 err = fuse_copy_one(cs, arg->value, arg->size);
1048 }
1049 return err;
1050 }
1051
1052 static int forget_pending(struct fuse_conn *fc)
1053 {
1054 return fc->forget_list_head.next != NULL;
1055 }
1056
1057 static int request_pending(struct fuse_conn *fc)
1058 {
1059 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
1060 forget_pending(fc);
1061 }
1062
1063 /* Wait until a request is available on the pending list */
1064 static void request_wait(struct fuse_conn *fc)
1065 __releases(fc->lock)
1066 __acquires(fc->lock)
1067 {
1068 DECLARE_WAITQUEUE(wait, current);
1069
1070 add_wait_queue_exclusive(&fc->waitq, &wait);
1071 while (fc->connected && !request_pending(fc)) {
1072 set_current_state(TASK_INTERRUPTIBLE);
1073 if (signal_pending(current))
1074 break;
1075
1076 spin_unlock(&fc->lock);
1077 schedule();
1078 spin_lock(&fc->lock);
1079 }
1080 set_current_state(TASK_RUNNING);
1081 remove_wait_queue(&fc->waitq, &wait);
1082 }
1083
1084 /*
1085 * Transfer an interrupt request to userspace
1086 *
1087 * Unlike other requests this is assembled on demand, without a need
1088 * to allocate a separate fuse_req structure.
1089 *
1090 * Called with fc->lock held, releases it
1091 */
1092 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
1093 size_t nbytes, struct fuse_req *req)
1094 __releases(fc->lock)
1095 {
1096 struct fuse_in_header ih;
1097 struct fuse_interrupt_in arg;
1098 unsigned reqsize = sizeof(ih) + sizeof(arg);
1099 int err;
1100
1101 list_del_init(&req->intr_entry);
1102 req->intr_unique = fuse_get_unique(fc);
1103 memset(&ih, 0, sizeof(ih));
1104 memset(&arg, 0, sizeof(arg));
1105 ih.len = reqsize;
1106 ih.opcode = FUSE_INTERRUPT;
1107 ih.unique = req->intr_unique;
1108 arg.unique = req->in.h.unique;
1109
1110 spin_unlock(&fc->lock);
1111 if (nbytes < reqsize)
1112 return -EINVAL;
1113
1114 err = fuse_copy_one(cs, &ih, sizeof(ih));
1115 if (!err)
1116 err = fuse_copy_one(cs, &arg, sizeof(arg));
1117 fuse_copy_finish(cs);
1118
1119 return err ? err : reqsize;
1120 }
1121
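/*
 * Detach up to 'max' entries from the head of the forget list and
 * return them as a singly linked chain; *countp (if given) is set to
 * the number of entries actually dequeued.
 */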
1122 static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
1123 unsigned max,
1124 unsigned *countp)
1125 {
1126 struct fuse_forget_link *head = fc->forget_list_head.next;
1127 struct fuse_forget_link **newhead = &head;
1128 unsigned count;
1129
1130 for (count = 0; *newhead != NULL && count < max; count++)
1131 newhead = &(*newhead)->next;
1132
1133 fc->forget_list_head.next = *newhead;
1134 *newhead = NULL;
1135 if (fc->forget_list_head.next == NULL)
1136 fc->forget_list_tail = &fc->forget_list_head;
1137
1138 if (countp != NULL)
1139 *countp = count;
1140
1141 return head;
1142 }
1143
1144 static int fuse_read_single_forget(struct fuse_conn *fc,
1145 struct fuse_copy_state *cs,
1146 size_t nbytes)
1147 __releases(fc->lock)
1148 {
1149 int err;
1150 struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
1151 struct fuse_forget_in arg = {
1152 .nlookup = forget->forget_one.nlookup,
1153 };
1154 struct fuse_in_header ih = {
1155 .opcode = FUSE_FORGET,
1156 .nodeid = forget->forget_one.nodeid,
1157 .unique = fuse_get_unique(fc),
1158 .len = sizeof(ih) + sizeof(arg),
1159 };
1160
1161 spin_unlock(&fc->lock);
1162 kfree(forget);
1163 if (nbytes < ih.len)
1164 return -EINVAL;
1165
1166 err = fuse_copy_one(cs, &ih, sizeof(ih));
1167 if (!err)
1168 err = fuse_copy_one(cs, &arg, sizeof(arg));
1169 fuse_copy_finish(cs);
1170
1171 if (err)
1172 return err;
1173
1174 return ih.len;
1175 }
1176
1177 static int fuse_read_batch_forget(struct fuse_conn *fc,
1178 struct fuse_copy_state *cs, size_t nbytes)
1179 __releases(fc->lock)
1180 {
1181 int err;
1182 unsigned max_forgets;
1183 unsigned count;
1184 struct fuse_forget_link *head;
1185 struct fuse_batch_forget_in arg = { .count = 0 };
1186 struct fuse_in_header ih = {
1187 .opcode = FUSE_BATCH_FORGET,
1188 .unique = fuse_get_unique(fc),
1189 .len = sizeof(ih) + sizeof(arg),
1190 };
1191
1192 if (nbytes < ih.len) {
1193 spin_unlock(&fc->lock);
1194 return -EINVAL;
1195 }
1196
1197 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1198 head = dequeue_forget(fc, max_forgets, &count);
1199 spin_unlock(&fc->lock);
1200
1201 arg.count = count;
1202 ih.len += count * sizeof(struct fuse_forget_one);
1203 err = fuse_copy_one(cs, &ih, sizeof(ih));
1204 if (!err)
1205 err = fuse_copy_one(cs, &arg, sizeof(arg));
1206
1207 while (head) {
1208 struct fuse_forget_link *forget = head;
1209
1210 if (!err) {
1211 err = fuse_copy_one(cs, &forget->forget_one,
1212 sizeof(forget->forget_one));
1213 }
1214 head = forget->next;
1215 kfree(forget);
1216 }
1217
1218 fuse_copy_finish(cs);
1219
1220 if (err)
1221 return err;
1222
1223 return ih.len;
1224 }
1225
1226 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
1227 size_t nbytes)
1228 __releases(fc->lock)
1229 {
1230 if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
1231 return fuse_read_single_forget(fc, cs, nbytes);
1232 else
1233 return fuse_read_batch_forget(fc, cs, nbytes);
1234 }
1235
1236 /*
1237 * Read a single request into the userspace filesystem's buffer. This
1238 * function waits until a request is available, then removes it from
1239 * the pending list and copies request data to userspace buffer. If
1240 * no reply is needed (FORGET), the request has been aborted, or there
1241 * was an error during the copying, then it's finished by calling
1242 * request_end(). Otherwise add it to the processing list, and set
1243 * the 'sent' flag.
1244 */
1245 static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1246 struct fuse_copy_state *cs, size_t nbytes)
1247 {
1248 int err;
1249 struct fuse_req *req;
1250 struct fuse_in *in;
1251 unsigned reqsize;
1252
1253 restart:
1254 spin_lock(&fc->lock);
1255 err = -EAGAIN;
1256 if ((file->f_flags & O_NONBLOCK) && fc->connected &&
1257 !request_pending(fc))
1258 goto err_unlock;
1259
1260 request_wait(fc);
1261 err = -ENODEV;
1262 if (!fc->connected)
1263 goto err_unlock;
1264 err = -ERESTARTSYS;
1265 if (!request_pending(fc))
1266 goto err_unlock;
1267
1268 if (!list_empty(&fc->interrupts)) {
1269 req = list_entry(fc->interrupts.next, struct fuse_req,
1270 intr_entry);
1271 return fuse_read_interrupt(fc, cs, nbytes, req);
1272 }
1273
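	/*
	 * Interleave forgets with normal requests: send forgets in
	 * batches, but periodically let a pending request through so
	 * that neither queue starves the other.
	 */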
1274 if (forget_pending(fc)) {
1275 if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
1276 return fuse_read_forget(fc, cs, nbytes);
1277
1278 if (fc->forget_batch <= -8)
1279 fc->forget_batch = 16;
1280 }
1281
1282 req = list_entry(fc->pending.next, struct fuse_req, list);
1283 req->state = FUSE_REQ_READING;
1284 list_move(&req->list, &fc->io);
1285
1286 in = &req->in;
1287 reqsize = in->h.len;
1288 /* If request is too large, reply with an error and restart the read */
1289 if (nbytes < reqsize) {
1290 req->out.h.error = -EIO;
1291 /* SETXATTR is special, since its data may be too large */
1292 if (in->h.opcode == FUSE_SETXATTR)
1293 req->out.h.error = -E2BIG;
1294 request_end(fc, req);
1295 goto restart;
1296 }
1297 spin_unlock(&fc->lock);
1298 cs->req = req;
1299 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1300 if (!err)
1301 err = fuse_copy_args(cs, in->numargs, in->argpages,
1302 (struct fuse_arg *) in->args, 0);
1303 fuse_copy_finish(cs);
1304 spin_lock(&fc->lock);
1305 req->locked = 0;
1306 if (req->aborted) {
1307 request_end(fc, req);
1308 return -ENODEV;
1309 }
1310 if (err) {
1311 req->out.h.error = -EIO;
1312 request_end(fc, req);
1313 return err;
1314 }
1315 if (!req->isreply)
1316 request_end(fc, req);
1317 else {
1318 req->state = FUSE_REQ_SENT;
1319 list_move_tail(&req->list, &fc->processing);
1320 if (req->interrupted)
1321 queue_interrupt(fc, req);
1322 spin_unlock(&fc->lock);
1323 }
1324 return reqsize;
1325
1326 err_unlock:
1327 spin_unlock(&fc->lock);
1328 return err;
1329 }
1330
1331 static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1332 unsigned long nr_segs, loff_t pos)
1333 {
1334 struct fuse_copy_state cs;
1335 struct file *file = iocb->ki_filp;
1336 struct fuse_conn *fc = fuse_get_conn(file);
1337 if (!fc)
1338 return -EPERM;
1339
1340 fuse_copy_init(&cs, fc, 1, iov, nr_segs);
1341
1342 return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
1343 }
1344
1345 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1346 struct pipe_inode_info *pipe,
1347 size_t len, unsigned int flags)
1348 {
1349 int ret;
1350 int page_nr = 0;
1351 int do_wakeup = 0;
1352 struct pipe_buffer *bufs;
1353 struct fuse_copy_state cs;
1354 struct fuse_conn *fc = fuse_get_conn(in);
1355 if (!fc)
1356 return -EPERM;
1357
1358 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1359 if (!bufs)
1360 return -ENOMEM;
1361
1362 fuse_copy_init(&cs, fc, 1, NULL, 0);
1363 cs.pipebufs = bufs;
1364 cs.pipe = pipe;
1365 ret = fuse_dev_do_read(fc, in, &cs, len);
1366 if (ret < 0)
1367 goto out;
1368
1369 ret = 0;
1370 pipe_lock(pipe);
1371
1372 if (!pipe->readers) {
1373 send_sig(SIGPIPE, current, 0);
1374 if (!ret)
1375 ret = -EPIPE;
1376 goto out_unlock;
1377 }
1378
1379 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1380 ret = -EIO;
1381 goto out_unlock;
1382 }
1383
1384 while (page_nr < cs.nr_segs) {
1385 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1386 struct pipe_buffer *buf = pipe->bufs + newbuf;
1387
1388 buf->page = bufs[page_nr].page;
1389 buf->offset = bufs[page_nr].offset;
1390 buf->len = bufs[page_nr].len;
1391 /*
1392 * Need to be careful about this. Having buf->ops in module
1393 * code can Oops if the buffer persists after module unload.
1394 */
1395 buf->ops = &nosteal_pipe_buf_ops;
1396
1397 pipe->nrbufs++;
1398 page_nr++;
1399 ret += buf->len;
1400
1401 if (pipe->files)
1402 do_wakeup = 1;
1403 }
1404
1405 out_unlock:
1406 pipe_unlock(pipe);
1407
1408 if (do_wakeup) {
1409 smp_mb();
1410 if (waitqueue_active(&pipe->wait))
1411 wake_up_interruptible(&pipe->wait);
1412 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1413 }
1414
1415 out:
1416 for (; page_nr < cs.nr_segs; page_nr++)
1417 page_cache_release(bufs[page_nr].page);
1418
1419 kfree(bufs);
1420 return ret;
1421 }
1422
1423 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1424 struct fuse_copy_state *cs)
1425 {
1426 struct fuse_notify_poll_wakeup_out outarg;
1427 int err = -EINVAL;
1428
1429 if (size != sizeof(outarg))
1430 goto err;
1431
1432 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1433 if (err)
1434 goto err;
1435
1436 fuse_copy_finish(cs);
1437 return fuse_notify_poll_wakeup(fc, &outarg);
1438
1439 err:
1440 fuse_copy_finish(cs);
1441 return err;
1442 }
1443
1444 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1445 struct fuse_copy_state *cs)
1446 {
1447 struct fuse_notify_inval_inode_out outarg;
1448 int err = -EINVAL;
1449
1450 if (size != sizeof(outarg))
1451 goto err;
1452
1453 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1454 if (err)
1455 goto err;
1456 fuse_copy_finish(cs);
1457
1458 down_read(&fc->killsb);
1459 err = -ENOENT;
1460 if (fc->sb) {
1461 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1462 outarg.off, outarg.len);
1463 }
1464 up_read(&fc->killsb);
1465 return err;
1466
1467 err:
1468 fuse_copy_finish(cs);
1469 return err;
1470 }
1471
1472 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1473 struct fuse_copy_state *cs)
1474 {
1475 struct fuse_notify_inval_entry_out outarg;
1476 int err = -ENOMEM;
1477 char *buf;
1478 struct qstr name;
1479
1480 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1481 if (!buf)
1482 goto err;
1483
1484 err = -EINVAL;
1485 if (size < sizeof(outarg))
1486 goto err;
1487
1488 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1489 if (err)
1490 goto err;
1491
1492 err = -ENAMETOOLONG;
1493 if (outarg.namelen > FUSE_NAME_MAX)
1494 goto err;
1495
1496 err = -EINVAL;
1497 if (size != sizeof(outarg) + outarg.namelen + 1)
1498 goto err;
1499
1500 name.name = buf;
1501 name.len = outarg.namelen;
1502 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1503 if (err)
1504 goto err;
1505 fuse_copy_finish(cs);
1506 buf[outarg.namelen] = 0;
1507 name.hash = full_name_hash(name.name, name.len);
1508
1509 down_read(&fc->killsb);
1510 err = -ENOENT;
1511 if (fc->sb)
1512 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1513 up_read(&fc->killsb);
1514 kfree(buf);
1515 return err;
1516
1517 err:
1518 kfree(buf);
1519 fuse_copy_finish(cs);
1520 return err;
1521 }
1522
1523 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1524 struct fuse_copy_state *cs)
1525 {
1526 struct fuse_notify_delete_out outarg;
1527 int err = -ENOMEM;
1528 char *buf;
1529 struct qstr name;
1530
1531 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1532 if (!buf)
1533 goto err;
1534
1535 err = -EINVAL;
1536 if (size < sizeof(outarg))
1537 goto err;
1538
1539 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1540 if (err)
1541 goto err;
1542
1543 err = -ENAMETOOLONG;
1544 if (outarg.namelen > FUSE_NAME_MAX)
1545 goto err;
1546
1547 err = -EINVAL;
1548 if (size != sizeof(outarg) + outarg.namelen + 1)
1549 goto err;
1550
1551 name.name = buf;
1552 name.len = outarg.namelen;
1553 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1554 if (err)
1555 goto err;
1556 fuse_copy_finish(cs);
1557 buf[outarg.namelen] = 0;
1558 name.hash = full_name_hash(name.name, name.len);
1559
1560 down_read(&fc->killsb);
1561 err = -ENOENT;
1562 if (fc->sb)
1563 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1564 outarg.child, &name);
1565 up_read(&fc->killsb);
1566 kfree(buf);
1567 return err;
1568
1569 err:
1570 kfree(buf);
1571 fuse_copy_finish(cs);
1572 return err;
1573 }
1574
1575 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1576 struct fuse_copy_state *cs)
1577 {
1578 struct fuse_notify_store_out outarg;
1579 struct inode *inode;
1580 struct address_space *mapping;
1581 u64 nodeid;
1582 int err;
1583 pgoff_t index;
1584 unsigned int offset;
1585 unsigned int num;
1586 loff_t file_size;
1587 loff_t end;
1588
1589 err = -EINVAL;
1590 if (size < sizeof(outarg))
1591 goto out_finish;
1592
1593 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1594 if (err)
1595 goto out_finish;
1596
1597 err = -EINVAL;
1598 if (size - sizeof(outarg) != outarg.size)
1599 goto out_finish;
1600
1601 nodeid = outarg.nodeid;
1602
1603 down_read(&fc->killsb);
1604
1605 err = -ENOENT;
1606 if (!fc->sb)
1607 goto out_up_killsb;
1608
1609 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1610 if (!inode)
1611 goto out_up_killsb;
1612
1613 mapping = inode->i_mapping;
1614 index = outarg.offset >> PAGE_CACHE_SHIFT;
1615 offset = outarg.offset & ~PAGE_CACHE_MASK;
1616 file_size = i_size_read(inode);
1617 end = outarg.offset + outarg.size;
1618 if (end > file_size) {
1619 file_size = end;
1620 fuse_write_update_size(inode, file_size);
1621 }
1622
1623 num = outarg.size;
1624 while (num) {
1625 struct page *page;
1626 unsigned int this_num;
1627
1628 err = -ENOMEM;
1629 page = find_or_create_page(mapping, index,
1630 mapping_gfp_mask(mapping));
1631 if (!page)
1632 goto out_iput;
1633
1634 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1635 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1636 if (!err && offset == 0 && (num != 0 || file_size == end))
1637 SetPageUptodate(page);
1638 unlock_page(page);
1639 page_cache_release(page);
1640
1641 if (err)
1642 goto out_iput;
1643
1644 num -= this_num;
1645 offset = 0;
1646 index++;
1647 }
1648
1649 err = 0;
1650
1651 out_iput:
1652 iput(inode);
1653 out_up_killsb:
1654 up_read(&fc->killsb);
1655 out_finish:
1656 fuse_copy_finish(cs);
1657 return err;
1658 }
1659
1660 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1661 {
1662 release_pages(req->pages, req->num_pages, 0);
1663 }
1664
1665 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1666 struct fuse_notify_retrieve_out *outarg)
1667 {
1668 int err;
1669 struct address_space *mapping = inode->i_mapping;
1670 struct fuse_req *req;
1671 pgoff_t index;
1672 loff_t file_size;
1673 unsigned int num;
1674 unsigned int offset;
1675 size_t total_len = 0;
1676 int num_pages;
1677
1678 offset = outarg->offset & ~PAGE_CACHE_MASK;
1679 file_size = i_size_read(inode);
1680
1681 num = outarg->size;
1682 if (outarg->offset > file_size)
1683 num = 0;
1684 else if (outarg->offset + num > file_size)
1685 num = file_size - outarg->offset;
1686
1687 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1688 num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1689
1690 req = fuse_get_req(fc, num_pages);
1691 if (IS_ERR(req))
1692 return PTR_ERR(req);
1693
1694 req->in.h.opcode = FUSE_NOTIFY_REPLY;
1695 req->in.h.nodeid = outarg->nodeid;
1696 req->in.numargs = 2;
1697 req->in.argpages = 1;
1698 req->page_descs[0].offset = offset;
1699 req->end = fuse_retrieve_end;
1700
1701 index = outarg->offset >> PAGE_CACHE_SHIFT;
1702
1703 while (num && req->num_pages < num_pages) {
1704 struct page *page;
1705 unsigned int this_num;
1706
1707 page = find_get_page(mapping, index);
1708 if (!page)
1709 break;
1710
1711 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1712 req->pages[req->num_pages] = page;
1713 req->page_descs[req->num_pages].length = this_num;
1714 req->num_pages++;
1715
1716 offset = 0;
1717 num -= this_num;
1718 total_len += this_num;
1719 index++;
1720 }
1721 req->misc.retrieve_in.offset = outarg->offset;
1722 req->misc.retrieve_in.size = total_len;
1723 req->in.args[0].size = sizeof(req->misc.retrieve_in);
1724 req->in.args[0].value = &req->misc.retrieve_in;
1725 req->in.args[1].size = total_len;
1726
1727 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1728 if (err)
1729 fuse_retrieve_end(fc, req);
1730
1731 return err;
1732 }
1733
1734 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1735 struct fuse_copy_state *cs)
1736 {
1737 struct fuse_notify_retrieve_out outarg;
1738 struct inode *inode;
1739 int err;
1740
1741 err = -EINVAL;
1742 if (size != sizeof(outarg))
1743 goto copy_finish;
1744
1745 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1746 if (err)
1747 goto copy_finish;
1748
1749 fuse_copy_finish(cs);
1750
1751 down_read(&fc->killsb);
1752 err = -ENOENT;
1753 if (fc->sb) {
1754 u64 nodeid = outarg.nodeid;
1755
1756 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1757 if (inode) {
1758 err = fuse_retrieve(fc, inode, &outarg);
1759 iput(inode);
1760 }
1761 }
1762 up_read(&fc->killsb);
1763
1764 return err;
1765
1766 copy_finish:
1767 fuse_copy_finish(cs);
1768 return err;
1769 }
1770
1771 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1772 unsigned int size, struct fuse_copy_state *cs)
1773 {
1774 /* Don't try to move pages (yet) */
1775 cs->move_pages = 0;
1776
1777 switch (code) {
1778 case FUSE_NOTIFY_POLL:
1779 return fuse_notify_poll(fc, size, cs);
1780
1781 case FUSE_NOTIFY_INVAL_INODE:
1782 return fuse_notify_inval_inode(fc, size, cs);
1783
1784 case FUSE_NOTIFY_INVAL_ENTRY:
1785 return fuse_notify_inval_entry(fc, size, cs);
1786
1787 case FUSE_NOTIFY_STORE:
1788 return fuse_notify_store(fc, size, cs);
1789
1790 case FUSE_NOTIFY_RETRIEVE:
1791 return fuse_notify_retrieve(fc, size, cs);
1792
1793 case FUSE_NOTIFY_DELETE:
1794 return fuse_notify_delete(fc, size, cs);
1795
1796 default:
1797 fuse_copy_finish(cs);
1798 return -EINVAL;
1799 }
1800 }
1801
1802 /* Look up request on processing list by unique ID */
1803 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1804 {
1805 struct list_head *entry;
1806
1807 list_for_each(entry, &fc->processing) {
1808 struct fuse_req *req;
1809 req = list_entry(entry, struct fuse_req, list);
1810 if (req->in.h.unique == unique || req->intr_unique == unique)
1811 return req;
1812 }
1813 return NULL;
1814 }
1815
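/*
 * Copy the reply arguments from userspace.  An error reply may carry
 * no arguments at all; otherwise the size must match the declared
 * argument sizes exactly, except that the last argument may be
 * shorter when out->argvar is set.
 */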
1816 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1817 unsigned nbytes)
1818 {
1819 unsigned reqsize = sizeof(struct fuse_out_header);
1820
1821 if (out->h.error)
1822 return nbytes != reqsize ? -EINVAL : 0;
1823
1824 reqsize += len_args(out->numargs, out->args);
1825
1826 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1827 return -EINVAL;
1828 else if (reqsize > nbytes) {
1829 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1830 unsigned diffsize = reqsize - nbytes;
1831 if (diffsize > lastarg->size)
1832 return -EINVAL;
1833 lastarg->size -= diffsize;
1834 }
1835 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1836 out->page_zeroing);
1837 }
1838
1839 /*
1840 * Write a single reply to a request. First the header is copied from
1841 * the write buffer. The request is then searched on the processing
1842 * list by the unique ID found in the header. If found, then remove
1843 * it from the list and copy the rest of the buffer to the request.
1844 * The request is finished by calling request_end()
1845 */
1846 static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1847 struct fuse_copy_state *cs, size_t nbytes)
1848 {
1849 int err;
1850 struct fuse_req *req;
1851 struct fuse_out_header oh;
1852
1853 if (nbytes < sizeof(struct fuse_out_header))
1854 return -EINVAL;
1855
1856 err = fuse_copy_one(cs, &oh, sizeof(oh));
1857 if (err)
1858 goto err_finish;
1859
1860 err = -EINVAL;
1861 if (oh.len != nbytes)
1862 goto err_finish;
1863
1864 /*
1865 * A zero oh.unique indicates an unsolicited notification message,
1866 * and the error field contains the notification code.
1867 */
1868 if (!oh.unique) {
1869 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1870 return err ? err : nbytes;
1871 }
1872
1873 err = -EINVAL;
1874 if (oh.error <= -1000 || oh.error > 0)
1875 goto err_finish;
1876
1877 spin_lock(&fc->lock);
1878 err = -ENOENT;
1879 if (!fc->connected)
1880 goto err_unlock;
1881
1882 req = request_find(fc, oh.unique);
1883 if (!req)
1884 goto err_unlock;
1885
1886 if (req->aborted) {
1887 spin_unlock(&fc->lock);
1888 fuse_copy_finish(cs);
1889 spin_lock(&fc->lock);
1890 request_end(fc, req);
1891 return -ENOENT;
1892 }
1893 /* Is it an interrupt reply? */
1894 if (req->intr_unique == oh.unique) {
1895 err = -EINVAL;
1896 if (nbytes != sizeof(struct fuse_out_header))
1897 goto err_unlock;
1898
1899 if (oh.error == -ENOSYS)
1900 fc->no_interrupt = 1;
1901 else if (oh.error == -EAGAIN)
1902 queue_interrupt(fc, req);
1903
1904 spin_unlock(&fc->lock);
1905 fuse_copy_finish(cs);
1906 return nbytes;
1907 }
1908
1909 req->state = FUSE_REQ_WRITING;
1910 list_move(&req->list, &fc->io);
1911 req->out.h = oh;
1912 req->locked = 1;
1913 cs->req = req;
1914 if (!req->out.page_replace)
1915 cs->move_pages = 0;
1916 spin_unlock(&fc->lock);
1917
1918 err = copy_out_args(cs, &req->out, nbytes);
1919 fuse_copy_finish(cs);
1920
1921 spin_lock(&fc->lock);
1922 req->locked = 0;
1923 if (!err) {
1924 if (req->aborted)
1925 err = -ENOENT;
1926 } else if (!req->aborted)
1927 req->out.h.error = -EIO;
1928 request_end(fc, req);
1929
1930 return err ? err : nbytes;
1931
1932 err_unlock:
1933 spin_unlock(&fc->lock);
1934 err_finish:
1935 fuse_copy_finish(cs);
1936 return err;
1937 }
1938
1939 static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1940 unsigned long nr_segs, loff_t pos)
1941 {
1942 struct fuse_copy_state cs;
1943 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1944 if (!fc)
1945 return -EPERM;
1946
1947 fuse_copy_init(&cs, fc, 0, iov, nr_segs);
1948
1949 return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1950 }
1951
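/*
 * Splice a reply into the fuse device.  The pipe must contain at
 * least 'len' bytes; the needed buffers are detached (or referenced
 * and split) into a private array and handed to fuse_dev_do_write().
 * SPLICE_F_MOVE allows pages to be moved into the page cache instead
 * of copied.
 */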
1952 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1953 struct file *out, loff_t *ppos,
1954 size_t len, unsigned int flags)
1955 {
1956 unsigned nbuf;
1957 unsigned idx;
1958 struct pipe_buffer *bufs;
1959 struct fuse_copy_state cs;
1960 struct fuse_conn *fc;
1961 size_t rem;
1962 ssize_t ret;
1963
1964 fc = fuse_get_conn(out);
1965 if (!fc)
1966 return -EPERM;
1967
1968 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1969 if (!bufs)
1970 return -ENOMEM;
1971
1972 pipe_lock(pipe);
1973 nbuf = 0;
1974 rem = 0;
1975 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1976 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1977
1978 ret = -EINVAL;
1979 if (rem < len) {
1980 pipe_unlock(pipe);
1981 goto out;
1982 }
1983
1984 rem = len;
1985 while (rem) {
1986 struct pipe_buffer *ibuf;
1987 struct pipe_buffer *obuf;
1988
1989 BUG_ON(nbuf >= pipe->buffers);
1990 BUG_ON(!pipe->nrbufs);
1991 ibuf = &pipe->bufs[pipe->curbuf];
1992 obuf = &bufs[nbuf];
1993
1994 if (rem >= ibuf->len) {
1995 *obuf = *ibuf;
1996 ibuf->ops = NULL;
1997 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1998 pipe->nrbufs--;
1999 } else {
2000 ibuf->ops->get(pipe, ibuf);
2001 *obuf = *ibuf;
2002 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2003 obuf->len = rem;
2004 ibuf->offset += obuf->len;
2005 ibuf->len -= obuf->len;
2006 }
2007 nbuf++;
2008 rem -= obuf->len;
2009 }
2010 pipe_unlock(pipe);
2011
2012 fuse_copy_init(&cs, fc, 0, NULL, nbuf);
2013 cs.pipebufs = bufs;
2014 cs.pipe = pipe;
2015
2016 if (flags & SPLICE_F_MOVE)
2017 cs.move_pages = 1;
2018
2019 ret = fuse_dev_do_write(fc, &cs, len);
2020
2021 for (idx = 0; idx < nbuf; idx++) {
2022 struct pipe_buffer *buf = &bufs[idx];
2023 buf->ops->release(pipe, buf);
2024 }
2025 out:
2026 kfree(bufs);
2027 return ret;
2028 }
2029
2030 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2031 {
2032 unsigned mask = POLLOUT | POLLWRNORM;
2033 struct fuse_conn *fc = fuse_get_conn(file);
2034 if (!fc)
2035 return POLLERR;
2036
2037 poll_wait(file, &fc->waitq, wait);
2038
2039 spin_lock(&fc->lock);
2040 if (!fc->connected)
2041 mask = POLLERR;
2042 else if (request_pending(fc))
2043 mask |= POLLIN | POLLRDNORM;
2044 spin_unlock(&fc->lock);
2045
2046 return mask;
2047 }
2048
2049 /*
2050 * Abort all requests on the given list (pending or processing)
2051 *
2052 * This function releases and reacquires fc->lock
2053 */
2054 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2055 __releases(fc->lock)
2056 __acquires(fc->lock)
2057 {
2058 while (!list_empty(head)) {
2059 struct fuse_req *req;
2060 req = list_entry(head->next, struct fuse_req, list);
2061 req->out.h.error = -ECONNABORTED;
2062 request_end(fc, req);
2063 spin_lock(&fc->lock);
2064 }
2065 }
2066
2067 /*
2068 * Abort requests under I/O
2069 *
2070 * The requests are set to aborted and finished, and the request
2071 * waiter is woken up. This will make request_wait_answer() wait
2072 * until the request is unlocked and then return.
2073 *
2074 * If the request is asynchronous, then the end function needs to be
2075 * called after waiting for the request to be unlocked (if it was
2076 * locked).
2077 */
2078 static void end_io_requests(struct fuse_conn *fc)
2079 __releases(fc->lock)
2080 __acquires(fc->lock)
2081 {
2082 while (!list_empty(&fc->io)) {
2083 struct fuse_req *req =
2084 list_entry(fc->io.next, struct fuse_req, list);
2085 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
2086
2087 req->aborted = 1;
2088 req->out.h.error = -ECONNABORTED;
2089 req->state = FUSE_REQ_FINISHED;
2090 list_del_init(&req->list);
2091 wake_up(&req->waitq);
2092 if (end) {
2093 req->end = NULL;
2094 __fuse_get_request(req);
2095 spin_unlock(&fc->lock);
2096 wait_event(req->waitq, !req->locked);
2097 end(fc, req);
2098 fuse_put_request(fc, req);
2099 spin_lock(&fc->lock);
2100 }
2101 }
2102 }
2103
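/*
 * Fail all requests on the pending and processing lists with
 * -ECONNABORTED, after flushing the background queue into the
 * pending list; queued forgets are simply freed.
 */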
2104 static void end_queued_requests(struct fuse_conn *fc)
2105 __releases(fc->lock)
2106 __acquires(fc->lock)
2107 {
2108 fc->max_background = UINT_MAX;
2109 flush_bg_queue(fc);
2110 end_requests(fc, &fc->pending);
2111 end_requests(fc, &fc->processing);
2112 while (forget_pending(fc))
2113 kfree(dequeue_forget(fc, 1, NULL));
2114 }
2115
2116 static void end_polls(struct fuse_conn *fc)
2117 {
2118 struct rb_node *p;
2119
2120 p = rb_first(&fc->polled_files);
2121
2122 while (p) {
2123 struct fuse_file *ff;
2124 ff = rb_entry(p, struct fuse_file, polled_node);
2125 wake_up_interruptible_all(&ff->poll_wait);
2126
2127 p = rb_next(p);
2128 }
2129 }
2130
2131 /*
2132 * Abort all requests.
2133 *
2134 * Emergency exit in case of a malicious or accidental deadlock, or
2135 * just a hung filesystem.
2136 *
2137 * The same effect is usually achievable through killing the
2138 * filesystem daemon and all users of the filesystem. The exception
2139 * is the combination of an asynchronous request and the tricky
2140 * deadlock (see Documentation/filesystems/fuse.txt).
2141 *
2142 * During the aborting, progression of requests from the pending and
2143 * processing lists onto the io list, and progression of new requests
2144 * onto the pending list is prevented by fc->connected being false.
2145 *
2146 * Progression of requests under I/O to the processing list is
2147 * prevented by the req->aborted flag being true for these requests.
2148 * For this reason requests on the io list must be aborted first.
2149 */
2150 void fuse_abort_conn(struct fuse_conn *fc)
2151 {
2152 spin_lock(&fc->lock);
2153 if (fc->connected) {
2154 fc->connected = 0;
2155 fc->blocked = 0;
2156 fc->initialized = 1;
2157 end_io_requests(fc);
2158 end_queued_requests(fc);
2159 end_polls(fc);
2160 wake_up_all(&fc->waitq);
2161 wake_up_all(&fc->blocked_waitq);
2162 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
2163 }
2164 spin_unlock(&fc->lock);
2165 }
2166 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2167
2168 int fuse_dev_release(struct inode *inode, struct file *file)
2169 {
2170 struct fuse_conn *fc = fuse_get_conn(file);
2171 if (fc) {
2172 spin_lock(&fc->lock);
2173 fc->connected = 0;
2174 fc->blocked = 0;
2175 fc->initialized = 1;
2176 end_queued_requests(fc);
2177 end_polls(fc);
2178 wake_up_all(&fc->blocked_waitq);
2179 spin_unlock(&fc->lock);
2180 fuse_conn_put(fc);
2181 }
2182
2183 return 0;
2184 }
2185 EXPORT_SYMBOL_GPL(fuse_dev_release);
2186
2187 static int fuse_dev_fasync(int fd, struct file *file, int on)
2188 {
2189 struct fuse_conn *fc = fuse_get_conn(file);
2190 if (!fc)
2191 return -EPERM;
2192
2193 /* No locking - fasync_helper does its own locking */
2194 return fasync_helper(fd, file, on, &fc->fasync);
2195 }
2196
2197 const struct file_operations fuse_dev_operations = {
2198 .owner = THIS_MODULE,
2199 .llseek = no_llseek,
2200 .read = do_sync_read,
2201 .aio_read = fuse_dev_read,
2202 .splice_read = fuse_dev_splice_read,
2203 .write = do_sync_write,
2204 .aio_write = fuse_dev_write,
2205 .splice_write = fuse_dev_splice_write,
2206 .poll = fuse_dev_poll,
2207 .release = fuse_dev_release,
2208 .fasync = fuse_dev_fasync,
2209 };
2210 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2211
2212 static struct miscdevice fuse_miscdevice = {
2213 .minor = FUSE_MINOR,
2214 .name = "fuse",
2215 .fops = &fuse_dev_operations,
2216 };
2217
2218 int __init fuse_dev_init(void)
2219 {
2220 int err = -ENOMEM;
2221 fuse_req_cachep = kmem_cache_create("fuse_request",
2222 sizeof(struct fuse_req),
2223 0, 0, NULL);
2224 if (!fuse_req_cachep)
2225 goto out;
2226
2227 err = misc_register(&fuse_miscdevice);
2228 if (err)
2229 goto out_cache_clean;
2230
2231 return 0;
2232
2233 out_cache_clean:
2234 kmem_cache_destroy(fuse_req_cachep);
2235 out:
2236 return err;
2237 }
2238
2239 void fuse_dev_cleanup(void)
2240 {
2241 misc_deregister(&fuse_miscdevice);
2242 kmem_cache_destroy(fuse_req_cachep);
2243 }