/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/aio.h>
#include <linux/freezer.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
                              struct fuse_page_desc *page_descs,
                              unsigned npages)
{
        memset(req, 0, sizeof(*req));
        memset(pages, 0, sizeof(*pages) * npages);
        memset(page_descs, 0, sizeof(*page_descs) * npages);
        INIT_LIST_HEAD(&req->list);
        INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
        req->pages = pages;
        req->page_descs = page_descs;
        req->max_pages = npages;
}
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
        if (req) {
                struct page **pages;
                struct fuse_page_desc *page_descs;

                if (npages <= FUSE_REQ_INLINE_PAGES) {
                        pages = req->inline_pages;
                        page_descs = req->inline_page_descs;
                } else {
                        pages = kmalloc(sizeof(struct page *) * npages,
                                        flags);
                        page_descs = kmalloc(sizeof(struct fuse_page_desc) *
                                             npages, flags);
                }

                if (!pages || !page_descs) {
                        kfree(pages);
                        kfree(page_descs);
                        kmem_cache_free(fuse_req_cachep, req);
                        return NULL;
                }

                fuse_request_init(req, pages, page_descs, npages);
        }
        return req;
}
struct fuse_req *fuse_request_alloc(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);
struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_NOFS);
}
void fuse_request_free(struct fuse_req *req)
{
        if (req->pages != req->inline_pages) {
                kfree(req->pages);
                kfree(req->page_descs);
        }
        kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}
void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}
static void fuse_req_init_context(struct fuse_req *req)
{
        req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
        req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
        req->in.h.pid = current->pid;
}
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
        return !fc->initialized || (for_background && fc->blocked);
}
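/*
 * In other words: every allocation waits for the INIT reply
 * (fc->initialized), and background allocations are additionally
 * throttled while fc->blocked is set, i.e. while the number of
 * in-flight background requests has reached fc->max_background.
 */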
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                                       bool for_background)
{
        struct fuse_req *req;
        int err;
        atomic_inc(&fc->num_waiting);

        if (fuse_block_alloc(fc, for_background)) {
                sigset_t oldset;
                int intr;

                block_sigs(&oldset);
                intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
                                !fuse_block_alloc(fc, for_background));
                restore_sigs(&oldset);
                err = -EINTR;
                if (intr)
                        goto out;
        }

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;

        req = fuse_request_alloc(npages);
        err = -ENOMEM;
        if (!req) {
                if (for_background)
                        wake_up(&fc->blocked_waitq);
                goto out;
        }

        fuse_req_init_context(req);
        req->waiting = 1;
        req->background = for_background;
        return req;

 out:
        atomic_dec(&fc->num_waiting);
        return ERR_PTR(err);
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
        return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
                                             unsigned npages)
{
        return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                         struct file *file)
{
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
                wait_event(fc->reserved_req_waitq, ff->reserved_req);
                spin_lock(&fc->lock);
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
                        req->stolen_file = get_file(file);
                }
                spin_unlock(&fc->lock);
        } while (!req);

        return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
        BUG_ON(ff->reserved_req);
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file->reserved_req.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
                                             struct file *file)
{
        struct fuse_req *req;

        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, fc->initialized);
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);

        fuse_req_init_context(req);
        req->waiting = 1;
        req->background = 0;
        return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                if (unlikely(req->background)) {
                        /*
                         * We get here in the unlikely case that a background
                         * request was allocated but not sent
                         */
                        spin_lock(&fc->lock);
                        if (!fc->blocked)
                                wake_up(&fc->blocked_waitq);
                        spin_unlock(&fc->lock);
                }

                if (req->waiting)
                        atomic_dec(&fc->num_waiting);

                if (req->stolen_file)
                        put_reserved_req(fc, req);
                else
                        fuse_request_free(req);
        }
}
EXPORT_SYMBOL_GPL(fuse_put_request);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}
static u64 fuse_get_unique(struct fuse_conn *fc)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;

        return fc->reqctr;
}
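/*
 * A unique ID of zero is reserved: replies arriving with a zero
 * 'unique' field are treated as unsolicited notifications rather than
 * request replies (see fuse_dev_do_write()).
 */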
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        if (!req->waiting) {
                req->waiting = 1;
                atomic_inc(&fc->num_waiting);
        }
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
                       u64 nodeid, u64 nlookup)
{
        forget->forget_one.nodeid = nodeid;
        forget->forget_one.nlookup = nlookup;

        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->forget_list_tail->next = forget;
                fc->forget_list_tail = forget;
                wake_up(&fc->waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        } else {
                kfree(forget);
        }
        spin_unlock(&fc->lock);
}
static void flush_bg_queue(struct fuse_conn *fc)
{
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;

                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
        }
}
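/*
 * Background requests queue up on fc->bg_queue and are released to the
 * pending list at most fc->max_background at a time; request_end()
 * below calls back into this function as slots free up.
 */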
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
        list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
                req->background = 0;

                if (fc->num_background == fc->max_background)
                        fc->blocked = 0;

                /* Wake up next waiter, if any */
                if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
                        wake_up(&fc->blocked_waitq);

                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                        clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
                }
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
        }
        spin_unlock(&fc->lock);
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        if (signal_pending(current))
                return;

        spin_unlock(&fc->lock);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
}
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
        list_add_tail(&req->intr_entry, &fc->interrupts);
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
                wait_answer_interruptible(fc, req);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                req->interrupted = 1;
                if (req->state == FUSE_REQ_SENT)
                        queue_interrupt(fc, req);
        }

        if (!req->force) {
                sigset_t oldset;

                /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
                wait_answer_interruptible(fc, req);
                restore_sigs(&oldset);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                /* Request is not yet in userspace, bail out */
                if (req->state == FUSE_REQ_PENDING) {
                        list_del(&req->list);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
        }

        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
        spin_unlock(&fc->lock);

        while (req->state != FUSE_REQ_FINISHED)
                wait_event_freezable(req->waitq,
                                     req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);

        if (!req->aborted)
                return;

 aborted:
        BUG_ON(req->state != FUSE_REQ_FINISHED);
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
}
#ifdef MET_FUSEIO_TRACE
#define CREATE_TRACE_POINTS
#include <linux/met_ftrace_fuse.h>

void met_fuse(int t_pid, char *t_name, unsigned int op, unsigned int size,
              struct timespec s_time, struct timespec e_time)
{
        MET_FTRACE_PRINTK(met_fuse, t_pid, t_name, op, size, s_time, e_time);
}
#endif
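/*
 * The MET_* and FUSE_IOLOG_* macros used here and below are MediaTek
 * performance-tracing hooks whose definitions live outside this file
 * (assumed to come from the MET headers or fuse_i.h, compiling to no-ops
 * when tracing is disabled).  The #ifdef MET_FUSEIO_TRACE guard around
 * met_fuse() above is inferred from the guarded call sites in
 * fuse_request_send_ex().
 */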
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        BUG_ON(req->background);
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}
void fuse_request_send_ex(struct fuse_conn *fc, struct fuse_req *req,
                          size_t size) /* 'size' type assumed; callers in this file pass 0 */
{
#ifdef MET_FUSEIO_TRACE
        char name[TASK_COMM_LEN];
#endif
        MET_FUSE_IOLOG_INIT();

        req->isreply = 1;
        MET_FUSE_IOLOG_START();
        __fuse_request_send(fc, req);
        MET_FUSE_IOLOG_END();

        FUSE_IOLOG_PRINT(size, req->in.h.opcode);

#ifdef MET_FUSEIO_TRACE
        met_fuse(task_pid_nr(current), get_task_comm(name, current),
                 req->in.h.opcode, size, met_fuse_start_time,
                 met_fuse_end_time);
#endif
}
EXPORT_SYMBOL_GPL(fuse_request_send_ex);
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        fuse_request_send_ex(fc, req, 0);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
                                            struct fuse_req *req)
{
        BUG_ON(!req->background);
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
        if (fc->num_background == fc->congestion_threshold &&
            fc->bdi_initialized) {
                set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
}
static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fuse_request_send_nowait_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}
void fuse_request_send_background_ex(struct fuse_conn *fc, struct fuse_req *req,
                                     size_t size) /* 'size' type assumed, as above */
{
        req->isreply = 1;
        fuse_request_send_nowait(fc, req);
        FUSE_IOLOG_PRINT(size, req->in.h.opcode);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background_ex);
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        fuse_request_send_background_ex(fc, req, 0);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
static int fuse_request_send_notify_reply(struct fuse_conn *fc,
                                          struct fuse_req *req, u64 unique)
{
        int err = -ENODEV;

        req->isreply = 0;
        req->in.h.unique = unique;
        spin_lock(&fc->lock);
        if (fc->connected) {
                queue_request(fc, req);
                err = 0;
        }
        spin_unlock(&fc->lock);

        return err;
}
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
                                         struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait_locked(fc, req);
}
void fuse_force_forget(struct file *file, u64 nodeid)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_forget_in inarg;

        memset(&inarg, 0, sizeof(inarg));
        inarg.nlookup = 1;
        req = fuse_get_req_nofail_nopages(fc, file);
        req->in.h.opcode = FUSE_FORGET;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->isreply = 0;
        __fuse_request_send(fc, req);
        /* ignore errors */
        fuse_put_request(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->aborted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}
/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->aborted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}
struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        struct pipe_buffer *pipebufs;
        struct pipe_buffer *currbuf;
        struct pipe_inode_info *pipe;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
        unsigned move_pages:1;
};
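/*
 * A fuse_copy_state abstracts over three possible data carriers: a plain
 * user iovec (iov/seglen/addr), pipe buffers for splice I/O
 * (pipebufs/currbuf/pipe), and whole-page moves when move_pages is set.
 */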
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;

                if (!cs->write) {
                        buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
                } else {
                        kunmap(buf->page);
                        buf->len = PAGE_SIZE - cs->len;
                }
                cs->currbuf = NULL;
                cs->mapaddr = NULL;
        } else if (cs->mapaddr) {
                kunmap(cs->pg);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;

                if (!cs->write) {
                        err = buf->ops->confirm(cs->pipe, buf);
                        if (err)
                                return err;

                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
                        cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
                        cs->len = buf->len;
                        cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
                        struct page *page;

                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;

                        page = alloc_page(GFP_HIGHUSER);
                        if (!page)
                                return -ENOMEM;

                        buf->page = page;
                        buf->offset = 0;
                        buf->len = 0;

                        cs->currbuf = buf;
                        cs->mapaddr = kmap(page);
                        cs->buf = cs->mapaddr;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
                }
        } else {
                if (!cs->seglen) {
                        BUG_ON(!cs->nr_segs);
                        cs->seglen = cs->iov[0].iov_len;
                        cs->addr = (unsigned long) cs->iov[0].iov_base;
                        cs->iov++;
                        cs->nr_segs--;
                }
                err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
                if (err < 0)
                        return err;
                BUG_ON(err != 1);
                offset = cs->addr % PAGE_SIZE;
                cs->mapaddr = kmap(cs->pg);
                cs->buf = cs->mapaddr + offset;
                cs->len = min(PAGE_SIZE - offset, cs->seglen);
                cs->seglen -= cs->len;
                cs->addr += cs->len;
        }

        return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}
static int fuse_check_page(struct page *page)
{
        if (page_mapcount(page) ||
            page->mapping != NULL ||
            page_count(page) != 1 ||
            (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
             ~(1 << PG_locked |
               1 << PG_referenced |
               1 << PG_uptodate |
               1 << PG_lru |
               1 << PG_active |
               1 << PG_reclaim))) {
                printk(KERN_WARNING "fuse: trying to steal weird page\n");
                printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
                return 1;
        }
        return 0;
}
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
        int err;
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        err = buf->ops->confirm(cs->pipe, buf);
        if (err)
                return err;

        BUG_ON(!cs->nr_segs);
        cs->currbuf = buf;
        cs->len = buf->len;
        cs->pipebufs++;
        cs->nr_segs--;

        if (cs->len != PAGE_SIZE)
                goto out_fallback;

        if (buf->ops->steal(cs->pipe, buf) != 0)
                goto out_fallback;

        newpage = buf->page;

        if (WARN_ON(!PageUptodate(newpage)))
                return -EIO;

        ClearPageMappedToDisk(newpage);

        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;

        /*
         * This is a new and locked page, it shouldn't be mapped or
         * have any special flags on it
         */
        if (WARN_ON(page_mapped(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(page_has_private(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;

        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
        if (err) {
                unlock_page(newpage);
                return err;
        }

        page_cache_get(newpage);

        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add_file(newpage);

        err = 0;
        spin_lock(&cs->fc->lock);
        if (cs->req->aborted)
                err = -ENOENT;
        else
                *pagep = newpage;
        spin_unlock(&cs->fc->lock);

        if (err) {
                unlock_page(newpage);
                page_cache_release(newpage);
                return err;
        }

        unlock_page(oldpage);
        page_cache_release(oldpage);
        cs->len = 0;

        return 0;

out_fallback_unlock:
        unlock_page(newpage);
out_fallback:
        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
        cs->buf = cs->mapaddr + buf->offset;

        err = lock_request(cs->fc, cs->req);
        if (err)
                return err;

        return 1;
}
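/*
 * Note the return-value convention of fuse_try_move_page(): 0 means the
 * page was stolen and installed in the page cache, a negative value is an
 * error, and 1 means "fall back to an ordinary copy" -- the caller
 * (fuse_copy_page) only returns early for values <= 0.
 */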
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
{
        struct pipe_buffer *buf;

        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        buf = cs->pipebufs;
        page_cache_get(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = count;

        cs->pipebufs++;
        cs->nr_segs++;
        cs->len = 0;

        return 0;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                          unsigned offset, unsigned count, int zeroing)
{
        int err;
        struct page *page = *pagep;

        if (page && zeroing && count < PAGE_SIZE)
                clear_highpage(page);

        while (count) {
                if (cs->write && cs->pipebufs && page) {
                        return fuse_ref_page(cs, page, offset, count);
                } else if (!cs->len) {
                        if (cs->move_pages && page &&
                            offset == 0 && count == PAGE_SIZE) {
                                err = fuse_try_move_page(cs, pagep);
                                if (err <= 0)
                                        return err;
                        } else {
                                err = fuse_copy_fill(cs);
                                if (err)
                                        return err;
                        }
                }
                if (page) {
                        void *mapaddr = kmap_atomic(page);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                int err;
                unsigned offset = req->page_descs[i].offset;
                unsigned count = min(nbytes, req->page_descs[i].length);

                err = fuse_copy_page(cs, &req->pages[i], offset, count,
                                     zeroing);
                if (err)
                        return err;

                nbytes -= count;
        }
        return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}
static int forget_pending(struct fuse_conn *fc)
{
        return fc->forget_list_head.next != NULL;
}
static int request_pending(struct fuse_conn *fc)
{
        return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
                forget_pending(fc);
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && !request_pending(fc)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
        unsigned reqsize = sizeof(ih) + sizeof(arg);
        int err;

        list_del_init(&req->intr_entry);
        req->intr_unique = fuse_get_unique(fc);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
        ih.opcode = FUSE_INTERRUPT;
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;

        spin_unlock(&fc->lock);
        if (nbytes < reqsize)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        return err ? err : reqsize;
}
static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
                                               unsigned max,
                                               unsigned *countp)
{
        struct fuse_forget_link *head = fc->forget_list_head.next;
        struct fuse_forget_link **newhead = &head;
        unsigned count;

        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;

        fc->forget_list_head.next = *newhead;
        *newhead = NULL;
        if (fc->forget_list_head.next == NULL)
                fc->forget_list_tail = &fc->forget_list_head;

        if (countp != NULL)
                *countp = count;

        return head;
}
static int fuse_read_single_forget(struct fuse_conn *fc,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)
__releases(fc->lock)
{
        int err;
        struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
        struct fuse_forget_in arg = {
                .nlookup = forget->forget_one.nlookup,
        };
        struct fuse_in_header ih = {
                .opcode = FUSE_FORGET,
                .nodeid = forget->forget_one.nodeid,
                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),
        };

        spin_unlock(&fc->lock);
        kfree(forget);
        if (nbytes < ih.len)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}
static int fuse_read_batch_forget(struct fuse_conn *fc,
                                  struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
        int err;
        unsigned max_forgets;
        unsigned count;
        struct fuse_forget_link *head;
        struct fuse_batch_forget_in arg = { .count = 0 };
        struct fuse_in_header ih = {
                .opcode = FUSE_BATCH_FORGET,
                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),
        };

        if (nbytes < ih.len) {
                spin_unlock(&fc->lock);
                return -EINVAL;
        }

        max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
        head = dequeue_forget(fc, max_forgets, &count);
        spin_unlock(&fc->lock);

        arg.count = count;
        ih.len += count * sizeof(struct fuse_forget_one);
        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));

        while (head) {
                struct fuse_forget_link *forget = head;

                if (!err) {
                        err = fuse_copy_one(cs, &forget->forget_one,
                                            sizeof(forget->forget_one));
                }
                head = forget->next;
                kfree(forget);
        }

        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}
static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
                            size_t nbytes)
__releases(fc->lock)
{
        if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
                return fuse_read_single_forget(fc, cs, nbytes);
        else
                return fuse_read_batch_forget(fc, cs, nbytes);
}
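/*
 * Batched forgets (FUSE_BATCH_FORGET) need a userspace that speaks ABI
 * minor version 16 or later; older daemons get one FUSE_FORGET per read.
 */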
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
                                struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        unsigned reqsize;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            !request_pending(fc))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (!request_pending(fc))
                goto err_unlock;

        if (!list_empty(&fc->interrupts)) {
                req = list_entry(fc->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fc, cs, nbytes, req);
        }

        if (forget_pending(fc)) {
                if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
                        return fuse_read_forget(fc, cs, nbytes);

                if (fc->forget_batch <= -8)
                        fc->forget_batch = 16;
        }

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (nbytes < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        cs->req = req;
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (req->aborted) {
                request_end(fc, req);
                return -ENODEV;
        }
        if (err) {
                req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                if (req->interrupted)
                        queue_interrupt(fc, req);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}
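/*
 * For reference, a userspace daemon drives the read side above with a
 * simple loop on the /dev/fuse descriptor.  A minimal sketch, assuming a
 * descriptor `fd` obtained at mount time and eliding real opcode dispatch
 * and error handling:
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));   // one request
 *		struct fuse_in_header *in = (void *) buf;
 *		struct fuse_out_header out = {
 *			.len = sizeof(out),
 *			.error = -ENOSYS,                 // "not implemented"
 *			.unique = in->unique,             // must echo the ID
 *		};
 *		write(fd, &out, out.len);                 // one reply
 *	}
 *
 * Replies shorter than a fuse_out_header, or carrying an unknown unique
 * ID, are rejected by fuse_dev_do_write() below.
 */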
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 1, iov, nr_segs);

        return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
{
        int ret;
        int page_nr = 0;
        int do_wakeup = 0;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(in);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        fuse_copy_init(&cs, fc, 1, NULL, 0);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
        ret = fuse_dev_do_read(fc, in, &cs, len);
        if (ret < 0)
                goto out;

        ret = 0;
        pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                if (!ret)
                        ret = -EPIPE;
                goto out_unlock;
        }

        if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
                ret = -EIO;
                goto out_unlock;
        }

        while (page_nr < cs.nr_segs) {
                int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + newbuf;

                buf->page = bufs[page_nr].page;
                buf->offset = bufs[page_nr].offset;
                buf->len = bufs[page_nr].len;
                /*
                 * Need to be careful about this.  Having buf->ops in module
                 * code can Oops if the buffer persists after module unload.
                 */
                buf->ops = &nosteal_pipe_buf_ops;

                pipe->nrbufs++;
                page_nr++;
                ret += buf->len;
                do_wakeup = 1;
        }

out_unlock:
        pipe_unlock(pipe);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

out:
        for (; page_nr < cs.nr_segs; page_nr++)
                page_cache_release(bufs[page_nr].page);

        kfree(bufs);
        return ret;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)
{
        struct fuse_notify_poll_wakeup_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        fuse_copy_finish(cs);
        return fuse_notify_poll_wakeup(fc, &outarg);

err:
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;
        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_entry_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
                              struct fuse_copy_state *cs)
{
        struct fuse_notify_delete_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
                                               outarg.child, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
                             struct fuse_copy_state *cs)
{
        struct fuse_notify_store_out outarg;
        struct inode *inode;
        struct address_space *mapping;
        u64 nodeid;
        int err;
        pgoff_t index;
        unsigned int offset;
        unsigned int num;
        loff_t file_size;
        loff_t end;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto out_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto out_finish;

        err = -EINVAL;
        if (size - sizeof(outarg) != outarg.size)
                goto out_finish;

        nodeid = outarg.nodeid;

        down_read(&fc->killsb);

        err = -ENOENT;
        if (!fc->sb)
                goto out_up_killsb;

        inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
        if (!inode)
                goto out_up_killsb;

        mapping = inode->i_mapping;
        index = outarg.offset >> PAGE_CACHE_SHIFT;
        offset = outarg.offset & ~PAGE_CACHE_MASK;
        file_size = i_size_read(inode);
        end = outarg.offset + outarg.size;
        if (end > file_size) {
                file_size = end;
                fuse_write_update_size(inode, file_size);
        }

        num = outarg.size;
        while (num) {
                struct page *page;
                unsigned int this_num;

                err = -ENOMEM;
                page = find_or_create_page(mapping, index,
                                           mapping_gfp_mask(mapping));
                if (!page)
                        goto out_iput;

                this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
                err = fuse_copy_page(cs, &page, offset, this_num, 0);
                if (!err && offset == 0 && (num != 0 || file_size == end))
                        SetPageUptodate(page);
                unlock_page(page);
                page_cache_release(page);

                if (err)
                        goto out_iput;

                num -= this_num;
                offset = 0;
                index++;
        }

        err = 0;

out_iput:
        iput(inode);
out_up_killsb:
        up_read(&fc->killsb);
out_finish:
        fuse_copy_finish(cs);
        return err;
}
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
        release_pages(req->pages, req->num_pages, 0);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                         struct fuse_notify_retrieve_out *outarg)
{
        int err;
        struct address_space *mapping = inode->i_mapping;
        struct fuse_req *req;
        pgoff_t index;
        loff_t file_size;
        unsigned int num;
        unsigned int offset;
        size_t total_len = 0;
        int num_pages;

        offset = outarg->offset & ~PAGE_CACHE_MASK;
        file_size = i_size_read(inode);

        num = outarg->size;
        if (outarg->offset > file_size)
                num = 0;
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;

        num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

        req = fuse_get_req(fc, num_pages);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->in.h.opcode = FUSE_NOTIFY_REPLY;
        req->in.h.nodeid = outarg->nodeid;
        req->in.numargs = 2;
        req->in.argpages = 1;
        req->page_descs[0].offset = offset;
        req->end = fuse_retrieve_end;

        index = outarg->offset >> PAGE_CACHE_SHIFT;

        while (num && req->num_pages < num_pages) {
                struct page *page;
                unsigned int this_num;

                page = find_get_page(mapping, index);
                if (!page)
                        break;

                this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
                req->pages[req->num_pages] = page;
                req->page_descs[req->num_pages].length = this_num;
                req->num_pages++;

                offset = 0;
                num -= this_num;
                total_len += this_num;
                index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
        req->in.args[0].size = sizeof(req->misc.retrieve_in);
        req->in.args[0].value = &req->misc.retrieve_in;
        req->in.args[1].size = total_len;

        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
        if (err)
                fuse_retrieve_end(fc, req);

        return err;
}
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
                                struct fuse_copy_state *cs)
{
        struct fuse_notify_retrieve_out outarg;
        struct inode *inode;
        int err;

        err = -EINVAL;
        if (size != sizeof(outarg))
                goto copy_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto copy_finish;

        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                u64 nodeid = outarg.nodeid;

                inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
                if (inode) {
                        err = fuse_retrieve(fc, inode, &outarg);
                        iput(inode);
                }
        }
        up_read(&fc->killsb);

        return err;

copy_finish:
        fuse_copy_finish(cs);
        return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
                       unsigned int size, struct fuse_copy_state *cs)
{
        switch (code) {
        case FUSE_NOTIFY_POLL:
                return fuse_notify_poll(fc, size, cs);

        case FUSE_NOTIFY_INVAL_INODE:
                return fuse_notify_inval_inode(fc, size, cs);

        case FUSE_NOTIFY_INVAL_ENTRY:
                return fuse_notify_inval_entry(fc, size, cs);

        case FUSE_NOTIFY_STORE:
                return fuse_notify_store(fc, size, cs);

        case FUSE_NOTIFY_RETRIEVE:
                return fuse_notify_retrieve(fc, size, cs);

        case FUSE_NOTIFY_DELETE:
                return fuse_notify_delete(fc, size, cs);

        default:
                fuse_copy_finish(cs);
                return -EINVAL;
        }
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique || req->intr_unique == unique)
                        return req;
        }
        return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}
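/*
 * The argvar case above covers replies whose last argument is
 * legitimately variable-length (e.g. a READ returning less data than
 * requested): the last argument is shrunk by the missing amount instead
 * of failing the reply.
 */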
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
                                 struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_req *req;
        struct fuse_out_header oh;

        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;

        err = -EINVAL;
        if (oh.len != nbytes)
                goto err_finish;

        /*
         * Zero oh.unique indicates unsolicited notification message
         * and error contains notification code.
         */
        if (!oh.unique) {
                err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
                return err ? err : nbytes;
        }

        err = -EINVAL;
        if (oh.error <= -1000 || oh.error > 0)
                goto err_finish;

        spin_lock(&fc->lock);
        err = -ENOENT;
        if (!fc->connected)
                goto err_unlock;

        req = request_find(fc, oh.unique);
        if (!req)
                goto err_unlock;

        if (req->aborted) {
                spin_unlock(&fc->lock);
                fuse_copy_finish(cs);
                spin_lock(&fc->lock);
                request_end(fc, req);
                return -ENOENT;
        }
        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
                err = -EINVAL;
                if (nbytes != sizeof(struct fuse_out_header))
                        goto err_unlock;

                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
                        queue_interrupt(fc, req);

                spin_unlock(&fc->lock);
                fuse_copy_finish(cs);
                return nbytes;
        }

        req->state = FUSE_REQ_WRITING;
        list_move(&req->list, &fc->io);
        req->out.h = oh;
        req->locked = 1;
        cs->req = req;
        if (!req->out.page_replace)
                cs->move_pages = 0;
        spin_unlock(&fc->lock);

        err = copy_out_args(cs, &req->out, nbytes);
        fuse_copy_finish(cs);

        spin_lock(&fc->lock);
        req->locked = 0;
        if (!err) {
                if (req->aborted)
                        err = -ENOENT;
        } else if (!req->aborted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fc->lock);
 err_finish:
        fuse_copy_finish(cs);
        return err;
}
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 0, iov, nr_segs);

        return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                                     struct file *out, loff_t *ppos,
                                     size_t len, unsigned int flags)
{
        unsigned nbuf;
        unsigned idx;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc;
        size_t rem;
        ssize_t ret;

        fc = fuse_get_conn(out);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        pipe_lock(pipe);
        nbuf = 0;
        rem = 0;
        for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

        ret = -EINVAL;
        if (rem < len) {
                pipe_unlock(pipe);
                goto out;
        }

        rem = len;
        while (rem) {
                struct pipe_buffer *ibuf;
                struct pipe_buffer *obuf;

                BUG_ON(nbuf >= pipe->buffers);
                BUG_ON(!pipe->nrbufs);
                ibuf = &pipe->bufs[pipe->curbuf];
                obuf = &bufs[nbuf];

                if (rem >= ibuf->len) {
                        *obuf = *ibuf;
                        ibuf->ops = NULL;
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
                        ibuf->ops->get(pipe, ibuf);
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
                        ibuf->offset += obuf->len;
                        ibuf->len -= obuf->len;
                }
                nbuf++;
                rem -= obuf->len;
        }
        pipe_unlock(pipe);

        fuse_copy_init(&cs, fc, 0, NULL, nbuf);
        cs.pipebufs = bufs;
        cs.pipe = pipe;

        if (flags & SPLICE_F_MOVE)
                cs.move_pages = 1;

        ret = fuse_dev_do_write(fc, &cs, len);

        for (idx = 0; idx < nbuf; idx++) {
                struct pipe_buffer *buf = &bufs[idx];
                buf->ops->release(pipe, buf);
                buf->ops = NULL;
        }
out:
        kfree(bufs);
        return ret;
}
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (request_pending(fc))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);

        return mask;
}
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->aborted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        fuse_put_request(fc, req);
                        spin_lock(&fc->lock);
                }
        }
}
static void end_queued_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
        fc->max_background = UINT_MAX;
        flush_bg_queue(fc);
        end_requests(fc, &fc->pending);
        end_requests(fc, &fc->processing);
        while (forget_pending(fc))
                kfree(dequeue_forget(fc, 1, NULL));
}
static void end_polls(struct fuse_conn *fc)
{
        struct rb_node *p;

        p = rb_first(&fc->polled_files);

        while (p) {
                struct fuse_file *ff;
                ff = rb_entry(p, struct fuse_file, polled_node);
                wake_up_interruptible_all(&ff->poll_wait);

                p = rb_next(p);
        }
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                fc->blocked = 0;
                fc->initialized = 1;
                end_io_requests(fc);
                end_queued_requests(fc);
                end_polls(fc);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
                fc->blocked = 0;
                fc->initialized = 1;
                end_queued_requests(fc);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
        .write          = do_sync_write,
        .aio_write      = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
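/*
 * The device node behind these operations is /dev/fuse (misc minor
 * FUSE_MINOR); the "devname:fuse" module alias near the top of this file
 * lets devtmpfs/udev create the node and load the module on demand.
 */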
static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}
void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}