/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/
#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/aio.h>
#include <linux/falloc.h>
#include <asm/div64.h>
struct mutex fuse_iolog_lock;

static struct fuse_proc_info fuse_iolog[FUSE_IOLOG_MAX];
static struct timespec fuse_iolog_time;
static struct task_struct *fuse_iolog_thread = NULL;
static void fuse_time_diff(struct timespec *start,
                           struct timespec *end,
                           struct timespec *diff)
{
        if ((end->tv_nsec - start->tv_nsec) < 0) {
                diff->tv_sec = end->tv_sec - start->tv_sec - 1;
                diff->tv_nsec = 1000000000 + end->tv_nsec - start->tv_nsec;
        } else {
                diff->tv_sec = end->tv_sec - start->tv_sec;
                diff->tv_nsec = end->tv_nsec - start->tv_nsec;
        }
}
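/*
 * Worked example (illustrative only): for start = {3, 700000000} and
 * end = {5, 200000000}, the nsec difference is negative, so the borrow
 * branch yields diff = {5 - 3 - 1, 1000000000 + 200000000 - 700000000}
 * = {1, 500000000}, i.e. 1.5 s, as expected.
 */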
struct fuse_iolog_type_map;

#define FUSE_IOLOG_TYPE_MAX 46

static const char *fuse_iolog_type[FUSE_IOLOG_TYPE_MAX] = {
        "notify_reply", /* 41 */
        "batch_forget", /* 42 */
};

static const char *fuse_iolog_type2str(int type)
{
        if (type >= 0 && type < FUSE_IOLOG_TYPE_MAX)
                ;       /* a known opcode indexes the table directly */
        else if (type == CUSE_INIT)
                type = FUSE_IOLOG_TYPE_MAX - 1;
        else
                type = FUSE_IOLOG_TYPE_MAX - 2; /* unknown opcode */

        return fuse_iolog_type[type];
}

int fuse_iolog_print(void)
{
        int i, n, len;
        char buf[FUSE_IOLOG_BUFLEN], *ptr;

        ptr = &buf[0];
        len = FUSE_IOLOG_BUFLEN - 1;

        for (i = 0; i < FUSE_IOLOG_MAX && fuse_iolog[i].valid; i++) {
                if (fuse_iolog[i].read.count || fuse_iolog[i].write.count) {
                        n = snprintf(ptr, len, "{%d:R(%d,%d,%d),W(%d,%d,%d)}",
                                     fuse_iolog[i].pid,
                                     fuse_iolog[i].read.bytes,
                                     fuse_iolog[i].read.count,
                                     fuse_iolog[i].read.us,
                                     fuse_iolog[i].write.bytes,
                                     fuse_iolog[i].write.count,
                                     fuse_iolog[i].write.us);
                        if (n < 0 || n >= len)
                                goto overflow;
                        ptr += n;
                        len -= n;
                }

                if (fuse_iolog[i].misc_type) {
                        n = snprintf(ptr, len, "{%d:%s(%d,%d,%d)}",
                                     fuse_iolog[i].pid,
                                     fuse_iolog_type2str(fuse_iolog[i].misc_type),
                                     fuse_iolog[i].misc.bytes,
                                     fuse_iolog[i].misc.count,
                                     fuse_iolog[i].misc.us);
                        if (n < 0 || n >= len)
                                goto overflow;
                        ptr += n;
                        len -= n;
                }
        }

        if (ptr != &buf[0])
                xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "FUSEIO %s\n", buf);

        return ptr - &buf[0];

overflow:
        xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG",
                    "FUSEIO log buffer overflow\n");
        return -1;
}

void fuse_iolog_proc_clear(void)
{
        memset(&fuse_iolog[0], 0,
               sizeof(struct fuse_proc_info) * FUSE_IOLOG_MAX);
        get_monotonic_boottime(&fuse_iolog_time);
}

inline __u32 fuse_iolog_timeus(struct timespec *t)
{
        __u32 _t, us;

        us = t->tv_nsec / 1000;

        if (t->tv_sec > 3600)
                return 0xD693A400; /* 3600000000 */

        _t = t->tv_sec * 1000000 + us;

        return _t;
}
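/*
 * Sanity note on the cap above: it encodes one hour in microseconds.
 * 3600 s * 1,000,000 us/s = 3,600,000,000 us = 0xD693A400, which also
 * keeps the value representable in the unsigned 32-bit __u32 return
 * type (UINT32_MAX corresponds to roughly 4,294 s of microseconds).
 */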
__u32 fuse_iolog_timeus_diff(struct timespec *start, struct timespec *end)
{
        struct timespec diff;

        fuse_time_diff(start, end, &diff);
        return fuse_iolog_timeus(&diff);
}

inline int fuse_iolog_proc_update(struct fuse_proc_info *info,
                                  __u32 io_bytes, int type,
                                  struct timespec *diff)
{
        struct fuse_rw_info *rwi;
        __u32 _t;

        _t = fuse_iolog_timeus(diff);

        if (type == FUSE_READ)
                rwi = &info->read;
        else if (type == FUSE_WRITE)
                rwi = &info->write;
        else {
                if (info->misc_type == 0)
                        info->misc_type = type;
                else if (info->misc_type != type) /* misc type mismatch => continue */
                        return -1;
                rwi = &info->misc;
        }

        rwi->bytes += io_bytes;
        rwi->count++;
        rwi->us += _t;
        return 0;
}

static int fuse_iolog_watch(void *arg)
{
        int n;
        unsigned int timeout;
        struct timespec curr, diff;

        while (1) {
                if (kthread_should_stop())
                        break;

                get_monotonic_boottime(&curr);

                mutex_lock(&fuse_iolog_lock);
                fuse_time_diff(&fuse_iolog_time, &curr, &diff);

                n = fuse_iolog_print();
                if (n > 0)
                        fuse_iolog_proc_clear();

                mutex_unlock(&fuse_iolog_lock);

                set_current_state(TASK_INTERRUPTIBLE);
                timeout = schedule_timeout(FUSE_IOLOG_LATENCY * HZ);
        }

        return 0;
}

void fuse_iolog_init(void)
{
        int ret;

        mutex_init(&fuse_iolog_lock);
        mutex_lock(&fuse_iolog_lock);
        fuse_iolog_proc_clear();
        mutex_unlock(&fuse_iolog_lock);

        fuse_iolog_thread = kthread_create(fuse_iolog_watch, NULL, "fuse_log");
        if (IS_ERR(fuse_iolog_thread)) {
                ret = PTR_ERR(fuse_iolog_thread);
                xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG",
                            "Failed to create fuse_log thread %d\n", ret);
                fuse_iolog_thread = NULL;
                return;
        }
        wake_up_process(fuse_iolog_thread);
}

void fuse_iolog_exit(void)
{
        kthread_stop(fuse_iolog_thread);
}

void fuse_iolog_add(__u32 io_bytes, int type,
                    struct timespec *start,
                    struct timespec *end)
{
        struct fuse_proc_info *info;
        struct timespec diff;
        pid_t pid;
        int i;

        pid = task_pid_nr(current);
        fuse_time_diff(start, end, &diff);

        mutex_lock(&fuse_iolog_lock);

        for (i = 0; i < FUSE_IOLOG_MAX; i++) {
                info = &fuse_iolog[i];
                if (info->valid) {
                        if (info->pid == pid) {
                                if (fuse_iolog_proc_update(info, io_bytes,
                                                           type, &diff)) {
                                        /* ops mismatch: try another slot */
                                        continue;
                                }
                                goto out;
                        }
                } else {
                        info->valid = 1;
                        info->pid = pid;
                        fuse_iolog_proc_update(info, io_bytes, type, &diff);
                        if (i == 0)
                                get_monotonic_boottime(&fuse_iolog_time);
                        goto out;
                }
        }

        if (i == FUSE_IOLOG_MAX) {
                /* table full: reset the accumulated records, reuse slot 0 */
                fuse_iolog_proc_clear();
                info = &fuse_iolog[0];
                info->valid = 1;
                info->pid = pid;
                fuse_iolog_proc_update(info, io_bytes, type, &diff);
        }

out:
        mutex_unlock(&fuse_iolog_lock);
}
static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                          int opcode, struct fuse_open_out *outargp)
{
        struct fuse_open_in inarg;
        struct fuse_req *req;
        int err;

        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
        if (!fc->atomic_o_trunc)
                inarg.flags &= ~O_TRUNC;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(*outargp);
        req->out.args[0].value = outargp;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
        struct fuse_file *ff;

        ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (unlikely(!ff))
                return NULL;

        ff->fc = fc;
        ff->reserved_req = fuse_request_alloc(0);
        if (unlikely(!ff->reserved_req)) {
                kfree(ff);
                return NULL;
        }

        INIT_LIST_HEAD(&ff->write_entry);
        atomic_set(&ff->count, 0);
        RB_CLEAR_NODE(&ff->polled_node);
        init_waitqueue_head(&ff->poll_wait);

        spin_lock(&fc->lock);
        ff->kh = ++fc->khctr;
        spin_unlock(&fc->lock);

        return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
        fuse_request_free(ff->reserved_req);
        kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
        atomic_inc(&ff->count);
        return ff;
}

static void fuse_release_async(struct work_struct *work)
{
        struct fuse_req *req;
        struct fuse_conn *fc;
        struct path path;

        req = container_of(work, struct fuse_req, misc.release.work);
        path = req->misc.release.path;
        fc = get_fuse_conn(path.dentry->d_inode);

        fuse_put_request(fc, req);
        path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
        if (fc->destroy_req) {
                /*
                 * If this is a fuseblk mount, then it's possible that
                 * releasing the path will result in releasing the
                 * super block and sending the DESTROY request.  If
                 * the server is single threaded, this would hang.
                 * For this reason do the path_put() in a separate
                 * thread.
                 */
                atomic_inc(&req->count);
                INIT_WORK(&req->misc.release.work, fuse_release_async);
                schedule_work(&req->misc.release.work);
        } else {
                path_put(&req->misc.release.path);
        }
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
        if (atomic_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;

                if (sync) {
                        req->background = 0;
                        fuse_request_send(ff->fc, req);
                        path_put(&req->misc.release.path);
                        fuse_put_request(ff->fc, req);
                } else {
                        req->end = fuse_release_end;
                        req->background = 1;
                        fuse_request_send_background(ff->fc, req);
                }
                kfree(ff);
        }
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                 bool isdir)
{
        struct fuse_open_out outarg;
        struct fuse_file *ff;
        int err;
        int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

        ff = fuse_file_alloc(fc);
        if (!ff)
                return -ENOMEM;

        err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
        if (err) {
                fuse_file_free(ff);
                return err;
        }

        if (isdir)
                outarg.open_flags &= ~FOPEN_DIRECT_IO;

        ff->fh = outarg.fh;
        ff->nodeid = nodeid;
        ff->open_flags = outarg.open_flags;
        file->private_data = fuse_file_get(ff);

        return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

void fuse_finish_open(struct inode *inode, struct file *file)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = get_fuse_conn(inode);

        if (ff->open_flags & FOPEN_DIRECT_IO)
                file->f_op = &fuse_direct_io_file_operations;
        if (!(ff->open_flags & FOPEN_KEEP_CACHE))
                invalidate_inode_pages2(inode->i_mapping);
        if (ff->open_flags & FOPEN_NONSEEKABLE)
                nonseekable_open(inode, file);
        if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
                struct fuse_inode *fi = get_fuse_inode(inode);

                spin_lock(&fc->lock);
                fi->attr_version = ++fc->attr_version;
                i_size_write(inode, 0);
                spin_unlock(&fc->lock);
                fuse_invalidate_attr(inode);
        }
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        err = generic_file_open(inode, file);
        if (err)
                return err;

        err = fuse_do_open(fc, get_node_id(inode), file, isdir);
        if (!err)
                fuse_finish_open(inode, file);

        return err;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
        struct fuse_conn *fc = ff->fc;
        struct fuse_req *req = ff->reserved_req;
        struct fuse_release_in *inarg = &req->misc.release.in;

        spin_lock(&fc->lock);
        list_del(&ff->write_entry);
        if (!RB_EMPTY_NODE(&ff->polled_node))
                rb_erase(&ff->polled_node, &fc->polled_files);
        spin_unlock(&fc->lock);

        wake_up_interruptible_all(&ff->poll_wait);

        inarg->fh = ff->fh;
        inarg->flags = flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_release_in);
        req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
        struct fuse_file *ff;
        struct fuse_req *req;

        ff = file->private_data;
        if (unlikely(!ff))
                return;

        req = ff->reserved_req;
        fuse_prepare_release(ff, file->f_flags, opcode);

        if (ff->flock) {
                struct fuse_release_in *inarg = &req->misc.release.in;
                inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
                inarg->lock_owner = fuse_lock_owner_id(ff->fc,
                                                       (fl_owner_t) file);
        }
        /* Hold vfsmount and dentry until release is finished */
        path_get(&file->f_path);
        req->misc.release.path = file->f_path;

        /*
         * Normally this will send the RELEASE request, however if
         * some asynchronous READ or WRITE requests are outstanding,
         * the sending will be delayed.
         *
         * Make the release synchronous if this is a fuseblk mount,
         * synchronous RELEASE is allowed (and desirable) in this case
         * because the server can be trusted not to screw up.
         */
        fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
        return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
        fuse_release_common(file, FUSE_RELEASE);

        /* return value is ignored by VFS */
        return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
        WARN_ON(atomic_read(&ff->count) > 1);
        fuse_prepare_release(ff, flags, FUSE_RELEASE);
        ff->reserved_req->force = 1;
        ff->reserved_req->background = 0;
        fuse_request_send(ff->fc, ff->reserved_req);
        fuse_put_request(ff->fc, ff->reserved_req);
        kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
        u32 *k = fc->scramble_key;
        u64 v = (unsigned long) id;
        u32 v0 = v;
        u32 v1 = v >> 32;
        u32 sum = 0;
        int i;

        for (i = 0; i < 32; i++) {
                v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
                sum += 0x9E3779B9;
                v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
        }

        return (u64) v0 + ((u64) v1 << 32);
}
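/*
 * Illustrative sketch (not part of this file): the loop above is one
 * 64-bit XTEA block encipherment with a fixed 32 rounds and the usual
 * key-schedule constant 0x9E3779B9. A self-contained userspace version
 * of the same transform, for reference:
 *
 *	static uint64_t xtea_encipher(uint64_t v, const uint32_t k[4])
 *	{
 *		uint32_t v0 = v, v1 = v >> 32, sum = 0;
 *		int i;
 *
 *		for (i = 0; i < 32; i++) {
 *			v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
 *			sum += 0x9E3779B9;
 *			v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum >> 11 & 3]);
 *		}
 *		return (uint64_t)v0 | ((uint64_t)v1 << 32);
 *	}
 *
 * The scrambling is keyed per connection (fc->scramble_key), so a
 * server cannot correlate lock owners across mounts, let alone recover
 * the files_struct pointer.
 */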
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;
        bool found = false;

        spin_lock(&fc->lock);
        list_for_each_entry(req, &fi->writepages, writepages_entry) {
                pgoff_t curr_index;

                BUG_ON(req->inode != inode);
                curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
                if (curr_index == index) {
                        found = true;
                        break;
                }
        }
        spin_unlock(&fc->lock);

        return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
        struct fuse_inode *fi = get_fuse_inode(inode);

        wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
        return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_flush_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if (fc->no_flush)
                return 0;

        req = fuse_get_req_nofail_nopages(fc, file);
        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.lock_owner = fuse_lock_owner_id(fc, id);
        req->in.h.opcode = FUSE_FLUSH;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->force = 1;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                fc->no_flush = 1;
                err = 0;
        }
        return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
        fuse_set_nowrite(inode);
        fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
                      int datasync, int isdir)
{
        struct inode *inode = file->f_mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_fsync_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
                return 0;

        mutex_lock(&inode->i_mutex);

        /*
         * Start writeback against all dirty pages of the inode, then
         * wait for all outstanding writes, before sending the FSYNC
         * request.
         */
        err = write_inode_now(inode, 0);
        if (err)
                goto out;

        fuse_sync_writes(inode);

        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.fsync_flags = datasync ? 1 : 0;
        req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                if (isdir)
                        fc->no_fsyncdir = 1;
                else
                        fc->no_fsync = 1;
                err = 0;
        }
out:
        mutex_unlock(&inode->i_mutex);
        return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
                      int datasync)
{
        return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
                    size_t count, int opcode)
{
        struct fuse_read_in *inarg = &req->misc.read.in;
        struct fuse_file *ff = file->private_data;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        inarg->flags = file->f_flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_read_in);
        req->in.args[0].value = inarg;
        req->out.argvar = 1;
        req->out.numargs = 1;
        req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
        unsigned i;

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (write)
                        set_page_dirty_lock(page);
                put_page(page);
        }
}

/*
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
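/*
 * Working the example through the arithmetic used below: io->offset is 0,
 * the short second request has in.offset == 32768 and a transferred size
 * of 1024, so its completion computes pos = 32768 - 0 + 1024 = 33792
 * (33K), while the fully-ACKed first request reports pos == -1. The
 * minimal non-negative pos is therefore 33K, the longest contiguous
 * prefix actually transferred.
 */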
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
        int left;

        spin_lock(&io->lock);
        if (err)
                io->err = io->err ? : err;
        else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
                io->bytes = pos;

        left = --io->reqs;
        spin_unlock(&io->lock);

        if (!left) {
                long res;

                if (io->err)
                        res = io->err;
                else if (io->bytes >= 0 && io->write)
                        res = -EIO;
                else {
                        res = io->bytes < 0 ? io->size : io->bytes;

                        if (!is_sync_kiocb(io->iocb)) {
                                struct path *path = &io->iocb->ki_filp->f_path;
                                struct inode *inode = path->dentry->d_inode;
                                struct fuse_conn *fc = get_fuse_conn(inode);
                                struct fuse_inode *fi = get_fuse_inode(inode);

                                spin_lock(&fc->lock);
                                fi->attr_version = ++fc->attr_version;
                                spin_unlock(&fc->lock);
                        }
                }

                aio_complete(io->iocb, res, 0);
                kfree(io);
        }
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_io_priv *io = req->io;
        ssize_t pos = -1;

        fuse_release_user_pages(req, !io->write);

        if (io->write) {
                if (req->misc.write.in.size != req->misc.write.out.size)
                        pos = req->misc.write.in.offset - io->offset +
                                req->misc.write.out.size;
        } else {
                if (req->misc.read.in.size != req->out.args[0].size)
                        pos = req->misc.read.in.offset - io->offset +
                                req->out.args[0].size;
        }

        fuse_aio_complete(io, req->out.h.error, pos);
}

static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
                size_t num_bytes, struct fuse_io_priv *io)
{
        spin_lock(&io->lock);
        io->size += num_bytes;
        io->reqs++;
        spin_unlock(&io->lock);

        req->io = io;
        req->end = fuse_aio_complete_req;

        __fuse_get_request(req);
        fuse_request_send_background(fc, req);

        return num_bytes;
}

static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
                             loff_t pos, size_t count, fl_owner_t owner)
{
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;

        fuse_read_fill(req, file, pos, count, FUSE_READ);
        if (owner != NULL) {
                struct fuse_read_in *inarg = &req->misc.read.in;

                inarg->read_flags |= FUSE_READ_LOCKOWNER;
                inarg->lock_owner = fuse_lock_owner_id(fc, owner);
        }

        if (io->async)
                return fuse_async_req_send(fc, req, count, io);

        fuse_request_send(fc, req);
        return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
                                  u64 attr_ver)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fc->lock);
        if (attr_ver == fi->attr_version && size < inode->i_size &&
            !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
                fi->attr_version = ++fc->attr_version;
                i_size_write(inode, size);
        }
        spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
        struct fuse_io_priv io = { .async = 0, .file = file };
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        size_t num_read;
        loff_t pos = page_offset(page);
        size_t count = PAGE_CACHE_SIZE;
        u64 attr_ver;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        /*
         * Page writeback can extend beyond the lifetime of the
         * page-cache page, so make sure we read a properly synced
         * page.
         */
        fuse_wait_on_page_writeback(inode, page->index);

        req = fuse_get_req(fc, 1);
        err = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        attr_ver = fuse_get_attr_version(fc);

        req->out.page_zeroing = 1;
        req->out.argpages = 1;
        req->num_pages = 1;
        req->pages[0] = page;
        req->page_descs[0].length = count;
        num_read = fuse_send_read(req, &io, pos, count, NULL);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        if (!err) {
                /*
                 * Short read means EOF.  If file size is larger, truncate it
                 */
                if (num_read < count)
                        fuse_read_update_size(inode, pos + num_read, attr_ver);

                SetPageUptodate(page);
        }

        fuse_invalidate_attr(inode); /* atime changed */
out:
        unlock_page(page);
        return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;
        size_t count = req->misc.read.in.size;
        size_t num_read = req->out.args[0].size;
        struct address_space *mapping = NULL;

        for (i = 0; mapping == NULL && i < req->num_pages; i++)
                mapping = req->pages[i]->mapping;

        if (mapping) {
                struct inode *inode = mapping->host;

                /*
                 * Short read means EOF. If file size is larger, truncate it
                 */
                if (!req->out.h.error && num_read < count) {
                        loff_t pos;

                        pos = page_offset(req->pages[0]) + num_read;
                        fuse_read_update_size(inode, pos,
                                              req->misc.read.attr_ver);
                }
                fuse_invalidate_attr(inode); /* atime changed */
        }

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (!req->out.h.error)
                        SetPageUptodate(page);
                else
                        SetPageError(page);
                unlock_page(page);
                page_cache_release(page);
        }
        if (req->ff)
                fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        loff_t pos = page_offset(req->pages[0]);
        size_t count = req->num_pages << PAGE_CACHE_SHIFT;

        req->out.argpages = 1;
        req->out.page_zeroing = 1;
        req->out.page_replace = 1;
        fuse_read_fill(req, file, pos, count, FUSE_READ);
        req->misc.read.attr_ver = fuse_get_attr_version(fc);
        if (fc->async_read) {
                req->ff = fuse_file_get(ff);
                req->end = fuse_readpages_end;
                fuse_request_send_background_ex(fc, req, count);
        } else {
                fuse_request_send_ex(fc, req, count);
                fuse_readpages_end(fc, req);
                fuse_put_request(fc, req);
        }
}

struct fuse_fill_data {
        struct fuse_req *req;
        struct file *file;
        struct inode *inode;
        unsigned nr_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
        struct fuse_fill_data *data = _data;
        struct fuse_req *req = data->req;
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);

        fuse_wait_on_page_writeback(inode, page->index);

        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
             (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
             req->pages[req->num_pages - 1]->index + 1 != page->index)) {
                int nr_alloc = min_t(unsigned, data->nr_pages,
                                     FUSE_MAX_PAGES_PER_REQ);
                fuse_send_readpages(req, data->file);
                if (fc->async_read)
                        req = fuse_get_req_for_background(fc, nr_alloc);
                else
                        req = fuse_get_req(fc, nr_alloc);

                data->req = req;
                if (IS_ERR(req)) {
                        unlock_page(page);
                        return PTR_ERR(req);
                }
        }

        if (WARN_ON(req->num_pages >= req->max_pages)) {
                fuse_put_request(fc, req);
                return -EIO;
        }

        page_cache_get(page);
        req->pages[req->num_pages] = page;
        req->page_descs[req->num_pages].length = PAGE_SIZE;
        req->num_pages++;
        data->nr_pages--;
        return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_fill_data data;
        int err;
        int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        data.file = file;
        data.inode = inode;
        if (fc->async_read)
                data.req = fuse_get_req_for_background(fc, nr_alloc);
        else
                data.req = fuse_get_req(fc, nr_alloc);
        data.nr_pages = nr_pages;
        err = PTR_ERR(data.req);
        if (IS_ERR(data.req))
                goto out;

        err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
        if (!err) {
                if (data.req->num_pages)
                        fuse_send_readpages(data.req, file);
                else
                        fuse_put_request(fc, data.req);
        }
out:
        return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                                  unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);

        /*
         * In auto invalidate mode, always update attributes on read.
         * Otherwise, only update if we attempt to read past EOF (to ensure
         * i_size is up to date).
         */
        if (fc->auto_inval_data ||
            (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
                int err;
                err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
                if (err)
                        return err;
        }

        return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
                            loff_t pos, size_t count)
{
        struct fuse_write_in *inarg = &req->misc.write.in;
        struct fuse_write_out *outarg = &req->misc.write.out;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        req->in.h.opcode = FUSE_WRITE;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 2;
        if (ff->fc->minor < 9)
                req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
        else
                req->in.args[0].size = sizeof(struct fuse_write_in);
        req->in.args[0].value = inarg;
        req->in.args[1].size = count;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(struct fuse_write_out);
        req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
                              loff_t pos, size_t count, fl_owner_t owner)
{
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        struct fuse_write_in *inarg = &req->misc.write.in;

        fuse_write_fill(req, ff, pos, count);
        inarg->flags = file->f_flags;
        if (owner != NULL) {
                inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
                inarg->lock_owner = fuse_lock_owner_id(fc, owner);
        }

        if (io->async)
                return fuse_async_req_send(fc, req, count, io);

        fuse_request_send_ex(fc, req, count);
        return req->misc.write.out.size;
}

void fuse_write_update_size(struct inode *inode, loff_t pos)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fc->lock);
        fi->attr_version = ++fc->attr_version;
        if (pos > inode->i_size)
                i_size_write(inode, pos);
        spin_unlock(&fc->lock);
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
                                    struct inode *inode, loff_t pos,
                                    size_t count)
{
        size_t res;
        unsigned offset;
        unsigned i;
        struct fuse_io_priv io = { .async = 0, .file = file };

        for (i = 0; i < req->num_pages; i++)
                fuse_wait_on_page_writeback(inode, req->pages[i]->index);

        res = fuse_send_write(req, &io, pos, count, NULL);

        offset = req->page_descs[0].offset;
        count = res;
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];

                if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
                        SetPageUptodate(page);

                if (count > PAGE_CACHE_SIZE - offset)
                        count -= PAGE_CACHE_SIZE - offset;
                else
                        count = 0;
                offset = 0;

                unlock_page(page);
                page_cache_release(page);
        }

        return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                               struct address_space *mapping,
                               struct iov_iter *ii, loff_t pos)
{
        struct fuse_conn *fc = get_fuse_conn(mapping->host);
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        size_t count = 0;
        int err;

        req->in.argpages = 1;
        req->page_descs[0].offset = offset;

        do {
                size_t tmp;
                struct page *page;
                pgoff_t index = pos >> PAGE_CACHE_SHIFT;
                size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
                                     iov_iter_count(ii));

                bytes = min_t(size_t, bytes, fc->max_write - count);

again:
                err = -EFAULT;
                if (iov_iter_fault_in_readable(ii, bytes))
                        break;

                err = -ENOMEM;
                page = grab_cache_page_write_begin(mapping, index, 0);
                if (!page)
                        break;

                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                pagefault_disable();
                tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
                pagefault_enable();
                flush_dcache_page(page);

                mark_page_accessed(page);

                iov_iter_advance(ii, tmp);
                if (!tmp) {
                        unlock_page(page);
                        page_cache_release(page);
                        bytes = min(bytes, iov_iter_single_seg_count(ii));
                        goto again;
                }

                err = 0;
                req->pages[req->num_pages] = page;
                req->page_descs[req->num_pages].length = tmp;
                req->num_pages++;

                count += tmp;
                pos += tmp;
                offset += tmp;
                if (offset == PAGE_CACHE_SIZE)
                        offset = 0;

                if (!fc->big_writes)
                        break;
        } while (iov_iter_count(ii) && count < fc->max_write &&
                 req->num_pages < req->max_pages && offset == 0);

        return count > 0 ? count : err;
}

static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
        return min_t(unsigned,
                     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
                     (pos >> PAGE_CACHE_SHIFT) + 1,
                     FUSE_MAX_PAGES_PER_REQ);
}
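/*
 * Worked example: with 4K pages, a write of len = 10000 bytes at
 * pos = 1000 touches ((1000 + 10000 - 1) >> 12) - (1000 >> 12) + 1 =
 * 2 - 0 + 1 = 3 page-cache pages, and the result is capped at
 * FUSE_MAX_PAGES_PER_REQ.
 */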
static ssize_t fuse_perform_write(struct file *file,
                                  struct address_space *mapping,
                                  struct iov_iter *ii, loff_t pos)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        int err = 0;
        ssize_t res = 0;

        if (is_bad_inode(inode))
                return -EIO;

        if (inode->i_size < pos + iov_iter_count(ii))
                set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

        do {
                struct fuse_req *req;
                ssize_t count;
                unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

                req = fuse_get_req(fc, nr_pages);
                if (IS_ERR(req)) {
                        err = PTR_ERR(req);
                        break;
                }

                count = fuse_fill_write_pages(req, mapping, ii, pos);
                if (count <= 0) {
                        err = count;
                } else {
                        size_t num_written;

                        num_written = fuse_send_write_pages(req, file, inode,
                                                            pos, count);
                        err = req->out.h.error;
                        if (!err) {
                                res += num_written;
                                pos += num_written;

                                /* break out of the loop on short write */
                                if (num_written != count)
                                        err = -EIO;
                        }
                }
                fuse_put_request(fc, req);
        } while (!err && iov_iter_count(ii));

        if (res > 0)
                fuse_write_update_size(inode, pos);

        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
        fuse_invalidate_attr(inode);

        return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count = 0;
        size_t ocount = 0;
        ssize_t written = 0;
        ssize_t written_buffered = 0;
        struct inode *inode = mapping->host;
        ssize_t err;
        struct iov_iter i;
        loff_t endbyte = 0;

        WARN_ON(iocb->ki_pos != pos);

        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
        if (err)
                return err;

        count = ocount;
        mutex_lock(&inode->i_mutex);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;

        if (count == 0)
                goto out;

        err = file_remove_suid(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

        if (file->f_flags & O_DIRECT) {
                written = generic_file_direct_write(iocb, iov, &nr_segs,
                                                    pos, &iocb->ki_pos,
                                                    count, ocount);
                if (written < 0 || written == count)
                        goto out;

                pos += written;
                count -= written;

                iov_iter_init(&i, iov, nr_segs, count, written);
                written_buffered = fuse_perform_write(file, mapping, &i, pos);
                if (written_buffered < 0) {
                        err = written_buffered;
                        goto out;
                }
                endbyte = pos + written_buffered - 1;

                err = filemap_write_and_wait_range(file->f_mapping, pos,
                                                   endbyte);
                if (err)
                        goto out;

                invalidate_mapping_pages(file->f_mapping,
                                         pos >> PAGE_CACHE_SHIFT,
                                         endbyte >> PAGE_CACHE_SHIFT);

                written += written_buffered;
                iocb->ki_pos = pos + written_buffered;
        } else {
                iov_iter_init(&i, iov, nr_segs, count, 0);
                written = fuse_perform_write(file, mapping, &i, pos);
                if (written >= 0)
                        iocb->ki_pos = pos + written;
        }
out:
        current->backing_dev_info = NULL;
        mutex_unlock(&inode->i_mutex);

        return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
                unsigned index, unsigned nr_pages)
{
        int i;

        for (i = index; i < index + nr_pages; i++)
                req->page_descs[i].length = PAGE_SIZE -
                        req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
        return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
                                        size_t max_size)
{
        return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
                               size_t *nbytesp, int write)
{
        size_t nbytes = 0;  /* # bytes already packed in req */

        /* Special case for kernel I/O: can copy directly into the buffer */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                unsigned long user_addr = fuse_get_user_addr(ii);
                size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

                if (write)
                        req->in.args[1].value = (void *) user_addr;
                else
                        req->out.args[0].value = (void *) user_addr;

                iov_iter_advance(ii, frag_size);
                *nbytesp = frag_size;

                return 0;
        }

        while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
                unsigned npages;
                unsigned long user_addr = fuse_get_user_addr(ii);
                unsigned offset = user_addr & ~PAGE_MASK;
                size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
                int ret;

                unsigned n = req->max_pages - req->num_pages;
                frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);

                npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
                npages = clamp(npages, 1U, n);

                ret = get_user_pages_fast(user_addr, npages, !write,
                                          &req->pages[req->num_pages]);
                if (ret < 0)
                        return ret;

                npages = ret;
                frag_size = min_t(size_t, frag_size,
                                  (npages << PAGE_SHIFT) - offset);
                iov_iter_advance(ii, frag_size);

                req->page_descs[req->num_pages].offset = offset;
                fuse_page_descs_length_init(req, req->num_pages, npages);

                req->num_pages += npages;
                req->page_descs[req->num_pages - 1].length -=
                        (npages << PAGE_SHIFT) - offset - frag_size;

                nbytes += frag_size;
        }

        if (write)
                req->in.argpages = 1;
        else
                req->out.argpages = 1;

        *nbytesp = nbytes;

        return 0;
}

static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
        struct iov_iter ii = *ii_p;
        int npages = 0;

        while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
                unsigned long user_addr = fuse_get_user_addr(&ii);
                unsigned offset = user_addr & ~PAGE_MASK;
                size_t frag_size = iov_iter_single_seg_count(&ii);

                npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
                iov_iter_advance(&ii, frag_size);
        }

        return min(npages, FUSE_MAX_PAGES_PER_REQ);
}
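/*
 * Worked example: a single 8192-byte segment whose user address starts
 * 100 bytes into a 4K page straddles (8192 + 100 + 4095) >> 12 = 3
 * pages, whereas the same length page-aligned would need only 2; the
 * total is still capped at FUSE_MAX_PAGES_PER_REQ.
 */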
ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                       unsigned long nr_segs, size_t count, loff_t *ppos,
                       int write)
{
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        size_t nmax = write ? fc->max_write : fc->max_read;
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;
        struct iov_iter ii;

        iov_iter_init(&ii, iov, nr_segs, count, 0);

        if (io->async)
                req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
        else
                req = fuse_get_req(fc, fuse_iter_npages(&ii));
        if (IS_ERR(req))
                return PTR_ERR(req);

        while (count) {
                size_t nres;
                fl_owner_t owner = current->files;
                size_t nbytes = min(count, nmax);
                int err = fuse_get_user_pages(req, &ii, &nbytes, write);
                if (err) {
                        res = err;
                        break;
                }

                if (write)
                        nres = fuse_send_write(req, io, pos, nbytes, owner);
                else
                        nres = fuse_send_read(req, io, pos, nbytes, owner);

                if (!io->async)
                        fuse_release_user_pages(req, !write);
                if (req->out.h.error) {
                        if (!res)
                                res = req->out.h.error;
                        break;
                } else if (nres > nbytes) {
                        res = -EIO;
                        break;
                }
                count -= nres;
                res += nres;
                pos += nres;
                if (nres != nbytes)
                        break;
                if (count) {
                        fuse_put_request(fc, req);
                        if (io->async)
                                req = fuse_get_req_for_background(fc,
                                        fuse_iter_npages(&ii));
                        else
                                req = fuse_get_req(fc, fuse_iter_npages(&ii));
                        if (IS_ERR(req))
                                break;
                }
        }
        if (!IS_ERR(req))
                fuse_put_request(fc, req);
        if (res > 0)
                *ppos = pos;

        return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
                                  const struct iovec *iov,
                                  unsigned long nr_segs, loff_t *ppos,
                                  size_t count)
{
        ssize_t res;
        struct file *file = io->file;
        struct inode *inode = file_inode(file);

        if (is_bad_inode(inode))
                return -EIO;

        res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);

        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct fuse_io_priv io = { .async = 0, .file = file };
        struct iovec iov = { .iov_base = buf, .iov_len = count };
        return __fuse_direct_read(&io, &iov, 1, ppos, count);
}

static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
                                   const struct iovec *iov,
                                   unsigned long nr_segs, loff_t *ppos)
{
        struct file *file = io->file;
        struct inode *inode = file_inode(file);
        size_t count = iov_length(iov, nr_segs);
        ssize_t res;

        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
                res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);

        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
        struct inode *inode = file_inode(file);
        ssize_t res;
        struct fuse_io_priv io = { .async = 0, .file = file };

        if (is_bad_inode(inode))
                return -EIO;

        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
        res = __fuse_direct_write(&io, &iov, 1, ppos);
        if (res > 0)
                fuse_write_update_size(inode, *ppos);
        mutex_unlock(&inode->i_mutex);

        return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
        __free_page(req->pages[0]);
        fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

        list_del(&req->writepages_entry);
        dec_bdi_stat(bdi, BDI_WRITEBACK);
        dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
        bdi_writeout_inc(bdi);
        wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
        struct fuse_write_in *inarg = &req->misc.write.in;

        if (!fc->connected)
                goto out_free;

        if (inarg->offset + PAGE_CACHE_SIZE <= size) {
                inarg->size = PAGE_CACHE_SIZE;
        } else if (inarg->offset < size) {
                inarg->size = size & (PAGE_CACHE_SIZE - 1);
        } else {
                /* Got truncated off completely */
                goto out_free;
        }

        req->in.args[1].size = inarg->size;
        fi->writectr++;
        fuse_request_send_background_locked(fc, req);
        return;

out_free:
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
        fuse_put_request(fc, req);
        spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;

        while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
                req = list_entry(fi->queued_writes.next, struct fuse_req, list);
                list_del_init(&req->list);
                fuse_send_writepage(fc, req);
        }
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);

        mapping_set_error(inode->i_mapping, req->out.h.error);
        spin_lock(&fc->lock);
        fi->writectr--;
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
}

static int fuse_writepage_locked(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;
        struct fuse_file *ff;
        struct page *tmp_page;

        set_page_writeback(page);

        req = fuse_request_alloc_nofs(1);
        if (!req)
                goto err;

        req->background = 1; /* writeback always goes to bg_queue */
        tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (!tmp_page)
                goto err_free;

        spin_lock(&fc->lock);
        BUG_ON(list_empty(&fi->write_files));
        ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
        req->ff = fuse_file_get(ff);
        spin_unlock(&fc->lock);

        fuse_write_fill(req, ff, page_offset(page), 0);

        copy_highpage(tmp_page, page);
        req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
        req->in.argpages = 1;
        req->num_pages = 1;
        req->pages[0] = tmp_page;
        req->page_descs[0].offset = 0;
        req->page_descs[0].length = PAGE_SIZE;
        req->end = fuse_writepage_end;
        req->inode = inode;

        inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
        inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

        spin_lock(&fc->lock);
        list_add(&req->writepages_entry, &fi->writepages);
        list_add_tail(&req->list, &fi->queued_writes);
        fuse_flush_writepages(inode);
        spin_unlock(&fc->lock);

        end_page_writeback(page);

        return 0;

err_free:
        fuse_request_free(req);
err:
        end_page_writeback(page);
        return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
        int err;

        err = fuse_writepage_locked(page);
        unlock_page(page);

        return err;
}

static int fuse_launder_page(struct page *page)
{
        int err = 0;
        if (clear_page_dirty_for_io(page)) {
                struct inode *inode = page->mapping->host;
                err = fuse_writepage_locked(page);
                if (!err)
                        fuse_wait_on_page_writeback(inode, page->index);
        }
        return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open file later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
        filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        /*
         * Don't use page->mapping as it may become NULL from a
         * concurrent truncate.
         */
        struct inode *inode = vma->vm_file->f_mapping->host;

        fuse_wait_on_page_writeback(inode, page->index);
        return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
        .close          = fuse_vma_close,
        .fault          = filemap_fault,
        .page_mkwrite   = fuse_page_mkwrite,
        .remap_pages    = generic_file_remap_pages,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
                struct inode *inode = file_inode(file);
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                struct fuse_file *ff = file->private_data;
                /*
                 * file may be written through mmap, so chain it onto the
                 * inode's write_files list
                 */
                spin_lock(&fc->lock);
                if (list_empty(&ff->write_entry))
                        list_add(&ff->write_entry, &fi->write_files);
                spin_unlock(&fc->lock);
        }
        file_accessed(file);
        vma->vm_ops = &fuse_file_vm_ops;
        return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Can't provide the coherency needed for MAP_SHARED */
        if (vma->vm_flags & VM_MAYSHARE)
                return -ENODEV;

        invalidate_inode_pages2(file->f_mapping);

        return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
                                  struct file_lock *fl)
{
        switch (ffl->type) {
        case F_UNLCK:
                break;

        case F_RDLCK:
        case F_WRLCK:
                if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
                    ffl->end < ffl->start)
                        return -EIO;

                fl->fl_start = ffl->start;
                fl->fl_end = ffl->end;
                fl->fl_pid = ffl->pid;
                break;

        default:
                return -EIO;
        }
        fl->fl_type = ffl->type;
        return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
                         const struct file_lock *fl, int opcode, pid_t pid,
                         int flock)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_lk_in *arg = &req->misc.lk_in;

        arg->fh = ff->fh;
        arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
        arg->lk.start = fl->fl_start;
        arg->lk.end = fl->fl_end;
        arg->lk.type = fl->fl_type;
        arg->lk.pid = pid;
        if (flock)
                arg->lk_flags |= FUSE_LK_FLOCK;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_lk_out outarg;
        int err;

        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err)
                err = convert_fuse_file_lock(&outarg.lk, fl);

        return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
        pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
        int err;

        if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
                /* NLM needs asynchronous locks, which we don't support yet */
                return -ENOLCK;
        }

        /* Unlock on close is handled by the flush method */
        if (fl->fl_flags & FL_CLOSE)
                return 0;

        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, opcode, pid, flock);
        fuse_request_send(fc, req);
        err = req->out.h.error;
        /* locking is restartable */
        if (err == -EINTR)
                err = -ERESTARTSYS;

        fuse_put_request(fc, req);
        return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (cmd == F_CANCELLK) {
                err = 0;
        } else if (cmd == F_GETLK) {
                if (fc->no_lock) {
                        posix_test_lock(file, fl);
                        err = 0;
                } else
                        err = fuse_getlk(file, fl);
        } else {
                if (fc->no_lock)
                        err = posix_lock_file(file, fl, NULL);
                else
                        err = fuse_setlk(file, fl, 0);
        }
        return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (fc->no_flock) {
                err = flock_lock_file_wait(file, fl);
        } else {
                struct fuse_file *ff = file->private_data;

                /* emulate flock with POSIX locks */
                fl->fl_owner = (fl_owner_t) file;
                ff->flock = true;
                err = fuse_setlk(file, fl, 1);
        }

        return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_bmap_in inarg;
        struct fuse_bmap_out outarg;
        int err;

        if (!inode->i_sb->s_bdev || fc->no_bmap)
                return 0;

        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req))
                return 0;

        memset(&inarg, 0, sizeof(inarg));
        inarg.block = block;
        inarg.blocksize = inode->i_sb->s_blocksize;
        req->in.h.opcode = FUSE_BMAP;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS)
                fc->no_bmap = 1;

        return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
        loff_t retval;
        struct inode *inode = file_inode(file);

        /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
        if (whence == SEEK_CUR || whence == SEEK_SET)
                return generic_file_llseek(file, offset, whence);

        mutex_lock(&inode->i_mutex);
        retval = fuse_update_attributes(inode, NULL, file, NULL);
        if (!retval)
                retval = generic_file_llseek(file, offset, whence);
        mutex_unlock(&inode->i_mutex);

        return retval;
}

static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
                                unsigned int nr_segs, size_t bytes,
                                bool to_user)
{
        struct iov_iter ii;
        int page_idx = 0;

        if (!bytes)
                return 0;

        iov_iter_init(&ii, iov, nr_segs, bytes, 0);

        while (iov_iter_count(&ii)) {
                struct page *page = pages[page_idx++];
                size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
                void *kaddr;

                kaddr = kmap(page);

                while (todo) {
                        char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
                        size_t iov_len = ii.iov->iov_len - ii.iov_offset;
                        size_t copy = min(todo, iov_len);
                        size_t left;

                        if (!to_user)
                                left = copy_from_user(kaddr, uaddr, copy);
                        else
                                left = copy_to_user(uaddr, kaddr, copy);

                        if (unlikely(left))
                                return -EFAULT;

                        iov_iter_advance(&ii, copy);
                        todo -= copy;
                        kaddr += copy;
                }

                kunmap(page);
        }

        return 0;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
                                     size_t transferred, unsigned count,
                                     bool is_compat)
{
#ifdef CONFIG_COMPAT
        if (count * sizeof(struct compat_iovec) == transferred) {
                struct compat_iovec *ciov = src;
                unsigned i;

                /*
                 * With this interface a 32bit server cannot support
                 * non-compat (i.e. ones coming from 64bit apps) ioctl
                 * requests
                 */
                if (!is_compat)
                        return -EINVAL;

                for (i = 0; i < count; i++) {
                        dst[i].iov_base = compat_ptr(ciov[i].iov_base);
                        dst[i].iov_len = ciov[i].iov_len;
                }
                return 0;
        }
#endif

        if (count * sizeof(struct iovec) != transferred)
                return -EIO;

        memcpy(dst, src, transferred);
        return 0;
}

/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
        size_t n;
        u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

        for (n = 0; n < count; n++, iov++) {
                if (iov->iov_len > (size_t) max)
                        return -ENOMEM;
                max -= iov->iov_len;
        }
        return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
                                 void *src, size_t transferred, unsigned count,
                                 bool is_compat)
{
        unsigned i;
        struct fuse_ioctl_iovec *fiov = src;

        if (fc->minor < 16) {
                return fuse_copy_ioctl_iovec_old(dst, src, transferred,
                                                 count, is_compat);
        }

        if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
                return -EIO;

        for (i = 0; i < count; i++) {
                /* Did the server supply an inappropriate value? */
                if (fiov[i].base != (unsigned long) fiov[i].base ||
                    fiov[i].len != (unsigned long) fiov[i].len)
                        return -EIO;

                dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
                dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
                if (is_compat &&
                    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
                     (compat_size_t) dst[i].iov_len != fiov[i].len))
                        return -EIO;
#endif
        }

        return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be NULL; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                   unsigned int flags)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        struct fuse_ioctl_in inarg = {
                .fh = ff->fh,
                .cmd = cmd,
                .arg = arg,
                .flags = flags
        };
        struct fuse_ioctl_out outarg;
        struct fuse_req *req = NULL;
        struct page **pages = NULL;
        struct iovec *iov_page = NULL;
        struct iovec *in_iov = NULL, *out_iov = NULL;
        unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
        size_t in_size, out_size, transferred;
        int err;

#if BITS_PER_LONG == 32
        inarg.flags |= FUSE_IOCTL_32BIT;
#else
        if (flags & FUSE_IOCTL_COMPAT)
                inarg.flags |= FUSE_IOCTL_32BIT;
#endif

        /* assume all the iovs returned by client always fits in a page */
        BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

        err = -ENOMEM;
        pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
        iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
        if (!pages || !iov_page)
                goto out;

        /*
         * If restricted, initialize IO parameters as encoded in @cmd.
         * RETRY from server is not allowed.
         */
        if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
                struct iovec *iov = iov_page;

                memset(iov, 0, sizeof(*iov));
                iov->iov_base = (void __user *)arg;
                iov->iov_len = _IOC_SIZE(cmd);

                if (_IOC_DIR(cmd) & _IOC_WRITE) {
                        in_iov = iov;
                        in_iovs = 1;
                }

                if (_IOC_DIR(cmd) & _IOC_READ) {
                        out_iov = iov;
                        out_iovs = 1;
                }
        }

retry:
        inarg.in_size = in_size = iov_length(in_iov, in_iovs);
        inarg.out_size = out_size = iov_length(out_iov, out_iovs);

        /*
         * Out data can be used either for actual out data or iovs,
         * make sure there always is at least one page.
         */
        out_size = max_t(size_t, out_size, PAGE_SIZE);
        max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

        /* make sure there are enough buffer pages and init request with them */
        err = -ENOMEM;
        if (max_pages > FUSE_MAX_PAGES_PER_REQ)
                goto out;
        while (num_pages < max_pages) {
                pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
                if (!pages[num_pages])
                        goto out;
                num_pages++;
        }

        req = fuse_get_req(fc, num_pages);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                req = NULL;
                goto out;
        }
        memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
        req->num_pages = num_pages;
        fuse_page_descs_length_init(req, 0, req->num_pages);

        /* okay, let's send it to the client */
        req->in.h.opcode = FUSE_IOCTL;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        if (in_size) {
                req->in.numargs++;
                req->in.args[1].size = in_size;
                req->in.argpages = 1;

                err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
                                           false);
                if (err)
                        goto out;
        }

        req->out.numargs = 2;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        req->out.args[1].size = out_size;
        req->out.argpages = 1;
        req->out.argvar = 1;

        fuse_request_send(fc, req);
        err = req->out.h.error;
        transferred = req->out.args[1].size;
        fuse_put_request(fc, req);
        req = NULL;
        if (err)
                goto out;

        /* did it ask for retry? */
        if (outarg.flags & FUSE_IOCTL_RETRY) {
                void *vaddr;

                /* no retry if in restricted mode */
                err = -EIO;
                if (!(flags & FUSE_IOCTL_UNRESTRICTED))
                        goto out;

                in_iovs = outarg.in_iovs;
                out_iovs = outarg.out_iovs;

                /*
                 * Make sure things are in boundary, separate checks
                 * are to protect against overflow.
                 */
                err = -ENOMEM;
                if (in_iovs > FUSE_IOCTL_MAX_IOV ||
                    out_iovs > FUSE_IOCTL_MAX_IOV ||
                    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
                        goto out;

                vaddr = kmap_atomic(pages[0]);
                err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
                                            transferred, in_iovs + out_iovs,
                                            (flags & FUSE_IOCTL_COMPAT) != 0);
                kunmap_atomic(vaddr);
                if (err)
                        goto out;

                in_iov = iov_page;
                out_iov = in_iov + in_iovs;

                err = fuse_verify_ioctl_iov(in_iov, in_iovs);
                if (err)
                        goto out;

                err = fuse_verify_ioctl_iov(out_iov, out_iovs);
                if (err)
                        goto out;

                goto retry;
        }

        err = -EIO;
        if (transferred > inarg.out_size)
                goto out;

        err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
out:
        if (req)
                fuse_put_request(fc, req);
        free_page((unsigned long) iov_page);
        while (num_pages)
                __free_page(pages[--num_pages]);
        kfree(pages);

        return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
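/*
 * Worked example of the restricted path above (illustrative names
 * only): for a bidirectional ioctl defined as _IOWR('X', 1, struct a)
 * with sizeof(struct a) == 16, _IOC_DIR(cmd) has both _IOC_WRITE and
 * _IOC_READ set, so the kernel itself builds
 *
 *	iov = { .iov_base = (void __user *)arg, .iov_len = 16 }
 *
 * and uses it as both in_iov and out_iov; the server never gets to
 * request FUSE_IOCTL_RETRY in this mode.
 */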
long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}
static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
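/*
 * fuse_find_polled_node() returns the address of the link pointer
 * rather than the node: on a miss that address is exactly the slot a
 * new node must be spliced into, which is the standard kernel rbtree
 * insertion idiom (see the rb_link_node()/rb_insert_color() pair in
 * fuse_register_polled_file() below).
 */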
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = (__u32)poll_requested_events(wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
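/*
 * Illustrative sketch, not part of this driver: the matching server
 * side of the poll handshake, written against libfuse's lowlevel API
 * as we understand it; treat the exact names and signatures as
 * assumptions.
 *
 *	static void my_poll(fuse_req_t req, fuse_ino_t ino,
 *			    struct fuse_file_info *fi,
 *			    struct fuse_pollhandle *ph)
 *	{
 *		// Report what is ready right now ...
 *		fuse_reply_poll(req, POLLIN);
 *
 *		// ... and, when FUSE_POLL_SCHEDULE_NOTIFY was set, keep
 *		// ph and call fuse_lowlevel_notify_poll(ph) once more
 *		// data arrives; that notification reaches
 *		// fuse_notify_poll_wakeup() below via FUSE_NOTIFY_POLL.
 *	}
 */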
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
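/*
 * The wakeup above does not report readiness by itself: it only kicks
 * the waiters on ff->poll_wait, after which the poll/select core calls
 * ->poll (fuse_file_poll() above) again to fetch fresh revents.
 */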
static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(inode, &attr, file);
}
static inline loff_t fuse_round_up(loff_t off)
{
	return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
}
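/*
 * Worked example, assuming 4 KiB pages and the FUSE_MAX_PAGES_PER_REQ
 * of 32 used for the request pools here: the rounding unit is
 * 32 << 12 == 128 KiB, so fuse_round_up(5000) == 131072 and
 * fuse_round_up(131072) == 131072.  Rounding the short-read clamp up
 * to this unit keeps the clamped read issuing full-sized requests.
 */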
static ssize_t
fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t offset, unsigned long nr_segs)
{
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	bool async_dio = ff->fc->async_dio;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_length(iov, nr_segs);
	struct fuse_io_priv *io;

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	/* optimization for short read */
	if (async_dio && rw != WRITE && offset + count > i_size) {
		if (offset >= i_size)
			return 0;
		count = min_t(loff_t, count, fuse_round_up(i_size - offset));
	}

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	io->reqs = 1;
	io->bytes = 0;
	io->size = 0;
	io->offset = offset;
	io->write = (rw == WRITE);
	io->err = 0;
	io->file = file;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = async_dio;
	io->iocb = iocb;

	/*
	 * We cannot asynchronously extend the size of a file. We have no method
	 * to wait on real async I/O requests, so we must submit this request
	 * synchronously.
	 */
	if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
		io->async = false;

	if (rw == WRITE)
		ret = __fuse_direct_write(io, iov, nr_segs, &pos);
	else
		ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);

	if (io->async) {
		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		ret = wait_on_sync_kiocb(iocb);
	} else {
		kfree(io);
	}

	if (rw == WRITE) {
		if (ret > 0)
			fuse_write_update_size(inode, pos);
		else if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}
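/*
 * Worked example for the short-read clamp above (same 128 KiB unit as
 * the fuse_round_up() note): with i_size == 100 KiB on an async-capable
 * connection, a 4 MiB direct read at offset 0 satisfies
 * offset + count > i_size, so count becomes
 * min(4 MiB, fuse_round_up(100 KiB)) == 128 KiB rather than queueing
 * 4 MiB of requests that would mostly come back empty.
 */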
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file->f_inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req;
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
			   (mode & FALLOC_FL_PUNCH_HOLE);

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	if (lock_inode) {
		mutex_lock(&inode->i_mutex);
		if (mode & FALLOC_FL_PUNCH_HOLE) {
			loff_t endbyte = offset + length - 1;
			err = filemap_write_and_wait_range(inode->i_mapping,
							   offset, endbyte);
			if (err)
				goto out;

			fuse_sync_writes(inode);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->in.h.opcode = FUSE_FALLOCATE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	fuse_put_request(fc, req);

	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		fuse_write_update_size(inode, offset + length);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr(inode);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (lock_inode)
		mutex_unlock(&inode->i_mutex);

	return err;
}
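/*
 * Userspace view, for reference: punching a hole while keeping the
 * file size is
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * (PUNCH_HOLE is only valid together with KEEP_SIZE), which is why the
 * punch-hole branch above flushes dirty pages first and truncates the
 * page cache range afterwards, while only non-KEEP_SIZE modes update
 * i_size.
 */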
static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= fuse_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
};
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	/* no splice_read */
};
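/*
 * Files opened with FOPEN_DIRECT_IO get this table instead of
 * fuse_file_operations: reads and writes bypass the page cache, and
 * splice_read is left out because generic_file_splice_read() works on
 * page-cache pages that this mode never fills.
 */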
static const struct address_space_operations fuse_file_aops  = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
};
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}