/*
 * linux/fs/9p/vfs_file.c
 *
 * This file contains vfs file ops for 9P2000.
 *
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"

static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;

/**
 * v9fs_file_open - open a file (or directory)
 * @inode: inode to be opened
 * @file: file being opened
 *
 */

int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					  v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
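		/*
		 * Legacy 9P2000 has no append mode on the wire, so
		 * approximate O_APPEND by seeking to EOF at open time.
		 */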
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
	    !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it at open time instead of at page dirty time
		 * (via write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
		v9fs_cache_inode_set_cookie(inode, file);
	return 0;
out_error:
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}

/**
 * v9fs_file_lock - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 * Bugs: this looks like a local only lock, we should extend into 9P
 *	 by using open exclusive
 */

static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	int res = 0;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

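	/*
	 * Before granting a lock, write back dirty pages and drop the
	 * page cache so that reads done under the lock fetch fresh data
	 * from the server.
	 */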
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	return res;
}

static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
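	/* a lock length of 0 asks for a lock to the end of the file,
	 * mirroring l_len == 0 in POSIX record locking */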
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	/*
	 * if it is a blocking request and we get P9_LOCK_BLOCKED as the
	 * status for the lock request, keep retrying
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
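		/*
		 * The lock is contended and the caller asked to block:
		 * sleep before polling the server again.  A non-zero
		 * return means a signal interrupted the wait, so give up.
		 */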
		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		/* fallthrough */
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * in case the server returned an error for the lock request,
	 * revert it locally
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}

static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * if we have a conflicting lock locally, no need to validate
	 * with the server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
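		/*
		 * The conflicting lock is owned by a process on the 9p
		 * server, so its pid cannot be mapped to a local task;
		 * report it as a negative value to mark it as remote.
		 */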
		fl->fl_pid = -glock.proc_id;
	}
out:
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}

/**
 * v9fs_file_lock_dotl - lock a file (or directory)
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else if (IS_GETLK(cmd))
		ret = v9fs_file_getlock(filp, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_file_flock_dotl - lock a file
 * @filp: file to be locked
 * @cmd: lock command
 * @fl: file lock structure
 *
 */

static int v9fs_file_flock_dotl(struct file *filp, int cmd,
				struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if (!(fl->fl_flags & FL_FLOCK))
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}
	/* Convert flock to posix lock */
	fl->fl_flags |= FL_POSIX;
	fl->fl_flags ^= FL_FLOCK;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}

/**
 * v9fs_file_read_iter - read from a file
 * @iocb: kiocb describing the file and position to read from
 * @to: iterator for the destination buffer
 *
 */

static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct p9_fid *fid = iocb->ki_filp->private_data;
	int ret, err = 0;

	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
		 iov_iter_count(to), iocb->ki_pos);

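	/*
	 * p9_client_read() returns the number of bytes copied into the
	 * iterator and reports failures through @err; zero bytes with
	 * err == 0 simply means end of file.
	 */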
	ret = p9_client_read(fid, iocb->ki_pos, to, &err);
	if (!ret)
		return err;

	iocb->ki_pos += ret;
	return ret;
}

/**
 * v9fs_file_write_iter - write to a file
 * @iocb: kiocb describing the file and position to write at
 * @from: iterator for the source buffer
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;
		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
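		/*
		 * The write went straight to the server; invalidate any
		 * cached pages covering the written range so later cached
		 * reads do not return stale data.
		 */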
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	return err;
}

static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	struct p9_wstat wstat;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;
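	/*
	 * Legacy 9P has no dedicated fsync request; by convention a wstat
	 * with every field set to "don't touch" asks the server to commit
	 * the file's contents to stable storage.
	 */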
	v9fs_blank_wstat(&wstat);

	retval = p9_client_wstat(fid, &wstat);
	inode_unlock(inode);

	return retval;
}

int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct p9_fid *fid;
	struct inode *inode = filp->f_mapping->host;
	int retval;

	retval = file_write_and_wait_range(filp, start, end);
	if (retval)
		return retval;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	fid = filp->private_data;

	retval = p9_client_fsync(fid, datasync);
	inode_unlock(inode);

	return retval;
}

static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_file_vm_ops;

	return retval;
}

static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	mutex_lock(&v9inode->v_mutex);
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it at mmap time instead of at page dirty time
		 * (via write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}

static int
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	BUG_ON(!v9inode->writeback_fid);
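	/*
	 * Lock the page and make sure it is still attached to this
	 * inode's mapping; if it was truncated or invalidated in the
	 * meantime, back out and let the fault be retried.
	 */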
	lock_page(page);
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}

/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: kiocb describing the file and position to read from
 * @to: iterator for the destination buffer
 *
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}

/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: kiocb describing the file and position to write at
 * @from: iterator for the source buffer
 *
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}

static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = vma->vm_pgoff * PAGE_SIZE,
		/* absolute end, byte at end included */
		.range_end = vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing\n", vma);

	inode = file_inode(vma->vm_file);

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

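	/*
	 * This VMA is going away: synchronously write back the range it
	 * covered so that mmapped writes reach the server.
	 */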
	might_sleep();
	sync_inode(inode, &wbc);
}

static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};

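/*
 * Several file_operations variants follow: the "cached" tables use the
 * generic page-cache read/write paths, the plain tables go straight to the
 * server, and the "mmap" tables add shared-writable mmap support.  The
 * _dotl tables are for 9P2000.L sessions.  v9fs picks the appropriate table
 * when an inode is set up, based on the session's cache mode.
 */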
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.fsync = v9fs_file_fsync_dotl,
};

const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync,
};

const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.fsync = v9fs_file_fsync_dotl,
};