Staging: pohmelfs: move open brace to same line on structs
drivers/staging/pohmelfs/inode.c
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/backing-dev.h>
18#include <linux/crypto.h>
19#include <linux/fs.h>
20#include <linux/jhash.h>
21#include <linux/hash.h>
22#include <linux/ktime.h>
23#include <linux/mm.h>
24#include <linux/mount.h>
25#include <linux/pagemap.h>
26#include <linux/pagevec.h>
27#include <linux/parser.h>
28#include <linux/swap.h>
29#include <linux/slab.h>
30#include <linux/statfs.h>
31#include <linux/writeback.h>
32#include <linux/quotaops.h>
33
34#include "netfs.h"
35
36#define POHMELFS_MAGIC_NUM 0x504f482e
37
38static struct kmem_cache *pohmelfs_inode_cache;
39
40/*
41 * Removes inode from all trees, drops local name cache and removes all queued
42 * requests for object removal.
43 */
44void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi)
45{
46 mutex_lock(&pi->offset_lock);
47 pohmelfs_free_names(pi);
48 mutex_unlock(&pi->offset_lock);
49
50 dprintk("%s: deleted stuff in ino: %llu.\n", __func__, pi->ino);
51}
52
53/*
54 * Sync inode to server.
 55 * Returns zero on success and a negative error value otherwise.
 56 * It gathers the path to the root directory into structures containing
 57 * creation mode, permissions and names, so that the whole path
 58 * to the given inode can be created using only a single network command.
59 */
60int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans)
61{
62 struct pohmelfs_inode *pi = POHMELFS_I(inode);
63 int err = -ENOMEM, size;
64 struct netfs_cmd *cmd;
65 void *data;
66 int cur_len = netfs_trans_cur_len(trans);
67
68 if (unlikely(cur_len < 0))
69 return -ETOOSMALL;
70
71 cmd = netfs_trans_current(trans);
72 cur_len -= sizeof(struct netfs_cmd);
73
74 data = (void *)(cmd + 1);
75
76 err = pohmelfs_construct_path_string(pi, data, cur_len);
77 if (err < 0)
78 goto err_out_exit;
79
80 size = err;
81
82 cmd->start = i_size_read(inode);
83 cmd->cmd = NETFS_CREATE;
84 cmd->size = size;
85 cmd->id = pi->ino;
86 cmd->ext = inode->i_mode;
87
88 netfs_convert_cmd(cmd);
89
90 netfs_trans_update(cmd, trans, size);
91
92 return 0;
93
94err_out_exit:
95 printk("%s: completed ino: %llu, err: %d.\n", __func__, pi->ino, err);
96 return err;
97}
98
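/*
 * Data writeback completion callback: ends writeback on every attached page,
 * re-dirties and marks pages on error, then unlocks and releases them.
 */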
99static int pohmelfs_write_trans_complete(struct page **pages, unsigned int page_num,
100 void *private, int err)
101{
102 unsigned i;
103
104 dprintk("%s: pages: %lu-%lu, page_num: %u, err: %d.\n",
105 __func__, pages[0]->index, pages[page_num-1]->index,
106 page_num, err);
107
108 for (i = 0; i < page_num; i++) {
109 struct page *page = pages[i];
110
111 if (!page)
112 continue;
113
114 end_page_writeback(page);
115
116 if (err < 0) {
117 SetPageError(page);
118 set_page_dirty(page);
119 }
120
121 unlock_page(page);
122 page_cache_release(page);
123
124 /* dprintk("%s: %3u/%u: page: %p.\n", __func__, i, page_num, page); */
125 }
126 return err;
127}
128
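/*
 * Returns nonzero if the mapping has at least one dirty page at or after
 * the given index (radix tree dirty-tag lookup under RCU).
 */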
129static int pohmelfs_inode_has_dirty_pages(struct address_space *mapping, pgoff_t index)
130{
131 int ret;
132 struct page *page;
133
134 rcu_read_lock();
135 ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
136 (void **)&page, index, 1, PAGECACHE_TAG_DIRTY);
137 rcu_read_unlock();
138 return ret;
139}
140
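/*
 * ->writepages() callback: batches dirty pages into transactions of up to
 * psb->trans_max_pages pages, prepends a NETFS_CREATE command carrying the
 * object path and sends each transaction to the server.
 */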
141static int pohmelfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
142{
143 struct inode *inode = mapping->host;
144 struct pohmelfs_inode *pi = POHMELFS_I(inode);
145 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
146 struct backing_dev_info *bdi = mapping->backing_dev_info;
147 int err = 0;
148 int done = 0;
149 int nr_pages;
150 pgoff_t index;
151 pgoff_t end; /* Inclusive */
152 int scanned = 0;
153 int range_whole = 0;
154
155 if (wbc->nonblocking && bdi_write_congested(bdi)) {
156 wbc->encountered_congestion = 1;
157 return 0;
158 }
159
160 if (wbc->range_cyclic) {
161 index = mapping->writeback_index; /* Start from prev offset */
162 end = -1;
163 } else {
164 index = wbc->range_start >> PAGE_CACHE_SHIFT;
165 end = wbc->range_end >> PAGE_CACHE_SHIFT;
166 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
167 range_whole = 1;
168 scanned = 1;
169 }
170retry:
171 while (!done && (index <= end)) {
172 unsigned int i = min(end - index, (pgoff_t)psb->trans_max_pages);
173 int path_len;
174 struct netfs_trans *trans;
175
176 err = pohmelfs_inode_has_dirty_pages(mapping, index);
177 if (!err)
178 break;
179
180 err = pohmelfs_path_length(pi);
181 if (err < 0)
182 break;
183
184 path_len = err;
185
186 if (path_len <= 2) {
187 err = -ENOENT;
188 break;
189 }
190
191 trans = netfs_trans_alloc(psb, path_len, 0, i);
192 if (!trans) {
193 err = -ENOMEM;
194 break;
195 }
196 trans->complete = &pohmelfs_write_trans_complete;
197
198 trans->page_num = nr_pages = find_get_pages_tag(mapping, &index,
199 PAGECACHE_TAG_DIRTY, trans->page_num,
200 trans->pages);
201
202 dprintk("%s: t: %p, nr_pages: %u, end: %lu, index: %lu, max: %u.\n",
203 __func__, trans, nr_pages, end, index, trans->page_num);
204
205 if (!nr_pages)
206 goto err_out_reset;
207
208 err = pohmelfs_write_inode_create(inode, trans);
209 if (err)
210 goto err_out_reset;
211
212 err = 0;
213 scanned = 1;
214
215 for (i = 0; i < trans->page_num; i++) {
216 struct page *page = trans->pages[i];
217
218 lock_page(page);
219
220 if (unlikely(page->mapping != mapping))
221 goto out_continue;
222
223 if (!wbc->range_cyclic && page->index > end) {
224 done = 1;
225 goto out_continue;
226 }
227
228 if (wbc->sync_mode != WB_SYNC_NONE)
229 wait_on_page_writeback(page);
230
231 if (PageWriteback(page) ||
232 !clear_page_dirty_for_io(page)) {
233 dprintk("%s: not clear for io page: %p, writeback: %d.\n",
234 __func__, page, PageWriteback(page));
235 goto out_continue;
236 }
237
238 set_page_writeback(page);
239
240 trans->attached_size += page_private(page);
241 trans->attached_pages++;
242#if 0
243 dprintk("%s: %u/%u added trans: %p, gen: %u, page: %p, [High: %d], size: %lu, idx: %lu.\n",
244 __func__, i, trans->page_num, trans, trans->gen, page,
245 !!PageHighMem(page), page_private(page), page->index);
246#endif
247 wbc->nr_to_write--;
248
249 if (wbc->nr_to_write <= 0)
250 done = 1;
251 if (wbc->nonblocking && bdi_write_congested(bdi)) {
252 wbc->encountered_congestion = 1;
253 done = 1;
254 }
255
256 continue;
257out_continue:
258 unlock_page(page);
259 trans->pages[i] = NULL;
260 }
261
262 err = netfs_trans_finish(trans, psb);
263 if (err)
264 break;
265
266 continue;
267
268err_out_reset:
269 trans->result = err;
270 netfs_trans_reset(trans);
271 netfs_trans_put(trans);
272 break;
273 }
274
275 if (!scanned && !done) {
276 /*
277 * We hit the last page and there is more work to be done: wrap
278 * back to the start of the file
279 */
280 scanned = 1;
281 index = 0;
282 goto retry;
283 }
284
285 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
286 mapping->writeback_index = index;
287
288 return err;
289}
290
291/*
292 * Inode writeback creation completion callback.
 293 * Only invoked for just-created inodes that do not have pages attached,
 294 * such as directories and empty files.
295 */
296static int pohmelfs_write_inode_complete(struct page **pages, unsigned int page_num,
297 void *private, int err)
298{
299 struct inode *inode = private;
300 struct pohmelfs_inode *pi = POHMELFS_I(inode);
301
302 if (inode) {
303 if (err) {
304 mark_inode_dirty(inode);
305 clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
306 } else {
307 set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
308 }
309
310 pohmelfs_put_inode(pi);
311 }
312
313 return err;
314}
315
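/*
 * Sends an object creation command for the given inode to the server,
 * unless NETFS_INODE_REMOTE_SYNCED says it has already been synced.
 */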
316int pohmelfs_write_create_inode(struct pohmelfs_inode *pi)
317{
318 struct netfs_trans *t;
319 struct inode *inode = &pi->vfs_inode;
320 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
321 int err;
322
323 if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
324 return 0;
325
326 dprintk("%s: started ino: %llu.\n", __func__, pi->ino);
327
328 err = pohmelfs_path_length(pi);
329 if (err < 0)
330 goto err_out_exit;
331
332 t = netfs_trans_alloc(psb, err + 1, 0, 0);
333 if (!t) {
334 err = -ENOMEM;
335 goto err_out_put;
336 }
337 t->complete = pohmelfs_write_inode_complete;
338 t->private = igrab(inode);
339 if (!t->private) {
340 err = -ENOENT;
341 goto err_out_put;
342 }
343
344 err = pohmelfs_write_inode_create(inode, t);
345 if (err)
346 goto err_out_put;
347
348 netfs_trans_finish(t, POHMELFS_SB(inode->i_sb));
349
350 return 0;
351
352err_out_put:
353 t->result = err;
354 netfs_trans_put(t);
355err_out_exit:
356 return err;
357}
358
359/*
360 * Sync all not-yet-created children in given directory to the server.
361 */
362static int pohmelfs_write_inode_create_children(struct inode *inode)
363{
364 struct pohmelfs_inode *parent = POHMELFS_I(inode);
365 struct super_block *sb = inode->i_sb;
366 struct pohmelfs_name *n;
367
368 while (!list_empty(&parent->sync_create_list)) {
369 n = NULL;
370 mutex_lock(&parent->offset_lock);
371 if (!list_empty(&parent->sync_create_list)) {
372 n = list_first_entry(&parent->sync_create_list,
373 struct pohmelfs_name, sync_create_entry);
374 list_del_init(&n->sync_create_entry);
375 }
376 mutex_unlock(&parent->offset_lock);
377
378 if (!n)
379 break;
380
381 inode = ilookup(sb, n->ino);
382
383 dprintk("%s: parent: %llu, ino: %llu, inode: %p.\n",
384 __func__, parent->ino, n->ino, inode);
385
386 if (inode && (inode->i_state & I_DIRTY)) {
387 struct pohmelfs_inode *pi = POHMELFS_I(inode);
388 pohmelfs_write_create_inode(pi);
389 //pohmelfs_meta_command(pi, NETFS_INODE_INFO, 0, NULL, NULL, 0);
390 iput(inode);
391 }
392 }
393
394 return 0;
395}
396
397/*
398 * Removes given child from given inode on server.
399 */
400int pohmelfs_remove_child(struct pohmelfs_inode *pi, struct pohmelfs_name *n)
401{
402 return pohmelfs_meta_command_data(pi, pi->ino, NETFS_REMOVE, NULL, 0, NULL, NULL, 0);
403}
404
405/*
406 * Writeback for given inode.
407 */
408static int pohmelfs_write_inode(struct inode *inode, int sync)
409{
410 struct pohmelfs_inode *pi = POHMELFS_I(inode);
411
412 pohmelfs_write_create_inode(pi);
413 pohmelfs_write_inode_create_children(inode);
414
415 return 0;
416}
417
418/*
419 * It is not exported, sorry...
420 */
421static inline wait_queue_head_t *page_waitqueue(struct page *page)
422{
423 const struct zone *zone = page_zone(page);
424
425 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
426}
427
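/*
 * Waits (interruptibly, with psb->wait_on_page_timeout) until the network
 * reader unlocks the page; marks the page uptodate on success.
 */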
428static int pohmelfs_wait_on_page_locked(struct page *page)
429{
430 struct pohmelfs_sb *psb = POHMELFS_SB(page->mapping->host->i_sb);
431 long ret = psb->wait_on_page_timeout;
432 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
433 int err = 0;
434
435 if (!PageLocked(page))
436 return 0;
437
438 for (;;) {
439 prepare_to_wait(page_waitqueue(page),
440 &wait.wait, TASK_INTERRUPTIBLE);
441
442 dprintk("%s: page: %p, locked: %d, uptodate: %d, error: %d, flags: %lx.\n",
443 __func__, page, PageLocked(page), PageUptodate(page),
444 PageError(page), page->flags);
445
446 if (!PageLocked(page))
447 break;
448
449 if (!signal_pending(current)) {
450 ret = schedule_timeout(ret);
451 if (!ret)
452 break;
453 continue;
454 }
455 ret = -ERESTARTSYS;
456 break;
457 }
458 finish_wait(page_waitqueue(page), &wait.wait);
459
460 if (!ret)
461 err = -ETIMEDOUT;
462
463
464 if (!err)
465 SetPageUptodate(page);
466
467 if (err)
468 printk("%s: page: %p, uptodate: %d, locked: %d, err: %d.\n",
469 __func__, page, PageUptodate(page), PageLocked(page), err);
470
471 return err;
472}
473
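/*
 * Single-page read completion callback: propagates errors to the page and
 * unlocks it so that pohmelfs_wait_on_page_locked() can proceed.
 */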
474static int pohmelfs_read_page_complete(struct page **pages, unsigned int page_num,
475 void *private, int err)
476{
477 struct page *page = private;
478
479 if (PageChecked(page))
480 return err;
481
482 if (err < 0) {
483 dprintk("%s: page: %p, err: %d.\n", __func__, page, err);
484 SetPageError(page);
485 }
486
487 unlock_page(page);
488
489 return err;
490}
491
492/*
493 * Read a page from remote server.
494 * Function will wait until page is unlocked.
495 */
496static int pohmelfs_readpage(struct file *file, struct page *page)
497{
498 struct inode *inode = page->mapping->host;
499 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
500 struct pohmelfs_inode *pi = POHMELFS_I(inode);
501 struct netfs_trans *t;
502 struct netfs_cmd *cmd;
503 int err, path_len;
504 void *data;
505 u64 isize;
506
507 err = pohmelfs_data_lock(pi, page->index << PAGE_CACHE_SHIFT,
508 PAGE_SIZE, POHMELFS_READ_LOCK);
509 if (err)
510 goto err_out_exit;
511
512 isize = i_size_read(inode);
513 if (isize <= page->index << PAGE_CACHE_SHIFT) {
514 SetPageUptodate(page);
515 unlock_page(page);
516 return 0;
517 }
518
519 path_len = pohmelfs_path_length(pi);
520 if (path_len < 0) {
521 err = path_len;
522 goto err_out_exit;
523 }
524
525 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
526 if (!t) {
527 err = -ENOMEM;
528 goto err_out_exit;
529 }
530
531 t->complete = pohmelfs_read_page_complete;
532 t->private = page;
533
534 cmd = netfs_trans_current(t);
535 data = (void *)(cmd + 1);
536
537 err = pohmelfs_construct_path_string(pi, data, path_len);
538 if (err < 0)
539 goto err_out_free;
540
541 path_len = err;
542
543 cmd->id = pi->ino;
544 cmd->start = page->index;
545 cmd->start <<= PAGE_CACHE_SHIFT;
546 cmd->size = PAGE_CACHE_SIZE + path_len;
547 cmd->cmd = NETFS_READ_PAGE;
548 cmd->ext = path_len;
549
550 dprintk("%s: path: '%s', page: %p, ino: %llu, start: %llu, size: %lu.\n",
551 __func__, (char *)data, page, pi->ino, cmd->start, PAGE_CACHE_SIZE);
552
553 netfs_convert_cmd(cmd);
554 netfs_trans_update(cmd, t, path_len);
555
556 err = netfs_trans_finish(t, psb);
557 if (err)
558 goto err_out_return;
559
560 return pohmelfs_wait_on_page_locked(page);
561
562err_out_free:
563 t->result = err;
564 netfs_trans_put(t);
565err_out_exit:
566 SetPageError(page);
567 if (PageLocked(page))
568 unlock_page(page);
569err_out_return:
570 printk("%s: page: %p, start: %lu, size: %lu, err: %d.\n",
571 __func__, page, page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, err);
572
573 return err;
574}
575
576/*
577 * Write begin/end magic.
578 * Allocates a page and writes inode if it was not synced to server before.
579 */
580static int pohmelfs_write_begin(struct file *file, struct address_space *mapping,
581 loff_t pos, unsigned len, unsigned flags,
582 struct page **pagep, void **fsdata)
583{
584 struct inode *inode = mapping->host;
585 struct page *page;
586 pgoff_t index;
587 unsigned start, end;
588 int err;
589
590 *pagep = NULL;
591
592 index = pos >> PAGE_CACHE_SHIFT;
593 start = pos & (PAGE_CACHE_SIZE - 1);
594 end = start + len;
595
596 page = grab_cache_page(mapping, index);
597#if 0
598 dprintk("%s: page: %p pos: %llu, len: %u, index: %lu, start: %u, end: %u, uptodate: %d.\n",
599 __func__, page, pos, len, index, start, end, PageUptodate(page));
600#endif
601 if (!page) {
602 err = -ENOMEM;
603 goto err_out_exit;
604 }
605
606 while (!PageUptodate(page)) {
607 if (start && test_bit(NETFS_INODE_REMOTE_SYNCED, &POHMELFS_I(inode)->state)) {
608 err = pohmelfs_readpage(file, page);
609 if (err)
610 goto err_out_exit;
611
612 lock_page(page);
613 continue;
614 }
615
616 if (len != PAGE_CACHE_SIZE) {
617 void *kaddr = kmap_atomic(page, KM_USER0);
618
619 memset(kaddr + start, 0, PAGE_CACHE_SIZE - start);
620 flush_dcache_page(page);
621 kunmap_atomic(kaddr, KM_USER0);
622 }
623 SetPageUptodate(page);
624 }
625
626 set_page_private(page, end);
627
628 *pagep = page;
629
630 return 0;
631
632err_out_exit:
633 page_cache_release(page);
634 *pagep = NULL;
635
636 return err;
637}
638
639static int pohmelfs_write_end(struct file *file, struct address_space *mapping,
640 loff_t pos, unsigned len, unsigned copied,
641 struct page *page, void *fsdata)
642{
643 struct inode *inode = mapping->host;
644
645 if (copied != len) {
646 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
647 void *kaddr = kmap_atomic(page, KM_USER0);
648
649 memset(kaddr + from + copied, 0, len - copied);
650 flush_dcache_page(page);
651 kunmap_atomic(kaddr, KM_USER0);
652 }
653
654 SetPageUptodate(page);
655 set_page_dirty(page);
656#if 0
657 dprintk("%s: page: %p [U: %d, D: %d, L: %d], pos: %llu, len: %u, copied: %u.\n",
658 __func__, page,
659 PageUptodate(page), PageDirty(page), PageLocked(page),
660 pos, len, copied);
661#endif
662 flush_dcache_page(page);
663
664 unlock_page(page);
665 page_cache_release(page);
666
667 if (pos + copied > inode->i_size) {
668 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
669
670 psb->avail_size -= pos + copied - inode->i_size;
671
672 i_size_write(inode, pos + copied);
673 }
674
675 return copied;
676}
677
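/*
 * Readpages completion callback. t->pages actually stores the first page
 * pointer (not an array), so the pages are looked up again here via
 * find_get_pages_contig() before being unlocked and released.
 */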
678static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int page_num,
679 void *private, int err)
680{
681 struct pohmelfs_inode *pi = private;
682 unsigned int i, num;
683 struct page **pages, *page = (struct page *)__pages;
684 loff_t index = page->index;
685
686 pages = kzalloc(sizeof(void *) * page_num, GFP_NOIO);
687 if (!pages)
688 return -ENOMEM;
689
690 num = find_get_pages_contig(pi->vfs_inode.i_mapping, index, page_num, pages);
691 if (num <= 0) {
692 err = num;
693 goto err_out_free;
694 }
695
696 for (i=0; i<num; ++i) {
697 page = pages[i];
698
699 if (err)
700 printk("%s: %u/%u: page: %p, index: %lu, uptodate: %d, locked: %d, err: %d.\n",
701 __func__, i, num, page, page->index,
702 PageUptodate(page), PageLocked(page), err);
703
704 if (!PageChecked(page)) {
705 if (err < 0)
706 SetPageError(page);
707 unlock_page(page);
708 }
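		/*
		 * Two page references are dropped here: the one taken by
		 * find_get_pages_contig() above and the one the page came in
		 * with from the ->readpages() list.
		 */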
709 page_cache_release(page);
710 page_cache_release(page);
711 }
712
713err_out_free:
714 kfree(pages);
715 return err;
716}
717
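/*
 * Sends a single NETFS_READ_PAGES command covering 'num' contiguous pages
 * starting at 'first'; the page count and page shift are packed into cmd->size.
 */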
718static int pohmelfs_send_readpages(struct pohmelfs_inode *pi, struct page *first, unsigned int num)
719{
720 struct netfs_trans *t;
721 struct netfs_cmd *cmd;
722 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
723 int err, path_len;
724 void *data;
725
726 err = pohmelfs_data_lock(pi, first->index << PAGE_CACHE_SHIFT,
727 num * PAGE_SIZE, POHMELFS_READ_LOCK);
728 if (err)
729 goto err_out_exit;
730
731 path_len = pohmelfs_path_length(pi);
732 if (path_len < 0) {
733 err = path_len;
734 goto err_out_exit;
735 }
736
737 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
738 if (!t) {
739 err = -ENOMEM;
740 goto err_out_exit;
741 }
742
743 cmd = netfs_trans_current(t);
744 data = (void *)(cmd + 1);
745
746 t->complete = pohmelfs_readpages_trans_complete;
747 t->private = pi;
748 t->page_num = num;
749 t->pages = (struct page **)first;
750
751 err = pohmelfs_construct_path_string(pi, data, path_len);
752 if (err < 0)
753 goto err_out_put;
754
755 path_len = err;
756
757 cmd->cmd = NETFS_READ_PAGES;
758 cmd->start = first->index;
759 cmd->start <<= PAGE_CACHE_SHIFT;
760 cmd->size = (num << 8 | PAGE_CACHE_SHIFT);
761 cmd->id = pi->ino;
762 cmd->ext = path_len;
763
764 dprintk("%s: t: %p, gen: %u, path: '%s', path_len: %u, "
765 "start: %lu, num: %u.\n",
766 __func__, t, t->gen, (char *)data, path_len,
767 first->index, num);
768
769 netfs_convert_cmd(cmd);
770 netfs_trans_update(cmd, t, path_len);
771
772 return netfs_trans_finish(t, psb);
773
774err_out_put:
775 netfs_trans_free(t);
776err_out_exit:
777 pohmelfs_readpages_trans_complete((struct page **)first, num, pi, err);
778 return err;
779}
780
781#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
782
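/*
 * ->readpages() callback: adds pages to the page cache and groups runs of
 * contiguous indices (up to 500 pages) into pohmelfs_send_readpages() calls.
 */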
783static int pohmelfs_readpages(struct file *file, struct address_space *mapping,
784 struct list_head *pages, unsigned nr_pages)
785{
786 unsigned int page_idx, num = 0;
787 struct page *page = NULL, *first = NULL;
788
789 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
790 page = list_to_page(pages);
791
792 prefetchw(&page->flags);
793 list_del(&page->lru);
794
795 if (!add_to_page_cache_lru(page, mapping,
796 page->index, GFP_KERNEL)) {
797
798 if (!num) {
799 num = 1;
800 first = page;
801 continue;
802 }
803
804 dprintk("%s: added to lru page: %p, page_index: %lu, first_index: %lu.\n",
805 __func__, page, page->index, first->index);
806
807 if (unlikely(first->index + num != page->index) || (num > 500)) {
808 pohmelfs_send_readpages(POHMELFS_I(mapping->host),
809 first, num);
810 first = page;
811 num = 0;
812 }
813
814 num++;
815 }
816 }
817 pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num);
818
819 /*
 820 * This will be a sync read, so when the last page is processed,
 821 * all previous ones are already unlocked and ready to be used.
822 */
823 return 0;
824}
825
826/*
 827 * Small address space operations for POHMELFS.
828 */
829const struct address_space_operations pohmelfs_aops = {
830 .readpage = pohmelfs_readpage,
831 .readpages = pohmelfs_readpages,
832 .writepages = pohmelfs_writepages,
833 .write_begin = pohmelfs_write_begin,
834 .write_end = pohmelfs_write_end,
835 .set_page_dirty = __set_page_dirty_nobuffers,
836};
837
838/*
 839 * ->destroy_inode() callback. Deletes the inode from the caches
840 * and frees private data.
841 */
842static void pohmelfs_destroy_inode(struct inode *inode)
843{
844 struct super_block *sb = inode->i_sb;
845 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
846 struct pohmelfs_inode *pi = POHMELFS_I(inode);
847
848 //pohmelfs_data_unlock(pi, 0, inode->i_size, POHMELFS_READ_LOCK);
849
850 pohmelfs_inode_del_inode(psb, pi);
851
852 dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
853 __func__, pi, &pi->vfs_inode, pi->ino);
854 kmem_cache_free(pohmelfs_inode_cache, pi);
855 atomic_long_dec(&psb->total_inodes);
856}
857
858/*
 859 * ->alloc_inode() callback. Allocates an inode and initializes private data.
860 */
861static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
862{
863 struct pohmelfs_inode *pi;
864
865 pi = kmem_cache_alloc(pohmelfs_inode_cache, GFP_NOIO);
866 if (!pi)
867 return NULL;
868
869 pi->hash_root = RB_ROOT;
870 mutex_init(&pi->offset_lock);
871
872 INIT_LIST_HEAD(&pi->sync_create_list);
873
874 INIT_LIST_HEAD(&pi->inode_entry);
875
876 pi->lock_type = 0;
877 pi->state = 0;
878 pi->total_len = 0;
879 pi->drop_count = 0;
880
881 dprintk("%s: pi: %p, inode: %p.\n", __func__, pi, &pi->vfs_inode);
882
883 atomic_long_inc(&POHMELFS_SB(sb)->total_inodes);
884
885 return &pi->vfs_inode;
886}
887
888/*
889 * We want fsync() to work on POHMELFS.
890 */
891static int pohmelfs_fsync(struct file *file, struct dentry *dentry, int datasync)
892{
893 struct inode *inode = file->f_mapping->host;
894 struct writeback_control wbc = {
895 .sync_mode = WB_SYNC_ALL,
896 .nr_to_write = 0, /* sys_fsync did this */
897 };
898
899 return sync_inode(inode, &wbc);
900}
901
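/*
 * ->write() path: takes i_mutex, grabs a POHMELFS write lock on the byte
 * range and then runs the generic AIO write, syncing the range afterwards
 * for O_SYNC files.
 */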
902ssize_t pohmelfs_write(struct file *file, const char __user *buf,
903 size_t len, loff_t *ppos)
904{
905 struct address_space *mapping = file->f_mapping;
906 struct inode *inode = mapping->host;
907 struct pohmelfs_inode *pi = POHMELFS_I(inode);
908 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
909 struct kiocb kiocb;
910 ssize_t ret;
911 loff_t pos = *ppos;
912
913 init_sync_kiocb(&kiocb, file);
914 kiocb.ki_pos = pos;
915 kiocb.ki_left = len;
916
 917 dprintk("%s: len: %zu, pos: %llu.\n", __func__, len, pos);
 918
919 mutex_lock(&inode->i_mutex);
920 ret = pohmelfs_data_lock(pi, pos, len, POHMELFS_WRITE_LOCK);
921 if (ret)
922 goto err_out_unlock;
923
924 ret = generic_file_aio_write_nolock(&kiocb, &iov, 1, pos);
925 *ppos = kiocb.ki_pos;
926
927 mutex_unlock(&inode->i_mutex);
928 WARN_ON(ret < 0);
929
930 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
931 ssize_t err;
932
933 err = sync_page_range(inode, mapping, pos, ret);
934 if (err < 0)
935 ret = err;
936 WARN_ON(ret < 0);
937 }
938
939 return ret;
940
941err_out_unlock:
942 mutex_unlock(&inode->i_mutex);
943 return ret;
944}
945
 946static const struct file_operations pohmelfs_file_ops = {
947 .open = generic_file_open,
948 .fsync = pohmelfs_fsync,
949
950 .llseek = generic_file_llseek,
951
952 .read = do_sync_read,
953 .aio_read = generic_file_aio_read,
954
955 .mmap = generic_file_mmap,
956
957 .splice_read = generic_file_splice_read,
958 .splice_write = generic_file_splice_write,
959
960 .write = pohmelfs_write,
961 .aio_write = generic_file_aio_write,
962};
963
964const struct inode_operations pohmelfs_symlink_inode_operations = {
965 .readlink = generic_readlink,
966 .follow_link = page_follow_link_light,
967 .put_link = page_put_link,
968};
969
970int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
971{
972 int err;
973
974 err = inode_change_ok(inode, attr);
975 if (err) {
976 dprintk("%s: ino: %llu, inode changes are not allowed.\n", __func__, POHMELFS_I(inode)->ino);
977 goto err_out_exit;
978 }
979
980 if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
981 (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
 982 err = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
983 if (err)
984 goto err_out_exit;
985 }
986
987 err = inode_setattr(inode, attr);
988 if (err) {
989 dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
990 goto err_out_exit;
991 }
992
993 dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n",
994 __func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode,
995 inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size);
996
997 return 0;
998
999err_out_exit:
1000 return err;
1001}
1002
1003int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr)
1004{
1005 struct inode *inode = dentry->d_inode;
1006 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1007 int err;
1008
1009 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1010 if (err)
1011 goto err_out_exit;
1012
1013 err = security_inode_setattr(dentry, attr);
1014 if (err)
1015 goto err_out_exit;
1016
1017 err = pohmelfs_setattr_raw(inode, attr);
1018 if (err)
1019 goto err_out_exit;
1020
1021 return 0;
1022
1023err_out_exit:
1024 return err;
1025}
1026
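/*
 * Common helper for the xattr get/set paths: packs the object path, the
 * attribute name (including its terminating NUL) and the value into one
 * transaction and sends the given command to the server.
 */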
1027static int pohmelfs_send_xattr_req(struct pohmelfs_inode *pi, u64 id, u64 start,
1028 const char *name, const void *value, size_t attrsize, int command)
1029{
1030 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
1031 int err, path_len, namelen = strlen(name) + 1; /* 0-byte */
1032 struct netfs_trans *t;
1033 struct netfs_cmd *cmd;
1034 void *data;
1035
 1036 dprintk("%s: id: %llu, start: %llu, name: '%s', attrsize: %zu, cmd: %d.\n",
1037 __func__, id, start, name, attrsize, command);
1038
1039 path_len = pohmelfs_path_length(pi);
1040 if (path_len < 0) {
1041 err = path_len;
1042 goto err_out_exit;
1043 }
1044
1045 t = netfs_trans_alloc(psb, namelen + path_len + attrsize, 0, 0);
1046 if (!t) {
1047 err = -ENOMEM;
1048 goto err_out_exit;
1049 }
1050
1051 cmd = netfs_trans_current(t);
1052 data = cmd + 1;
1053
1054 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1055 if (path_len < 0) {
1056 err = path_len;
1057 goto err_out_put;
1058 }
1059 data += path_len;
1060
1061 /*
1062 * 'name' is a NUL-terminated string already and
1063 * 'namelen' includes 0-byte.
1064 */
1065 memcpy(data, name, namelen);
1066 data += namelen;
1067
1068 memcpy(data, value, attrsize);
1069
1070 cmd->cmd = command;
1071 cmd->id = id;
1072 cmd->start = start;
1073 cmd->size = attrsize + namelen + path_len;
1074 cmd->ext = path_len;
1075 cmd->csize = 0;
1076 cmd->cpad = 0;
1077
1078 netfs_convert_cmd(cmd);
1079 netfs_trans_update(cmd, t, namelen + path_len + attrsize);
1080
1081 return netfs_trans_finish(t, psb);
1082
1083err_out_put:
1084 t->result = err;
1085 netfs_trans_put(t);
1086err_out_exit:
1087 return err;
1088}
1089
1090static int pohmelfs_setxattr(struct dentry *dentry, const char *name,
1091 const void *value, size_t attrsize, int flags)
1092{
1093 struct inode *inode = dentry->d_inode;
1094 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1095 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1096
1097 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1098 return -EOPNOTSUPP;
1099
1100 return pohmelfs_send_xattr_req(pi, flags, attrsize, name,
1101 value, attrsize, NETFS_XATTR_SET);
1102}
1103
1104static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name,
1105 void *value, size_t attrsize)
1106{
1107 struct inode *inode = dentry->d_inode;
1108 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1109 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1110 struct pohmelfs_mcache *m;
1111 int err;
1112 long timeout = psb->mcache_timeout;
1113
1114 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1115 return -EOPNOTSUPP;
1116
1117 m = pohmelfs_mcache_alloc(psb, 0, attrsize, value);
1118 if (IS_ERR(m))
1119 return PTR_ERR(m);
1120
1121 dprintk("%s: ino: %llu, name: '%s', size: %zu.\n",
1122 __func__, pi->ino, name, attrsize);
1123
1124 err = pohmelfs_send_xattr_req(pi, m->gen, attrsize, name, value, 0, NETFS_XATTR_GET);
1125 if (err)
1126 goto err_out_put;
1127
1128 do {
1129 err = wait_for_completion_timeout(&m->complete, timeout);
1130 if (err) {
1131 err = m->err;
1132 break;
1133 }
1134
1135 /*
 1136 * This loop is a bit ugly: it waits until the reference counter
 1137 * hits 1 and only then puts the object. The goal is to prevent a race
 1138 * with the network thread, which may start processing the request
 1139 * (i.e. increase its reference counter) without completing it, while
 1140 * we exit from ->getxattr() on timeout. Although the request itself
 1141 * would not be freed (its reference counter was increased by the
 1142 * network thread), the data pointer provided by the user may be
 1143 * released, so the network thread would overwrite already freed memory.
 1144 *
 1145 * So after a timeout the request is removed from the cache, where the
 1146 * network thread can no longer find it, and we wait for its reference
 1147 * counter to hit 1: if the network thread has already started to
 1148 * process the request, we wait for it to finish and then free the
 1149 * object locally. If the reference counter is already 1, the request
 1150 * is not used by anyone else and can be freed safely.
1151 */
1152 err = -ETIMEDOUT;
1153 timeout = HZ;
1154
1155 pohmelfs_mcache_remove_locked(psb, m);
1156 } while (atomic_read(&m->refcnt) != 1);
1157
1158 pohmelfs_mcache_put(psb, m);
1159
1160 dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
1161
1162 return err;
1163
1164err_out_put:
1165 pohmelfs_mcache_put(psb, m);
1166 return err;
1167}
1168
1169static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1170{
1171 struct inode *inode = dentry->d_inode;
 1172#if 0
 1173 struct pohmelfs_inode *pi = POHMELFS_I(inode);
 1174 int err;
 1175
 1176 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
 1177 if (err)
 1178 return err;
 1179 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n",
 1180 __func__, pi->ino, inode->i_mode, inode->i_uid,
 1181 inode->i_gid, inode->i_size);
 1182#endif
1183
1184 generic_fillattr(inode, stat);
1185 return 0;
1186}
1187
1188const struct inode_operations pohmelfs_file_inode_operations = {
1189 .setattr = pohmelfs_setattr,
1190 .getattr = pohmelfs_getattr,
1191 .setxattr = pohmelfs_setxattr,
1192 .getxattr = pohmelfs_getxattr,
1193};
1194
1195/*
1196 * Fill inode data: mode, size, operation callbacks and so on...
1197 */
1198void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
1199{
1200 inode->i_mode = info->mode;
1201 inode->i_nlink = info->nlink;
1202 inode->i_uid = info->uid;
1203 inode->i_gid = info->gid;
1204 inode->i_blocks = info->blocks;
1205 inode->i_rdev = info->rdev;
1206 inode->i_size = info->size;
1207 inode->i_version = info->version;
1208 inode->i_blkbits = ffs(info->blocksize);
1209
1210 dprintk("%s: inode: %p, num: %lu/%llu inode is regular: %d, dir: %d, link: %d, mode: %o, size: %llu.\n",
1211 __func__, inode, inode->i_ino, info->ino,
1212 S_ISREG(inode->i_mode), S_ISDIR(inode->i_mode),
1213 S_ISLNK(inode->i_mode), inode->i_mode, inode->i_size);
1214
1215 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
1216
1217 /*
1218 * i_mapping is a pointer to i_data during inode initialization.
1219 */
1220 inode->i_data.a_ops = &pohmelfs_aops;
1221
1222 if (S_ISREG(inode->i_mode)) {
1223 inode->i_fop = &pohmelfs_file_ops;
1224 inode->i_op = &pohmelfs_file_inode_operations;
1225 } else if (S_ISDIR(inode->i_mode)) {
1226 inode->i_fop = &pohmelfs_dir_fops;
1227 inode->i_op = &pohmelfs_dir_inode_ops;
1228 } else if (S_ISLNK(inode->i_mode)) {
1229 inode->i_op = &pohmelfs_symlink_inode_operations;
1230 inode->i_fop = &pohmelfs_file_ops;
1231 } else {
1232 inode->i_fop = &generic_ro_fops;
1233 }
1234}
1235
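/*
 * ->drop_inode() callback: removes the inode from the per-superblock drop
 * list before handing it to generic_drop_inode().
 */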
1236static void pohmelfs_drop_inode(struct inode *inode)
1237{
1238 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1239 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1240
1241 spin_lock(&psb->ino_lock);
1242 list_del_init(&pi->inode_entry);
1243 spin_unlock(&psb->ino_lock);
1244
1245 generic_drop_inode(inode);
1246}
1247
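/*
 * Pops the first inode from the given drop list under psb->ino_lock and
 * returns it together with its accumulated drop count.
 */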
1248static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb,
1249 struct list_head *head, unsigned int *count)
1250{
1251 struct pohmelfs_inode *pi = NULL;
1252
1253 spin_lock(&psb->ino_lock);
1254 if (!list_empty(head)) {
1255 pi = list_entry(head->next, struct pohmelfs_inode,
1256 inode_entry);
1257 list_del_init(&pi->inode_entry);
1258 *count = pi->drop_count;
1259 pi->drop_count = 0;
1260 }
1261 spin_unlock(&psb->ino_lock);
1262
1263 return pi;
1264}
1265
1266static void pohmelfs_flush_transactions(struct pohmelfs_sb *psb)
1267{
1268 struct pohmelfs_config *c;
1269
1270 mutex_lock(&psb->state_lock);
1271 list_for_each_entry(c, &psb->state_list, config_entry) {
1272 pohmelfs_state_flush_transactions(&c->state);
1273 }
1274 mutex_unlock(&psb->state_lock);
1275}
1276
1277/*
1278 * ->put_super() callback. Invoked before superblock is destroyed,
1279 * so it has to clean all private data.
1280 */
1281static void pohmelfs_put_super(struct super_block *sb)
1282{
1283 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1284 struct pohmelfs_inode *pi;
1285 unsigned int count;
1286 unsigned int in_drop_list = 0;
1287 struct inode *inode, *tmp;
1288
1289 dprintk("%s.\n", __func__);
1290
1291 /*
1292 * Kill pending transactions, which could affect inodes in-flight.
1293 */
1294 pohmelfs_flush_transactions(psb);
1295
1296 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) {
1297 inode = &pi->vfs_inode;
1298
1299 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1300 __func__, pi->ino, pi, inode, count);
1301
1302 if (atomic_read(&inode->i_count) != count) {
1303 printk("%s: ino: %llu, pi: %p, inode: %p, count: %u, i_count: %d.\n",
1304 __func__, pi->ino, pi, inode, count,
1305 atomic_read(&inode->i_count));
1306 count = atomic_read(&inode->i_count);
1307 in_drop_list++;
1308 }
1309
1310 while (count--)
1311 iput(&pi->vfs_inode);
1312 }
1313
1314 list_for_each_entry_safe(inode, tmp, &sb->s_inodes, i_sb_list) {
1315 pi = POHMELFS_I(inode);
1316
1317 dprintk("%s: ino: %llu, pi: %p, inode: %p, i_count: %u.\n",
1318 __func__, pi->ino, pi, inode, atomic_read(&inode->i_count));
1319
1320 /*
 1321 * These are special inodes: they were created during
 1322 * directory reading or lookup and were never bound to a dentry,
 1323 * so they live here with a reference counter of 1 and prevent
 1324 * umount from succeeding, since it believes they are busy.
1325 */
1326 count = atomic_read(&inode->i_count);
1327 if (count) {
1328 list_del_init(&inode->i_sb_list);
1329 while (count--)
1330 iput(&pi->vfs_inode);
1331 }
1332 }
1333
1334 psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
1335 cancel_rearming_delayed_work(&psb->dwork);
1336 cancel_rearming_delayed_work(&psb->drop_dwork);
1337 flush_scheduled_work();
1338
1339 dprintk("%s: stopped workqueues.\n", __func__);
1340
1341 pohmelfs_crypto_exit(psb);
1342 pohmelfs_state_exit(psb);
1343
1344 kfree(psb);
1345 sb->s_fs_info = NULL;
 1346}
 1347
1348static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1349{
1350 struct super_block *sb = dentry->d_sb;
1351 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1352
1353 /*
1354 * There are no filesystem size limits yet.
1355 */
1356 memset(buf, 0, sizeof(struct kstatfs));
1357
1358 buf->f_type = POHMELFS_MAGIC_NUM; /* 'POH.' */
1359 buf->f_bsize = sb->s_blocksize;
1360 buf->f_files = psb->ino;
1361 buf->f_namelen = 255;
1362 buf->f_files = atomic_long_read(&psb->total_inodes);
1363 buf->f_bfree = buf->f_bavail = psb->avail_size >> PAGE_SHIFT;
1364 buf->f_blocks = psb->total_size >> PAGE_SHIFT;
1365
1366 dprintk("%s: total: %llu, avail: %llu, inodes: %llu, bsize: %lu.\n",
1367 __func__, psb->total_size, psb->avail_size, buf->f_files, sb->s_blocksize);
1368
1369 return 0;
1370}
1371
1372static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
1373{
1374 struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
1375
1376 seq_printf(seq, ",idx=%u", psb->idx);
1377 seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
1378 seq_printf(seq, ",drop_scan_timeout=%u", jiffies_to_msecs(psb->drop_scan_timeout));
1379 seq_printf(seq, ",wait_on_page_timeout=%u", jiffies_to_msecs(psb->wait_on_page_timeout));
1380 seq_printf(seq, ",trans_retries=%u", psb->trans_retries);
1381 seq_printf(seq, ",crypto_thread_num=%u", psb->crypto_thread_num);
1382 seq_printf(seq, ",trans_max_pages=%u", psb->trans_max_pages);
1383 seq_printf(seq, ",mcache_timeout=%u", jiffies_to_msecs(psb->mcache_timeout));
1384 if (psb->crypto_fail_unsupported)
1385 seq_printf(seq, ",crypto_fail_unsupported");
1386
1387 return 0;
1388}
1389
1390enum {
1391 pohmelfs_opt_idx,
1392 pohmelfs_opt_crypto_thread_num,
1393 pohmelfs_opt_trans_max_pages,
1394 pohmelfs_opt_crypto_fail_unsupported,
1395
1396 /* Remountable options */
1397 pohmelfs_opt_trans_scan_timeout,
1398 pohmelfs_opt_drop_scan_timeout,
1399 pohmelfs_opt_wait_on_page_timeout,
1400 pohmelfs_opt_trans_retries,
1401 pohmelfs_opt_mcache_timeout,
1402};
1403
1404static struct match_token pohmelfs_tokens[] = {
1405 {pohmelfs_opt_idx, "idx=%u"},
1406 {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
1407 {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
1408 {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
1409 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"},
1410 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"},
1411 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"},
1412 {pohmelfs_opt_trans_retries, "trans_retries=%u"},
1413 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"},
1414};
1415
 1416static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount)
1417{
1418 char *p;
1419 substring_t args[MAX_OPT_ARGS];
1420 int option, err;
1421
1422 if (!options)
1423 return 0;
1424
1425 while ((p = strsep(&options, ",")) != NULL) {
1426 int token;
1427 if (!*p)
1428 continue;
1429
1430 token = match_token(p, pohmelfs_tokens, args);
1431
1432 err = match_int(&args[0], &option);
1433 if (err)
1434 return err;
1435
1436 if (remount && token <= pohmelfs_opt_crypto_fail_unsupported)
1437 continue;
1438
1439 switch (token) {
1440 case pohmelfs_opt_idx:
1441 psb->idx = option;
1442 break;
1443 case pohmelfs_opt_trans_scan_timeout:
1444 psb->trans_scan_timeout = msecs_to_jiffies(option);
1445 break;
1446 case pohmelfs_opt_drop_scan_timeout:
1447 psb->drop_scan_timeout = msecs_to_jiffies(option);
1448 break;
1449 case pohmelfs_opt_wait_on_page_timeout:
1450 psb->wait_on_page_timeout = msecs_to_jiffies(option);
1451 break;
1452 case pohmelfs_opt_mcache_timeout:
1453 psb->mcache_timeout = msecs_to_jiffies(option);
1454 break;
1455 case pohmelfs_opt_trans_retries:
1456 psb->trans_retries = option;
1457 break;
1458 case pohmelfs_opt_crypto_thread_num:
1459 psb->crypto_thread_num = option;
1460 break;
1461 case pohmelfs_opt_trans_max_pages:
1462 psb->trans_max_pages = option;
1463 break;
1464 case pohmelfs_opt_crypto_fail_unsupported:
1465 psb->crypto_fail_unsupported = 1;
1466 break;
1467 default:
1468 return -EINVAL;
1469 }
1470 }
1471
1472 return 0;
1473}
1474
1475static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
1476{
1477 int err;
1478 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1479 unsigned long old_sb_flags = sb->s_flags;
1480
1481 err = pohmelfs_parse_options(data, psb, 1);
1482 if (err)
1483 goto err_out_restore;
1484
1485 if (!(*flags & MS_RDONLY))
1486 sb->s_flags &= ~MS_RDONLY;
1487 return 0;
1488
1489err_out_restore:
1490 sb->s_flags = old_sb_flags;
1491 return err;
1492}
1493
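/*
 * Writes back dirty pages of an owned inode, truncates its page cache and
 * releases the POHMELFS write lock.
 */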
1494static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count)
1495{
1496 struct inode *inode = &pi->vfs_inode;
1497
1498 dprintk("%s: %p: ino: %llu, owned: %d.\n",
1499 __func__, inode, pi->ino, test_bit(NETFS_INODE_OWNED, &pi->state));
1500
1501 mutex_lock(&inode->i_mutex);
1502 if (test_and_clear_bit(NETFS_INODE_OWNED, &pi->state)) {
1503 filemap_fdatawrite(inode->i_mapping);
1504 inode->i_sb->s_op->write_inode(inode, 0);
1505 }
1506
1507 truncate_inode_pages(inode->i_mapping, 0);
1508
1509 pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1510 mutex_unlock(&inode->i_mutex);
1511}
1512
1513static void pohmelfs_put_inode_count(struct pohmelfs_inode *pi, unsigned int count)
1514{
1515 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1516 __func__, pi->ino, pi, &pi->vfs_inode, count);
1517
1518 if (test_and_clear_bit(NETFS_INODE_NEED_FLUSH, &pi->state))
1519 pohmelfs_flush_inode(pi, count);
1520
1521 while (count--)
1522 iput(&pi->vfs_inode);
1523}
1524
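/*
 * Delayed work that drops inodes queued on psb->drop_list and reschedules
 * itself every drop_scan_timeout jiffies while the timeout is nonzero.
 */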
1525static void pohmelfs_drop_scan(struct work_struct *work)
1526{
1527 struct pohmelfs_sb *psb =
1528 container_of(work, struct pohmelfs_sb, drop_dwork.work);
1529 struct pohmelfs_inode *pi;
1530 unsigned int count = 0;
1531
1532 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) {
1533 pohmelfs_put_inode_count(pi, count);
1534 }
1535 pohmelfs_check_states(psb);
1536
1537 if (psb->drop_scan_timeout)
1538 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1539}
1540
1541/*
1542 * Run through all transactions starting from the oldest,
1543 * drop transaction from current state and try to send it
1544 * to all remote nodes, which are currently installed.
1545 */
1546static void pohmelfs_trans_scan_state(struct netfs_state *st)
1547{
1548 struct rb_node *rb_node;
1549 struct netfs_trans_dst *dst;
1550 struct pohmelfs_sb *psb = st->psb;
1551 unsigned int timeout = psb->trans_scan_timeout;
1552 struct netfs_trans *t;
1553 int err;
1554
1555 mutex_lock(&st->trans_lock);
1556 for (rb_node = rb_first(&st->trans_root); rb_node; ) {
1557 dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
1558 t = dst->trans;
1559
1560 if (timeout && time_after(dst->send_time + timeout, jiffies)
1561 && dst->retries == 0)
1562 break;
1563
1564 dprintk("%s: t: %p, gen: %u, st: %p, retries: %u, max: %u.\n",
1565 __func__, t, t->gen, st, dst->retries, psb->trans_retries);
1566 netfs_trans_get(t);
1567
1568 rb_node = rb_next(rb_node);
1569
1570 err = -ETIMEDOUT;
1571 if (timeout && (++dst->retries < psb->trans_retries)) {
1572 err = netfs_trans_resend(t, psb);
1573 }
1574
1575 if (err || (t->flags & NETFS_TRANS_SINGLE_DST)) {
1576 if (netfs_trans_remove_nolock(dst, st))
1577 netfs_trans_drop_dst_nostate(dst);
1578 }
1579
1580 t->result = err;
1581 netfs_trans_put(t);
1582 }
1583 mutex_unlock(&st->trans_lock);
1584}
1585
1586/*
1587 * Walk through all installed network states and resend all
1588 * transactions, which are old enough.
1589 */
1590static void pohmelfs_trans_scan(struct work_struct *work)
1591{
1592 struct pohmelfs_sb *psb =
1593 container_of(work, struct pohmelfs_sb, dwork.work);
1594 struct netfs_state *st;
1595 struct pohmelfs_config *c;
1596
1597 mutex_lock(&psb->state_lock);
1598 list_for_each_entry(c, &psb->state_list, config_entry) {
1599 st = &c->state;
1600
1601 pohmelfs_trans_scan_state(st);
1602 }
1603 mutex_unlock(&psb->state_lock);
1604
1605 /*
 1606 * If no timeout is specified, the system is in the middle of the umount
 1607 * process, so there is no need to reschedule the scan.
1608 */
1609 if (psb->trans_scan_timeout)
1610 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1611}
1612
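/*
 * Builds and sends a metadata command: the payload contains the object path
 * (optionally extended with 'addon') and, for NETFS_INODE_INFO, a serialized
 * netfs_inode_info taken from the in-core inode.
 */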
1613int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
1614 unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start)
1615{
1616 struct inode *inode = &pi->vfs_inode;
1617 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1618 int err = 0, sz;
1619 struct netfs_trans *t;
1620 int path_len, addon_len = 0;
1621 void *data;
1622 struct netfs_inode_info *info;
1623 struct netfs_cmd *cmd;
1624
1625 dprintk("%s: ino: %llu, cmd: %u, addon: %p.\n", __func__, pi->ino, cmd_op, addon);
1626
1627 path_len = pohmelfs_path_length(pi);
1628 if (path_len < 0) {
1629 err = path_len;
1630 goto err_out_exit;
1631 }
1632
1633 if (addon)
1634 addon_len = strlen(addon) + 1; /* 0-byte */
1635 sz = addon_len;
1636
1637 if (cmd_op == NETFS_INODE_INFO)
1638 sz += sizeof(struct netfs_inode_info);
1639
1640 t = netfs_trans_alloc(psb, sz + path_len, flags, 0);
1641 if (!t) {
1642 err = -ENOMEM;
1643 goto err_out_exit;
1644 }
1645 t->complete = complete;
1646 t->private = priv;
1647
1648 cmd = netfs_trans_current(t);
1649 data = (void *)(cmd + 1);
1650
1651 if (cmd_op == NETFS_INODE_INFO) {
1652 info = (struct netfs_inode_info *)(cmd + 1);
1653 data = (void *)(info + 1);
1654
1655 /*
1656 * We are under i_mutex, can read and change whatever we want...
1657 */
1658 info->mode = inode->i_mode;
1659 info->nlink = inode->i_nlink;
1660 info->uid = inode->i_uid;
1661 info->gid = inode->i_gid;
1662 info->blocks = inode->i_blocks;
1663 info->rdev = inode->i_rdev;
1664 info->size = inode->i_size;
1665 info->version = inode->i_version;
1666
1667 netfs_convert_inode_info(info);
1668 }
1669
1670 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1671 if (path_len < 0)
1672 goto err_out_free;
1673
1674 dprintk("%s: path_len: %d.\n", __func__, path_len);
1675
1676 if (addon) {
1677 path_len--; /* Do not place null-byte before the addon */
1678 path_len += sprintf(data + path_len, "/%s", addon) + 1; /* 0 - byte */
1679 }
1680
1681 sz += path_len;
1682
1683 cmd->cmd = cmd_op;
1684 cmd->ext = path_len;
1685 cmd->size = sz;
1686 cmd->id = id;
1687 cmd->start = start;
1688
1689 netfs_convert_cmd(cmd);
1690 netfs_trans_update(cmd, t, sz);
1691
1692 /*
 1693 * Note that it is possible to leak an error here: the transaction
 1694 * callback will not be invoked for an allocation-path failure.
1695 */
1696 return netfs_trans_finish(t, psb);
1697
1698err_out_free:
1699 netfs_trans_free(t);
1700err_out_exit:
1701 if (complete)
1702 complete(NULL, 0, priv, err);
1703 return err;
1704}
1705
1706int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
1707 netfs_trans_complete_t complete, void *priv, u64 start)
1708{
1709 return pohmelfs_meta_command_data(pi, pi->ino, cmd_op, NULL, flags, complete, priv, start);
1710}
1711
1712/*
 1713 * Sends a request and waits for the POHMELFS root capabilities response,
 1714 * which carries the server's information about the export: its size,
 1715 * permissions, number of objects, available space and so on.
1716 */
1717static int pohmelfs_root_handshake(struct pohmelfs_sb *psb)
1718{
1719 struct netfs_trans *t;
1720 struct netfs_cmd *cmd;
1721 int err = -ENOMEM;
1722
1723 t = netfs_trans_alloc(psb, 0, 0, 0);
1724 if (!t)
1725 goto err_out_exit;
1726
1727 cmd = netfs_trans_current(t);
1728
1729 cmd->cmd = NETFS_CAPABILITIES;
1730 cmd->id = POHMELFS_ROOT_CAPABILITIES;
1731 cmd->size = 0;
1732 cmd->start = 0;
1733 cmd->ext = 0;
1734 cmd->csize = 0;
1735
1736 netfs_convert_cmd(cmd);
1737 netfs_trans_update(cmd, t, 0);
1738
1739 err = netfs_trans_finish(t, psb);
1740 if (err)
1741 goto err_out_exit;
1742
1743 psb->flags = ~0;
1744 err = wait_event_interruptible_timeout(psb->wait,
1745 (psb->flags != ~0),
1746 psb->wait_on_page_timeout);
1747 if (!err) {
1748 err = -ETIMEDOUT;
1749 } else {
1750 err = -psb->flags;
1751 }
1752
1753 if (err)
1754 goto err_out_exit;
1755
1756 return 0;
1757
1758err_out_exit:
1759 return err;
1760}
1761
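/*
 * ->show_stats() callback: prints one line per configured network state with
 * its address, socket type, protocol, whether it is connected, priority and
 * permissions.
 */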
1762static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
1763{
1764 struct netfs_state *st;
1765 struct pohmelfs_ctl *ctl;
1766 struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
1767 struct pohmelfs_config *c;
1768
1769 mutex_lock(&psb->state_lock);
1770
1771 seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n");
1772
1773 list_for_each_entry(c, &psb->state_list, config_entry) {
1774 st = &c->state;
1775 ctl = &st->ctl;
1776
1777 seq_printf(m, "%u ", ctl->idx);
1778 if (ctl->addr.sa_family == AF_INET) {
1779 struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
1780 //seq_printf(m, "%pi4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port));
1781 seq_printf(m, "%u.%u.%u.%u:%u", NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port));
1782 } else if (ctl->addr.sa_family == AF_INET6) {
1783 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
1784 seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
1785 } else {
1786 unsigned int i;
1787 for (i=0; i<ctl->addrlen; ++i)
1788 seq_printf(m, "%02x.", ctl->addr.addr[i]);
1789 }
1790
1791 seq_printf(m, " %u %u %d %u %x\n",
1792 ctl->type, ctl->proto,
1793 st->socket != NULL,
1794 ctl->prio, ctl->perm);
1795 }
1796 mutex_unlock(&psb->state_lock);
1797
1798 return 0;
1799}
1800
1801static const struct super_operations pohmelfs_sb_ops = {
1802 .alloc_inode = pohmelfs_alloc_inode,
1803 .destroy_inode = pohmelfs_destroy_inode,
1804 .drop_inode = pohmelfs_drop_inode,
1805 .write_inode = pohmelfs_write_inode,
1806 .put_super = pohmelfs_put_super,
1807 .remount_fs = pohmelfs_remount,
1808 .statfs = pohmelfs_statfs,
1809 .show_options = pohmelfs_show_options,
 1810 .show_stats = pohmelfs_show_stats,
1811};
1812
1813/*
1814 * Allocate private superblock and create root dir.
1815 */
1816static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
1817{
1818 struct pohmelfs_sb *psb;
1819 int err = -ENOMEM;
1820 struct inode *root;
1821 struct pohmelfs_inode *npi;
1822 struct qstr str;
1823
1824 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL);
1825 if (!psb)
1826 goto err_out_exit;
1827
1828 sb->s_fs_info = psb;
1829 sb->s_op = &pohmelfs_sb_ops;
1830 sb->s_magic = POHMELFS_MAGIC_NUM;
1831 sb->s_maxbytes = MAX_LFS_FILESIZE;
1832 sb->s_blocksize = PAGE_SIZE;
1833
1834 psb->sb = sb;
1835
1836 psb->ino = 2;
1837 psb->idx = 0;
1838 psb->active_state = NULL;
1839 psb->trans_retries = 5;
1840 psb->trans_data_size = PAGE_SIZE;
1841 psb->drop_scan_timeout = msecs_to_jiffies(1000);
1842 psb->trans_scan_timeout = msecs_to_jiffies(5000);
1843 psb->wait_on_page_timeout = msecs_to_jiffies(5000);
1844 init_waitqueue_head(&psb->wait);
1845
1846 spin_lock_init(&psb->ino_lock);
1847
1848 INIT_LIST_HEAD(&psb->drop_list);
1849
1850 mutex_init(&psb->mcache_lock);
1851 psb->mcache_root = RB_ROOT;
1852 psb->mcache_timeout = msecs_to_jiffies(5000);
1853 atomic_long_set(&psb->mcache_gen, 0);
1854
1855 psb->trans_max_pages = 100;
1856
1857 psb->crypto_align_size = 16;
1858 psb->crypto_attached_size = 0;
1859 psb->hash_strlen = 0;
1860 psb->cipher_strlen = 0;
1861 psb->perform_crypto = 0;
1862 psb->crypto_thread_num = 2;
1863 psb->crypto_fail_unsupported = 0;
1864 mutex_init(&psb->crypto_thread_lock);
1865 INIT_LIST_HEAD(&psb->crypto_ready_list);
1866 INIT_LIST_HEAD(&psb->crypto_active_list);
1867
1868 atomic_set(&psb->trans_gen, 1);
1869 atomic_set(&psb->total_inodes, 0);
1870
1871 mutex_init(&psb->state_lock);
1872 INIT_LIST_HEAD(&psb->state_list);
1873
 1874 err = pohmelfs_parse_options((char *) data, psb, 0);
1875 if (err)
1876 goto err_out_free_sb;
1877
1878 err = pohmelfs_copy_crypto(psb);
1879 if (err)
1880 goto err_out_free_sb;
1881
1882 err = pohmelfs_state_init(psb);
1883 if (err)
1884 goto err_out_free_strings;
1885
1886 err = pohmelfs_crypto_init(psb);
1887 if (err)
1888 goto err_out_state_exit;
1889
1890 err = pohmelfs_root_handshake(psb);
1891 if (err)
1892 goto err_out_crypto_exit;
1893
1894 str.name = "/";
1895 str.hash = jhash("/", 1, 0);
1896 str.len = 1;
1897
1898 npi = pohmelfs_create_entry_local(psb, NULL, &str, 0, 0755|S_IFDIR);
1899 if (IS_ERR(npi)) {
1900 err = PTR_ERR(npi);
1901 goto err_out_crypto_exit;
1902 }
1903 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
1904 clear_bit(NETFS_INODE_OWNED, &npi->state);
1905
1906 root = &npi->vfs_inode;
1907
1908 sb->s_root = d_alloc_root(root);
1909 if (!sb->s_root)
1910 goto err_out_put_root;
1911
1912 INIT_DELAYED_WORK(&psb->drop_dwork, pohmelfs_drop_scan);
1913 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1914
1915 INIT_DELAYED_WORK(&psb->dwork, pohmelfs_trans_scan);
1916 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1917
1918 return 0;
1919
1920err_out_put_root:
1921 iput(root);
1922err_out_crypto_exit:
1923 pohmelfs_crypto_exit(psb);
1924err_out_state_exit:
1925 pohmelfs_state_exit(psb);
1926err_out_free_strings:
1927 kfree(psb->cipher_string);
1928 kfree(psb->hash_string);
1929err_out_free_sb:
1930 kfree(psb);
1931err_out_exit:
1932
1933 dprintk("%s: err: %d.\n", __func__, err);
1934 return err;
1935}
1936
1937/*
1938 * Some VFS magic here...
1939 */
1940static int pohmelfs_get_sb(struct file_system_type *fs_type,
1941 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
1942{
1943 return get_sb_nodev(fs_type, flags, data, pohmelfs_fill_super,
1944 mnt);
1945}
1946
1947/*
 1948 * We need this to sync all inodes early, since when writeback
 1949 * is invoked from the umount/mntput path the dcache has already been
 1950 * shrunk (see generic_shutdown_super()) and no inode can access its path.
1951 */
1952static void pohmelfs_kill_super(struct super_block *sb)
1953{
1954 struct writeback_control wbc = {
1955 .sync_mode = WB_SYNC_ALL,
1956 .range_start = 0,
1957 .range_end = LLONG_MAX,
1958 .nr_to_write = LONG_MAX,
1959 };
1960 generic_sync_sb_inodes(sb, &wbc);
1961
1962 kill_anon_super(sb);
1963}
1964
1965static struct file_system_type pohmel_fs_type = {
1966 .owner = THIS_MODULE,
1967 .name = "pohmel",
1968 .get_sb = pohmelfs_get_sb,
 1969 .kill_sb = pohmelfs_kill_super,
1970};
1971
1972/*
 1973 * Cache and module initialization and freeing routines.
1974 */
1975static void pohmelfs_init_once(void *data)
1976{
1977 struct pohmelfs_inode *pi = data;
1978
1979 inode_init_once(&pi->vfs_inode);
1980}
1981
1982static int __init pohmelfs_init_inodecache(void)
1983{
1984 pohmelfs_inode_cache = kmem_cache_create("pohmelfs_inode_cache",
1985 sizeof(struct pohmelfs_inode),
1986 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
1987 pohmelfs_init_once);
1988 if (!pohmelfs_inode_cache)
1989 return -ENOMEM;
1990
1991 return 0;
1992}
1993
1994static void pohmelfs_destroy_inodecache(void)
1995{
1996 kmem_cache_destroy(pohmelfs_inode_cache);
1997}
1998
1999static int __init init_pohmel_fs(void)
2000{
2001 int err;
2002
2003 err = pohmelfs_config_init();
2004 if (err)
2005 goto err_out_exit;
2006
2007 err = pohmelfs_init_inodecache();
2008 if (err)
2009 goto err_out_config_exit;
2010
2011 err = pohmelfs_mcache_init();
2012 if (err)
2013 goto err_out_destroy;
2014
2015 err = netfs_trans_init();
2016 if (err)
2017 goto err_out_mcache_exit;
2018
2019 err = register_filesystem(&pohmel_fs_type);
2020 if (err)
2021 goto err_out_trans;
2022
2023 return 0;
2024
2025err_out_trans:
2026 netfs_trans_exit();
2027err_out_mcache_exit:
2028 pohmelfs_mcache_exit();
2029err_out_destroy:
2030 pohmelfs_destroy_inodecache();
2031err_out_config_exit:
2032 pohmelfs_config_exit();
2033err_out_exit:
2034 return err;
2035}
2036
2037static void __exit exit_pohmel_fs(void)
2038{
2039 unregister_filesystem(&pohmel_fs_type);
2040 pohmelfs_destroy_inodecache();
2041 pohmelfs_mcache_exit();
2042 pohmelfs_config_exit();
2043 netfs_trans_exit();
2044}
2045
2046module_init(init_pohmel_fs);
2047module_exit(exit_pohmel_fs);
2048
2049MODULE_LICENSE("GPL");
2050MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
2051MODULE_DESCRIPTION("Pohmel filesystem");