Staging: pohmelfs: avoid null dereference
drivers/staging/pohmelfs/inode.c
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/backing-dev.h>
18#include <linux/crypto.h>
19#include <linux/fs.h>
20#include <linux/jhash.h>
21#include <linux/hash.h>
22#include <linux/ktime.h>
23#include <linux/mm.h>
24#include <linux/mount.h>
25#include <linux/pagemap.h>
26#include <linux/pagevec.h>
27#include <linux/parser.h>
28#include <linux/swap.h>
29#include <linux/slab.h>
30#include <linux/statfs.h>
31#include <linux/writeback.h>
32#include <linux/quotaops.h>
33
34#include "netfs.h"
35
36#define POHMELFS_MAGIC_NUM 0x504f482e
37
38static struct kmem_cache *pohmelfs_inode_cache;
39
40/*
41 * Removes inode from all trees, drops local name cache and removes all queued
42 * requests for object removal.
43 */
44void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi)
45{
46 mutex_lock(&pi->offset_lock);
47 pohmelfs_free_names(pi);
48 mutex_unlock(&pi->offset_lock);
49
50 dprintk("%s: deleted stuff in ino: %llu.\n", __func__, pi->ino);
51}
52
53/*
54 * Sync inode to server.
 55 * Returns zero on success and a negative error value otherwise.
 56 * It gathers the path to the root directory into structures containing
 57 * creation mode, permissions and names, so that the whole path
 58 * to the given inode can be created using only a single network command.
59 */
60int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans)
61{
62 struct pohmelfs_inode *pi = POHMELFS_I(inode);
63 int err = -ENOMEM, size;
64 struct netfs_cmd *cmd;
65 void *data;
66 int cur_len = netfs_trans_cur_len(trans);
67
68 if (unlikely(cur_len < 0))
69 return -ETOOSMALL;
70
71 cmd = netfs_trans_current(trans);
72 cur_len -= sizeof(struct netfs_cmd);
73
74 data = (void *)(cmd + 1);
75
76 err = pohmelfs_construct_path_string(pi, data, cur_len);
77 if (err < 0)
78 goto err_out_exit;
79
80 size = err;
81
82 cmd->start = i_size_read(inode);
83 cmd->cmd = NETFS_CREATE;
84 cmd->size = size;
85 cmd->id = pi->ino;
86 cmd->ext = inode->i_mode;
87
88 netfs_convert_cmd(cmd);
89
90 netfs_trans_update(cmd, trans, size);
91
92 return 0;
93
94err_out_exit:
95 printk("%s: completed ino: %llu, err: %d.\n", __func__, pi->ino, err);
96 return err;
97}
98
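/*
 * Writeback transaction completion callback: ends writeback on every page
 * attached to the transaction, re-dirties pages and marks them with an error
 * if the transfer failed, then unlocks them and drops the references taken
 * when they were gathered in ->writepages().
 */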
99static int pohmelfs_write_trans_complete(struct page **pages, unsigned int page_num,
100 void *private, int err)
101{
102 unsigned i;
103
104 dprintk("%s: pages: %lu-%lu, page_num: %u, err: %d.\n",
105 __func__, pages[0]->index, pages[page_num-1]->index,
106 page_num, err);
107
108 for (i = 0; i < page_num; i++) {
109 struct page *page = pages[i];
110
111 if (!page)
112 continue;
113
114 end_page_writeback(page);
115
116 if (err < 0) {
117 SetPageError(page);
118 set_page_dirty(page);
119 }
120
121 unlock_page(page);
122 page_cache_release(page);
123
124 /* dprintk("%s: %3u/%u: page: %p.\n", __func__, i, page_num, page); */
125 }
126 return err;
127}
128
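/*
 * Returns non-zero if the mapping has at least one page tagged dirty at or
 * after @index. Used by ->writepages() to decide whether another transaction
 * is worth building.
 */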
129static int pohmelfs_inode_has_dirty_pages(struct address_space *mapping, pgoff_t index)
130{
131 int ret;
132 struct page *page;
133
134 rcu_read_lock();
135 ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
136 (void **)&page, index, 1, PAGECACHE_TAG_DIRTY);
137 rcu_read_unlock();
138 return ret;
139}
140
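/*
 * ->writepages() callback: batches up to psb->trans_max_pages dirty pages
 * into a single transaction headed by a NETFS_CREATE command carrying the
 * object's path, marks them for writeback and hands the transaction to the
 * network layer. Completion is handled by pohmelfs_write_trans_complete().
 */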
141static int pohmelfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
142{
143 struct inode *inode = mapping->host;
144 struct pohmelfs_inode *pi = POHMELFS_I(inode);
145 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
146 int err = 0;
147 int done = 0;
148 int nr_pages;
149 pgoff_t index;
150 pgoff_t end; /* Inclusive */
151 int scanned = 0;
152 int range_whole = 0;
153
154 if (wbc->range_cyclic) {
155 index = mapping->writeback_index; /* Start from prev offset */
156 end = -1;
157 } else {
158 index = wbc->range_start >> PAGE_CACHE_SHIFT;
159 end = wbc->range_end >> PAGE_CACHE_SHIFT;
160 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
161 range_whole = 1;
162 scanned = 1;
163 }
164retry:
165 while (!done && (index <= end)) {
166 unsigned int i = min(end - index, (pgoff_t)psb->trans_max_pages);
167 int path_len;
168 struct netfs_trans *trans;
169
170 err = pohmelfs_inode_has_dirty_pages(mapping, index);
171 if (!err)
172 break;
173
174 err = pohmelfs_path_length(pi);
175 if (err < 0)
176 break;
177
178 path_len = err;
179
180 if (path_len <= 2) {
181 err = -ENOENT;
182 break;
183 }
184
185 trans = netfs_trans_alloc(psb, path_len, 0, i);
186 if (!trans) {
187 err = -ENOMEM;
188 break;
189 }
190 trans->complete = &pohmelfs_write_trans_complete;
191
192 trans->page_num = nr_pages = find_get_pages_tag(mapping, &index,
193 PAGECACHE_TAG_DIRTY, trans->page_num,
194 trans->pages);
195
196 dprintk("%s: t: %p, nr_pages: %u, end: %lu, index: %lu, max: %u.\n",
197 __func__, trans, nr_pages, end, index, trans->page_num);
198
199 if (!nr_pages)
200 goto err_out_reset;
201
202 err = pohmelfs_write_inode_create(inode, trans);
203 if (err)
204 goto err_out_reset;
205
206 err = 0;
207 scanned = 1;
208
209 for (i = 0; i < trans->page_num; i++) {
210 struct page *page = trans->pages[i];
211
212 lock_page(page);
213
214 if (unlikely(page->mapping != mapping))
215 goto out_continue;
216
217 if (!wbc->range_cyclic && page->index > end) {
218 done = 1;
219 goto out_continue;
220 }
221
222 if (wbc->sync_mode != WB_SYNC_NONE)
223 wait_on_page_writeback(page);
224
225 if (PageWriteback(page) ||
226 !clear_page_dirty_for_io(page)) {
227 dprintk("%s: not clear for io page: %p, writeback: %d.\n",
228 __func__, page, PageWriteback(page));
229 goto out_continue;
230 }
231
232 set_page_writeback(page);
233
234 trans->attached_size += page_private(page);
235 trans->attached_pages++;
236#if 0
237 dprintk("%s: %u/%u added trans: %p, gen: %u, page: %p, [High: %d], size: %lu, idx: %lu.\n",
238 __func__, i, trans->page_num, trans, trans->gen, page,
239 !!PageHighMem(page), page_private(page), page->index);
240#endif
241 wbc->nr_to_write--;
242
243 if (wbc->nr_to_write <= 0)
244 done = 1;
245
246 continue;
247out_continue:
248 unlock_page(page);
249 trans->pages[i] = NULL;
250 }
251
252 err = netfs_trans_finish(trans, psb);
253 if (err)
254 break;
255
256 continue;
257
258err_out_reset:
259 trans->result = err;
260 netfs_trans_reset(trans);
261 netfs_trans_put(trans);
262 break;
263 }
264
265 if (!scanned && !done) {
266 /*
267 * We hit the last page and there is more work to be done: wrap
268 * back to the start of the file
269 */
270 scanned = 1;
271 index = 0;
272 goto retry;
273 }
274
275 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
276 mapping->writeback_index = index;
277
278 return err;
279}
280
281/*
282 * Inode writeback creation completion callback.
 283 * Only invoked for just-created inodes which do not have pages attached,
 284 * like directories and empty files.
285 */
286static int pohmelfs_write_inode_complete(struct page **pages, unsigned int page_num,
287 void *private, int err)
288{
289 struct inode *inode = private;
290 struct pohmelfs_inode *pi = POHMELFS_I(inode);
291
292 if (inode) {
293 if (err) {
294 mark_inode_dirty(inode);
295 clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
296 } else {
297 set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
298 }
299
300 pohmelfs_put_inode(pi);
301 }
302
303 return err;
304}
305
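/*
 * Sends a NETFS_CREATE command for an inode which has not been synced to the
 * server yet. The transaction pins the inode via igrab(); the reference is
 * put back in the completion callback, pohmelfs_write_inode_complete().
 */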
306int pohmelfs_write_create_inode(struct pohmelfs_inode *pi)
307{
308 struct netfs_trans *t;
309 struct inode *inode = &pi->vfs_inode;
310 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
311 int err;
312
313 if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
314 return 0;
315
316 dprintk("%s: started ino: %llu.\n", __func__, pi->ino);
317
318 err = pohmelfs_path_length(pi);
319 if (err < 0)
320 goto err_out_exit;
321
322 t = netfs_trans_alloc(psb, err + 1, 0, 0);
323 if (!t) {
324 err = -ENOMEM;
 325 goto err_out_exit;
326 }
327 t->complete = pohmelfs_write_inode_complete;
328 t->private = igrab(inode);
329 if (!t->private) {
330 err = -ENOENT;
331 goto err_out_put;
332 }
333
334 err = pohmelfs_write_inode_create(inode, t);
335 if (err)
336 goto err_out_put;
337
338 netfs_trans_finish(t, POHMELFS_SB(inode->i_sb));
339
340 return 0;
341
342err_out_put:
343 t->result = err;
344 netfs_trans_put(t);
345err_out_exit:
346 return err;
347}
348
349/*
 350 * Sync all not-yet-created children of the given directory to the server.
351 */
352static int pohmelfs_write_inode_create_children(struct inode *inode)
353{
354 struct pohmelfs_inode *parent = POHMELFS_I(inode);
355 struct super_block *sb = inode->i_sb;
356 struct pohmelfs_name *n;
357
358 while (!list_empty(&parent->sync_create_list)) {
359 n = NULL;
360 mutex_lock(&parent->offset_lock);
361 if (!list_empty(&parent->sync_create_list)) {
362 n = list_first_entry(&parent->sync_create_list,
363 struct pohmelfs_name, sync_create_entry);
364 list_del_init(&n->sync_create_entry);
365 }
366 mutex_unlock(&parent->offset_lock);
367
368 if (!n)
369 break;
370
371 inode = ilookup(sb, n->ino);
372
373 dprintk("%s: parent: %llu, ino: %llu, inode: %p.\n",
374 __func__, parent->ino, n->ino, inode);
375
376 if (inode && (inode->i_state & I_DIRTY)) {
377 struct pohmelfs_inode *pi = POHMELFS_I(inode);
378 pohmelfs_write_create_inode(pi);
 379 /* pohmelfs_meta_command(pi, NETFS_INODE_INFO, 0, NULL, NULL, 0); */
380 iput(inode);
381 }
382 }
383
384 return 0;
385}
386
387/*
388 * Removes given child from given inode on server.
389 */
390int pohmelfs_remove_child(struct pohmelfs_inode *pi, struct pohmelfs_name *n)
391{
392 return pohmelfs_meta_command_data(pi, pi->ino, NETFS_REMOVE, NULL, 0, NULL, NULL, 0);
393}
394
395/*
396 * Writeback for given inode.
397 */
398static int pohmelfs_write_inode(struct inode *inode, int sync)
399{
400 struct pohmelfs_inode *pi = POHMELFS_I(inode);
401
402 pohmelfs_write_create_inode(pi);
403 pohmelfs_write_inode_create_children(inode);
404
405 return 0;
406}
407
408/*
409 * It is not exported, sorry...
410 */
411static inline wait_queue_head_t *page_waitqueue(struct page *page)
412{
413 const struct zone *zone = page_zone(page);
414
415 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
416}
417
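/*
 * Waits for a page sent to the server for reading to be unlocked by the
 * network thread, up to psb->wait_on_page_timeout. On success the page is
 * marked up to date; on timeout or a pending signal an error is returned.
 */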
418static int pohmelfs_wait_on_page_locked(struct page *page)
419{
420 struct pohmelfs_sb *psb = POHMELFS_SB(page->mapping->host->i_sb);
421 long ret = psb->wait_on_page_timeout;
422 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
423 int err = 0;
424
425 if (!PageLocked(page))
426 return 0;
427
428 for (;;) {
429 prepare_to_wait(page_waitqueue(page),
430 &wait.wait, TASK_INTERRUPTIBLE);
431
432 dprintk("%s: page: %p, locked: %d, uptodate: %d, error: %d, flags: %lx.\n",
433 __func__, page, PageLocked(page), PageUptodate(page),
434 PageError(page), page->flags);
435
436 if (!PageLocked(page))
437 break;
438
439 if (!signal_pending(current)) {
440 ret = schedule_timeout(ret);
441 if (!ret)
442 break;
443 continue;
444 }
445 ret = -ERESTARTSYS;
446 break;
447 }
448 finish_wait(page_waitqueue(page), &wait.wait);
449
450 if (!ret)
451 err = -ETIMEDOUT;
452
453
454 if (!err)
455 SetPageUptodate(page);
456
457 if (err)
458 printk("%s: page: %p, uptodate: %d, locked: %d, err: %d.\n",
459 __func__, page, PageUptodate(page), PageLocked(page), err);
460
461 return err;
462}
463
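/*
 * Single-page read completion callback: pages already marked PageChecked are
 * left alone, otherwise the page is flagged with an error on failure and
 * unlocked so that pohmelfs_wait_on_page_locked() can proceed.
 */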
464static int pohmelfs_read_page_complete(struct page **pages, unsigned int page_num,
465 void *private, int err)
466{
467 struct page *page = private;
468
469 if (PageChecked(page))
470 return err;
471
472 if (err < 0) {
473 dprintk("%s: page: %p, err: %d.\n", __func__, page, err);
474 SetPageError(page);
475 }
476
477 unlock_page(page);
478
479 return err;
480}
481
482/*
483 * Read a page from remote server.
484 * Function will wait until page is unlocked.
485 */
486static int pohmelfs_readpage(struct file *file, struct page *page)
487{
488 struct inode *inode = page->mapping->host;
489 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
490 struct pohmelfs_inode *pi = POHMELFS_I(inode);
491 struct netfs_trans *t;
492 struct netfs_cmd *cmd;
493 int err, path_len;
494 void *data;
495 u64 isize;
496
497 err = pohmelfs_data_lock(pi, page->index << PAGE_CACHE_SHIFT,
498 PAGE_SIZE, POHMELFS_READ_LOCK);
499 if (err)
500 goto err_out_exit;
501
502 isize = i_size_read(inode);
503 if (isize <= page->index << PAGE_CACHE_SHIFT) {
504 SetPageUptodate(page);
505 unlock_page(page);
506 return 0;
507 }
508
509 path_len = pohmelfs_path_length(pi);
510 if (path_len < 0) {
511 err = path_len;
512 goto err_out_exit;
513 }
514
515 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
516 if (!t) {
517 err = -ENOMEM;
518 goto err_out_exit;
519 }
520
521 t->complete = pohmelfs_read_page_complete;
522 t->private = page;
523
524 cmd = netfs_trans_current(t);
525 data = (void *)(cmd + 1);
526
527 err = pohmelfs_construct_path_string(pi, data, path_len);
528 if (err < 0)
529 goto err_out_free;
530
531 path_len = err;
532
533 cmd->id = pi->ino;
534 cmd->start = page->index;
535 cmd->start <<= PAGE_CACHE_SHIFT;
536 cmd->size = PAGE_CACHE_SIZE + path_len;
537 cmd->cmd = NETFS_READ_PAGE;
538 cmd->ext = path_len;
539
540 dprintk("%s: path: '%s', page: %p, ino: %llu, start: %llu, size: %lu.\n",
541 __func__, (char *)data, page, pi->ino, cmd->start, PAGE_CACHE_SIZE);
542
543 netfs_convert_cmd(cmd);
544 netfs_trans_update(cmd, t, path_len);
545
546 err = netfs_trans_finish(t, psb);
547 if (err)
548 goto err_out_return;
549
550 return pohmelfs_wait_on_page_locked(page);
551
552err_out_free:
553 t->result = err;
554 netfs_trans_put(t);
555err_out_exit:
556 SetPageError(page);
557 if (PageLocked(page))
558 unlock_page(page);
559err_out_return:
560 printk("%s: page: %p, start: %lu, size: %lu, err: %d.\n",
561 __func__, page, page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, err);
562
563 return err;
564}
565
566/*
567 * Write begin/end magic.
568 * Allocates a page and writes inode if it was not synced to server before.
569 */
570static int pohmelfs_write_begin(struct file *file, struct address_space *mapping,
571 loff_t pos, unsigned len, unsigned flags,
572 struct page **pagep, void **fsdata)
573{
574 struct inode *inode = mapping->host;
575 struct page *page;
576 pgoff_t index;
577 unsigned start, end;
578 int err;
579
580 *pagep = NULL;
581
582 index = pos >> PAGE_CACHE_SHIFT;
583 start = pos & (PAGE_CACHE_SIZE - 1);
584 end = start + len;
585
586 page = grab_cache_page(mapping, index);
587#if 0
588 dprintk("%s: page: %p pos: %llu, len: %u, index: %lu, start: %u, end: %u, uptodate: %d.\n",
589 __func__, page, pos, len, index, start, end, PageUptodate(page));
590#endif
591 if (!page) {
592 err = -ENOMEM;
593 goto err_out_exit;
594 }
595
596 while (!PageUptodate(page)) {
597 if (start && test_bit(NETFS_INODE_REMOTE_SYNCED, &POHMELFS_I(inode)->state)) {
598 err = pohmelfs_readpage(file, page);
599 if (err)
600 goto err_out_exit;
601
602 lock_page(page);
603 continue;
604 }
605
606 if (len != PAGE_CACHE_SIZE) {
607 void *kaddr = kmap_atomic(page, KM_USER0);
608
609 memset(kaddr + start, 0, PAGE_CACHE_SIZE - start);
610 flush_dcache_page(page);
611 kunmap_atomic(kaddr, KM_USER0);
612 }
613 SetPageUptodate(page);
614 }
615
616 set_page_private(page, end);
617
618 *pagep = page;
619
620 return 0;
621
622err_out_exit:
623 page_cache_release(page);
624 *pagep = NULL;
625
626 return err;
627}
628
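/*
 * ->write_end() counterpart of pohmelfs_write_begin(): zero-fills whatever
 * was not copied, marks the page up to date and dirty, and, if the write
 * extended the file, grows i_size while shrinking the cached available size.
 */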
629static int pohmelfs_write_end(struct file *file, struct address_space *mapping,
630 loff_t pos, unsigned len, unsigned copied,
631 struct page *page, void *fsdata)
632{
633 struct inode *inode = mapping->host;
634
635 if (copied != len) {
636 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
637 void *kaddr = kmap_atomic(page, KM_USER0);
638
639 memset(kaddr + from + copied, 0, len - copied);
640 flush_dcache_page(page);
641 kunmap_atomic(kaddr, KM_USER0);
642 }
643
644 SetPageUptodate(page);
645 set_page_dirty(page);
646#if 0
647 dprintk("%s: page: %p [U: %d, D: %d, L: %d], pos: %llu, len: %u, copied: %u.\n",
648 __func__, page,
649 PageUptodate(page), PageDirty(page), PageLocked(page),
650 pos, len, copied);
651#endif
652 flush_dcache_page(page);
653
654 unlock_page(page);
655 page_cache_release(page);
656
657 if (pos + copied > inode->i_size) {
658 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
659
660 psb->avail_size -= pos + copied - inode->i_size;
661
662 i_size_write(inode, pos + copied);
663 }
664
665 return copied;
666}
667
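/*
 * Readpages transaction completion callback. Note that trans->pages does not
 * point to a page array here: pohmelfs_send_readpages() stores the first page
 * pointer in it, so the contiguous run is looked up again with
 * find_get_pages_contig(). Each page is released twice: once for the
 * reference taken by that lookup and, presumably, once for the reference held
 * since ->readpages().
 */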
668static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int page_num,
669 void *private, int err)
670{
671 struct pohmelfs_inode *pi = private;
672 unsigned int i, num;
673 struct page **pages, *page = (struct page *)__pages;
674 loff_t index = page->index;
675
676 pages = kzalloc(sizeof(void *) * page_num, GFP_NOIO);
677 if (!pages)
678 return -ENOMEM;
679
680 num = find_get_pages_contig(pi->vfs_inode.i_mapping, index, page_num, pages);
681 if (num <= 0) {
682 err = num;
683 goto err_out_free;
684 }
685
686 for (i=0; i<num; ++i) {
687 page = pages[i];
688
689 if (err)
690 printk("%s: %u/%u: page: %p, index: %lu, uptodate: %d, locked: %d, err: %d.\n",
691 __func__, i, num, page, page->index,
692 PageUptodate(page), PageLocked(page), err);
693
694 if (!PageChecked(page)) {
695 if (err < 0)
696 SetPageError(page);
697 unlock_page(page);
698 }
699 page_cache_release(page);
700 page_cache_release(page);
701 }
702
703err_out_free:
704 kfree(pages);
705 return err;
706}
707
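/*
 * Sends one NETFS_READ_PAGES command covering @num contiguous pages starting
 * at @first->index. cmd->size packs the page count in the upper bits and
 * PAGE_CACHE_SHIFT in the low byte; the data area holds the object's path.
 */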
708static int pohmelfs_send_readpages(struct pohmelfs_inode *pi, struct page *first, unsigned int num)
709{
710 struct netfs_trans *t;
711 struct netfs_cmd *cmd;
712 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
713 int err, path_len;
714 void *data;
715
716 err = pohmelfs_data_lock(pi, first->index << PAGE_CACHE_SHIFT,
717 num * PAGE_SIZE, POHMELFS_READ_LOCK);
718 if (err)
719 goto err_out_exit;
720
721 path_len = pohmelfs_path_length(pi);
722 if (path_len < 0) {
723 err = path_len;
724 goto err_out_exit;
725 }
726
727 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
728 if (!t) {
729 err = -ENOMEM;
730 goto err_out_exit;
731 }
732
733 cmd = netfs_trans_current(t);
734 data = (void *)(cmd + 1);
735
736 t->complete = pohmelfs_readpages_trans_complete;
737 t->private = pi;
738 t->page_num = num;
739 t->pages = (struct page **)first;
740
741 err = pohmelfs_construct_path_string(pi, data, path_len);
742 if (err < 0)
743 goto err_out_put;
744
745 path_len = err;
746
747 cmd->cmd = NETFS_READ_PAGES;
748 cmd->start = first->index;
749 cmd->start <<= PAGE_CACHE_SHIFT;
750 cmd->size = (num << 8 | PAGE_CACHE_SHIFT);
751 cmd->id = pi->ino;
752 cmd->ext = path_len;
753
754 dprintk("%s: t: %p, gen: %u, path: '%s', path_len: %u, "
755 "start: %lu, num: %u.\n",
756 __func__, t, t->gen, (char *)data, path_len,
757 first->index, num);
758
759 netfs_convert_cmd(cmd);
760 netfs_trans_update(cmd, t, path_len);
761
762 return netfs_trans_finish(t, psb);
763
764err_out_put:
765 netfs_trans_free(t);
766err_out_exit:
767 pohmelfs_readpages_trans_complete((struct page **)first, num, pi, err);
768 return err;
769}
770
771#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
772
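/*
 * ->readpages() callback: inserts the readahead pages into the page cache and
 * groups runs of contiguous indices (capped at roughly 500 pages) into
 * NETFS_READ_PAGES requests via pohmelfs_send_readpages().
 */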
773static int pohmelfs_readpages(struct file *file, struct address_space *mapping,
774 struct list_head *pages, unsigned nr_pages)
775{
776 unsigned int page_idx, num = 0;
777 struct page *page = NULL, *first = NULL;
778
779 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
780 page = list_to_page(pages);
781
782 prefetchw(&page->flags);
783 list_del(&page->lru);
784
785 if (!add_to_page_cache_lru(page, mapping,
786 page->index, GFP_KERNEL)) {
787
788 if (!num) {
789 num = 1;
790 first = page;
791 continue;
792 }
793
794 dprintk("%s: added to lru page: %p, page_index: %lu, first_index: %lu.\n",
795 __func__, page, page->index, first->index);
796
797 if (unlikely(first->index + num != page->index) || (num > 500)) {
798 pohmelfs_send_readpages(POHMELFS_I(mapping->host),
799 first, num);
800 first = page;
801 num = 0;
802 }
803
804 num++;
805 }
806 }
807 pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num);
808
809 /*
 810 * This is a sync read, so by the time the last page is processed,
 811 * all previous pages are already unlocked and ready to be used.
812 */
813 return 0;
814}
815
816/*
 817 * Small address space operations for POHMELFS.
818 */
819const struct address_space_operations pohmelfs_aops = {
820 .readpage = pohmelfs_readpage,
821 .readpages = pohmelfs_readpages,
822 .writepages = pohmelfs_writepages,
823 .write_begin = pohmelfs_write_begin,
824 .write_end = pohmelfs_write_end,
825 .set_page_dirty = __set_page_dirty_nobuffers,
826};
827
828/*
 829 * ->destroy_inode() callback. Deletes the inode from the caches
830 * and frees private data.
831 */
832static void pohmelfs_destroy_inode(struct inode *inode)
833{
834 struct super_block *sb = inode->i_sb;
835 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
836 struct pohmelfs_inode *pi = POHMELFS_I(inode);
837
 838 /* pohmelfs_data_unlock(pi, 0, inode->i_size, POHMELFS_READ_LOCK); */
839
840 pohmelfs_inode_del_inode(psb, pi);
841
842 dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
843 __func__, pi, &pi->vfs_inode, pi->ino);
844 kmem_cache_free(pohmelfs_inode_cache, pi);
845 atomic_long_dec(&psb->total_inodes);
846}
847
848/*
 849 * ->alloc_inode() callback. Allocates an inode and initializes private data.
850 */
851static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
852{
853 struct pohmelfs_inode *pi;
854
855 pi = kmem_cache_alloc(pohmelfs_inode_cache, GFP_NOIO);
856 if (!pi)
857 return NULL;
858
859 pi->hash_root = RB_ROOT;
860 mutex_init(&pi->offset_lock);
861
862 INIT_LIST_HEAD(&pi->sync_create_list);
863
864 INIT_LIST_HEAD(&pi->inode_entry);
865
866 pi->lock_type = 0;
867 pi->state = 0;
868 pi->total_len = 0;
869 pi->drop_count = 0;
870
871 dprintk("%s: pi: %p, inode: %p.\n", __func__, pi, &pi->vfs_inode);
872
873 atomic_long_inc(&POHMELFS_SB(sb)->total_inodes);
874
875 return &pi->vfs_inode;
876}
877
878/*
879 * We want fsync() to work on POHMELFS.
880 */
881static int pohmelfs_fsync(struct file *file, struct dentry *dentry, int datasync)
882{
883 struct inode *inode = file->f_mapping->host;
884 struct writeback_control wbc = {
885 .sync_mode = WB_SYNC_ALL,
886 .nr_to_write = 0, /* sys_fsync did this */
887 };
888
889 return sync_inode(inode, &wbc);
890}
891
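/*
 * write() entry point: takes the POHMELFS write lock for the affected byte
 * range under i_mutex, pushes the data through the generic AIO write path
 * and then syncs it if the file requires synchronous writes.
 */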
892ssize_t pohmelfs_write(struct file *file, const char __user *buf,
893 size_t len, loff_t *ppos)
894{
895 struct address_space *mapping = file->f_mapping;
896 struct inode *inode = mapping->host;
897 struct pohmelfs_inode *pi = POHMELFS_I(inode);
898 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
899 struct kiocb kiocb;
900 ssize_t ret;
901 loff_t pos = *ppos;
902
903 init_sync_kiocb(&kiocb, file);
904 kiocb.ki_pos = pos;
905 kiocb.ki_left = len;
906
 907 dprintk("%s: len: %zu, pos: %llu.\n", __func__, len, pos);
908
909 mutex_lock(&inode->i_mutex);
910 ret = pohmelfs_data_lock(pi, pos, len, POHMELFS_WRITE_LOCK);
911 if (ret)
912 goto err_out_unlock;
913
 914 ret = __generic_file_aio_write(&kiocb, &iov, 1, &kiocb.ki_pos);
915 *ppos = kiocb.ki_pos;
916
917 mutex_unlock(&inode->i_mutex);
918 WARN_ON(ret < 0);
919
 920 if (ret > 0) {
921 ssize_t err;
922
 923 err = generic_write_sync(file, pos, ret);
924 if (err < 0)
925 ret = err;
926 WARN_ON(ret < 0);
927 }
928
929 return ret;
930
931err_out_unlock:
932 mutex_unlock(&inode->i_mutex);
933 return ret;
934}
935
 936static const struct file_operations pohmelfs_file_ops = {
937 .open = generic_file_open,
938 .fsync = pohmelfs_fsync,
939
940 .llseek = generic_file_llseek,
941
942 .read = do_sync_read,
943 .aio_read = generic_file_aio_read,
944
945 .mmap = generic_file_mmap,
946
947 .splice_read = generic_file_splice_read,
948 .splice_write = generic_file_splice_write,
949
950 .write = pohmelfs_write,
951 .aio_write = generic_file_aio_write,
952};
953
954const struct inode_operations pohmelfs_symlink_inode_operations = {
955 .readlink = generic_readlink,
956 .follow_link = page_follow_link_light,
957 .put_link = page_put_link,
958};
959
960int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
961{
962 int err;
963
964 err = inode_change_ok(inode, attr);
965 if (err) {
966 dprintk("%s: ino: %llu, inode changes are not allowed.\n", __func__, POHMELFS_I(inode)->ino);
967 goto err_out_exit;
968 }
969
970 if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
971 (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
 972 err = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
973 if (err)
974 goto err_out_exit;
975 }
976
977 err = inode_setattr(inode, attr);
978 if (err) {
979 dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
980 goto err_out_exit;
981 }
982
983 dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n",
984 __func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode,
985 inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size);
986
987 return 0;
988
989err_out_exit:
990 return err;
991}
992
993int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr)
994{
995 struct inode *inode = dentry->d_inode;
996 struct pohmelfs_inode *pi = POHMELFS_I(inode);
997 int err;
998
999 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1000 if (err)
1001 goto err_out_exit;
1002
1003 err = security_inode_setattr(dentry, attr);
1004 if (err)
1005 goto err_out_exit;
1006
1007 err = pohmelfs_setattr_raw(inode, attr);
1008 if (err)
1009 goto err_out_exit;
1010
1011 return 0;
1012
1013err_out_exit:
1014 return err;
1015}
1016
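/*
 * Builds and sends an extended attribute request: the data area carries the
 * object's path, the NUL-terminated attribute name and, for a set request,
 * the attribute value. @id holds caller-specific data (the xattr flags for
 * set, the mcache generation for get) and @start holds the attribute size.
 */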
1017static int pohmelfs_send_xattr_req(struct pohmelfs_inode *pi, u64 id, u64 start,
1018 const char *name, const void *value, size_t attrsize, int command)
1019{
1020 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
1021 int err, path_len, namelen = strlen(name) + 1; /* 0-byte */
1022 struct netfs_trans *t;
1023 struct netfs_cmd *cmd;
1024 void *data;
1025
 1026 dprintk("%s: id: %llu, start: %llu, name: '%s', attrsize: %zu, cmd: %d.\n",
1027 __func__, id, start, name, attrsize, command);
1028
1029 path_len = pohmelfs_path_length(pi);
1030 if (path_len < 0) {
1031 err = path_len;
1032 goto err_out_exit;
1033 }
1034
1035 t = netfs_trans_alloc(psb, namelen + path_len + attrsize, 0, 0);
1036 if (!t) {
1037 err = -ENOMEM;
1038 goto err_out_exit;
1039 }
1040
1041 cmd = netfs_trans_current(t);
1042 data = cmd + 1;
1043
1044 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1045 if (path_len < 0) {
1046 err = path_len;
1047 goto err_out_put;
1048 }
1049 data += path_len;
1050
1051 /*
1052 * 'name' is a NUL-terminated string already and
1053 * 'namelen' includes 0-byte.
1054 */
1055 memcpy(data, name, namelen);
1056 data += namelen;
1057
1058 memcpy(data, value, attrsize);
1059
1060 cmd->cmd = command;
1061 cmd->id = id;
1062 cmd->start = start;
1063 cmd->size = attrsize + namelen + path_len;
1064 cmd->ext = path_len;
1065 cmd->csize = 0;
1066 cmd->cpad = 0;
1067
1068 netfs_convert_cmd(cmd);
1069 netfs_trans_update(cmd, t, namelen + path_len + attrsize);
1070
1071 return netfs_trans_finish(t, psb);
1072
1073err_out_put:
1074 t->result = err;
1075 netfs_trans_put(t);
1076err_out_exit:
1077 return err;
1078}
1079
1080static int pohmelfs_setxattr(struct dentry *dentry, const char *name,
1081 const void *value, size_t attrsize, int flags)
1082{
1083 struct inode *inode = dentry->d_inode;
1084 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1085 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1086
1087 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1088 return -EOPNOTSUPP;
1089
1090 return pohmelfs_send_xattr_req(pi, flags, attrsize, name,
1091 value, attrsize, NETFS_XATTR_SET);
1092}
1093
1094static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name,
1095 void *value, size_t attrsize)
1096{
1097 struct inode *inode = dentry->d_inode;
1098 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1099 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1100 struct pohmelfs_mcache *m;
1101 int err;
1102 long timeout = psb->mcache_timeout;
1103
1104 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1105 return -EOPNOTSUPP;
1106
1107 m = pohmelfs_mcache_alloc(psb, 0, attrsize, value);
1108 if (IS_ERR(m))
1109 return PTR_ERR(m);
1110
1111 dprintk("%s: ino: %llu, name: '%s', size: %zu.\n",
1112 __func__, pi->ino, name, attrsize);
1113
1114 err = pohmelfs_send_xattr_req(pi, m->gen, attrsize, name, value, 0, NETFS_XATTR_GET);
1115 if (err)
1116 goto err_out_put;
1117
1118 do {
1119 err = wait_for_completion_timeout(&m->complete, timeout);
1120 if (err) {
1121 err = m->err;
1122 break;
1123 }
1124
1125 /*
 1126 * This loop is a bit ugly, since it waits until the reference counter
 1127 * hits 1 and then puts the object. The main goal is to prevent a race
 1128 * with the network thread: it may start processing the given request,
 1129 * i.e. increase its reference counter without completing it, while we
 1130 * exit from ->getxattr() on timeout. Although the request itself will
 1131 * not be freed (its reference counter was increased by the network
 1132 * thread), the data pointer provided by the user may be released, so
 1133 * the network thread would overwrite an already freed area.
 1134 *
 1135 * So after a timeout we remove the request from the cache, so that it
 1136 * can no longer be found by the network thread, and wait for its
 1137 * reference counter to hit 1: if the network thread has already started
 1138 * to process this request, we wait for it to finish and then free the
 1139 * object locally. If the reference counter is already 1, i.e. the
 1140 * request is not used by anyone else, we can free it right away.
1141 */
1142 err = -ETIMEDOUT;
1143 timeout = HZ;
1144
1145 pohmelfs_mcache_remove_locked(psb, m);
1146 } while (atomic_read(&m->refcnt) != 1);
1147
1148 pohmelfs_mcache_put(psb, m);
1149
1150 dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
1151
1152 return err;
1153
1154err_out_put:
1155 pohmelfs_mcache_put(psb, m);
1156 return err;
1157}
1158
1159static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1160{
1161 struct inode *inode = dentry->d_inode;
 1162#if 0
1163 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1164 int err;
 1165
1166 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
1167 if (err)
1168 return err;
1169 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n",
1170 __func__, pi->ino, inode->i_mode, inode->i_uid,
1171 inode->i_gid, inode->i_size);
 1172#endif
1173
1174 generic_fillattr(inode, stat);
1175 return 0;
1176}
1177
1178const struct inode_operations pohmelfs_file_inode_operations = {
1179 .setattr = pohmelfs_setattr,
1180 .getattr = pohmelfs_getattr,
1181 .setxattr = pohmelfs_setxattr,
1182 .getxattr = pohmelfs_getxattr,
1183};
1184
1185/*
1186 * Fill inode data: mode, size, operation callbacks and so on...
1187 */
1188void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
1189{
1190 inode->i_mode = info->mode;
1191 inode->i_nlink = info->nlink;
1192 inode->i_uid = info->uid;
1193 inode->i_gid = info->gid;
1194 inode->i_blocks = info->blocks;
1195 inode->i_rdev = info->rdev;
1196 inode->i_size = info->size;
1197 inode->i_version = info->version;
1198 inode->i_blkbits = ffs(info->blocksize);
1199
1200 dprintk("%s: inode: %p, num: %lu/%llu inode is regular: %d, dir: %d, link: %d, mode: %o, size: %llu.\n",
1201 __func__, inode, inode->i_ino, info->ino,
1202 S_ISREG(inode->i_mode), S_ISDIR(inode->i_mode),
1203 S_ISLNK(inode->i_mode), inode->i_mode, inode->i_size);
1204
1205 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
1206
1207 /*
1208 * i_mapping is a pointer to i_data during inode initialization.
1209 */
1210 inode->i_data.a_ops = &pohmelfs_aops;
1211
1212 if (S_ISREG(inode->i_mode)) {
1213 inode->i_fop = &pohmelfs_file_ops;
1214 inode->i_op = &pohmelfs_file_inode_operations;
1215 } else if (S_ISDIR(inode->i_mode)) {
1216 inode->i_fop = &pohmelfs_dir_fops;
1217 inode->i_op = &pohmelfs_dir_inode_ops;
1218 } else if (S_ISLNK(inode->i_mode)) {
1219 inode->i_op = &pohmelfs_symlink_inode_operations;
1220 inode->i_fop = &pohmelfs_file_ops;
1221 } else {
1222 inode->i_fop = &generic_ro_fops;
1223 }
1224}
1225
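/*
 * ->drop_inode() callback: removes the inode from the superblock's drop/scan
 * list under ino_lock before calling generic_drop_inode().
 */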
1226static void pohmelfs_drop_inode(struct inode *inode)
1227{
1228 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1229 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1230
1231 spin_lock(&psb->ino_lock);
1232 list_del_init(&pi->inode_entry);
1233 spin_unlock(&psb->ino_lock);
1234
1235 generic_drop_inode(inode);
1236}
1237
1238static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb,
1239 struct list_head *head, unsigned int *count)
1240{
1241 struct pohmelfs_inode *pi = NULL;
1242
1243 spin_lock(&psb->ino_lock);
1244 if (!list_empty(head)) {
1245 pi = list_entry(head->next, struct pohmelfs_inode,
1246 inode_entry);
1247 list_del_init(&pi->inode_entry);
1248 *count = pi->drop_count;
1249 pi->drop_count = 0;
1250 }
1251 spin_unlock(&psb->ino_lock);
1252
1253 return pi;
1254}
1255
1256static void pohmelfs_flush_transactions(struct pohmelfs_sb *psb)
1257{
1258 struct pohmelfs_config *c;
1259
1260 mutex_lock(&psb->state_lock);
1261 list_for_each_entry(c, &psb->state_list, config_entry) {
1262 pohmelfs_state_flush_transactions(&c->state);
1263 }
1264 mutex_unlock(&psb->state_lock);
1265}
1266
1267/*
1268 * ->put_super() callback. Invoked before superblock is destroyed,
1269 * so it has to clean all private data.
1270 */
1271static void pohmelfs_put_super(struct super_block *sb)
1272{
1273 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1274 struct pohmelfs_inode *pi;
1275 unsigned int count;
1276 unsigned int in_drop_list = 0;
1277 struct inode *inode, *tmp;
1278
1279 dprintk("%s.\n", __func__);
1280
1281 /*
1282 * Kill pending transactions, which could affect inodes in-flight.
1283 */
1284 pohmelfs_flush_transactions(psb);
1285
1286 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) {
1287 inode = &pi->vfs_inode;
1288
1289 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1290 __func__, pi->ino, pi, inode, count);
1291
1292 if (atomic_read(&inode->i_count) != count) {
1293 printk("%s: ino: %llu, pi: %p, inode: %p, count: %u, i_count: %d.\n",
1294 __func__, pi->ino, pi, inode, count,
1295 atomic_read(&inode->i_count));
1296 count = atomic_read(&inode->i_count);
1297 in_drop_list++;
1298 }
1299
1300 while (count--)
1301 iput(&pi->vfs_inode);
1302 }
1303
1304 list_for_each_entry_safe(inode, tmp, &sb->s_inodes, i_sb_list) {
1305 pi = POHMELFS_I(inode);
1306
1307 dprintk("%s: ino: %llu, pi: %p, inode: %p, i_count: %u.\n",
1308 __func__, pi->ino, pi, inode, atomic_read(&inode->i_count));
1309
1310 /*
 1311 * These are special inodes: they were created during
 1312 * directory reading or lookup and were not bound to a dentry,
 1313 * so they live here with a reference counter of 1 and prevent
 1314 * umount from succeeding, since it believes they are busy.
1315 */
1316 count = atomic_read(&inode->i_count);
1317 if (count) {
1318 list_del_init(&inode->i_sb_list);
1319 while (count--)
1320 iput(&pi->vfs_inode);
1321 }
1322 }
1323
1324 psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
1325 cancel_rearming_delayed_work(&psb->dwork);
1326 cancel_rearming_delayed_work(&psb->drop_dwork);
1327 flush_scheduled_work();
1328
1329 dprintk("%s: stopped workqueues.\n", __func__);
1330
1331 pohmelfs_crypto_exit(psb);
1332 pohmelfs_state_exit(psb);
1333
1334 kfree(psb);
1335 sb->s_fs_info = NULL;
1336}
1337
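/*
 * ->statfs() callback: reports the cached export totals (total_size,
 * avail_size, total_inodes) in units of the superblock block size.
 */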
1338static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1339{
1340 struct super_block *sb = dentry->d_sb;
1341 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1342
1343 /*
1344 * There are no filesystem size limits yet.
1345 */
1346 memset(buf, 0, sizeof(struct kstatfs));
1347
1348 buf->f_type = POHMELFS_MAGIC_NUM; /* 'POH.' */
1349 buf->f_bsize = sb->s_blocksize;
1350 buf->f_files = psb->ino;
1351 buf->f_namelen = 255;
1352 buf->f_files = atomic_long_read(&psb->total_inodes);
1353 buf->f_bfree = buf->f_bavail = psb->avail_size >> PAGE_SHIFT;
1354 buf->f_blocks = psb->total_size >> PAGE_SHIFT;
1355
1356 dprintk("%s: total: %llu, avail: %llu, inodes: %llu, bsize: %lu.\n",
1357 __func__, psb->total_size, psb->avail_size, buf->f_files, sb->s_blocksize);
1358
1359 return 0;
1360}
1361
1362static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
1363{
1364 struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
1365
1366 seq_printf(seq, ",idx=%u", psb->idx);
1367 seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
1368 seq_printf(seq, ",drop_scan_timeout=%u", jiffies_to_msecs(psb->drop_scan_timeout));
1369 seq_printf(seq, ",wait_on_page_timeout=%u", jiffies_to_msecs(psb->wait_on_page_timeout));
1370 seq_printf(seq, ",trans_retries=%u", psb->trans_retries);
1371 seq_printf(seq, ",crypto_thread_num=%u", psb->crypto_thread_num);
1372 seq_printf(seq, ",trans_max_pages=%u", psb->trans_max_pages);
1373 seq_printf(seq, ",mcache_timeout=%u", jiffies_to_msecs(psb->mcache_timeout));
1374 if (psb->crypto_fail_unsupported)
1375 seq_printf(seq, ",crypto_fail_unsupported");
1376
1377 return 0;
1378}
1379
1380enum {
1381 pohmelfs_opt_idx,
1382 pohmelfs_opt_crypto_thread_num,
1383 pohmelfs_opt_trans_max_pages,
1384 pohmelfs_opt_crypto_fail_unsupported,
1385
1386 /* Remountable options */
1387 pohmelfs_opt_trans_scan_timeout,
1388 pohmelfs_opt_drop_scan_timeout,
1389 pohmelfs_opt_wait_on_page_timeout,
1390 pohmelfs_opt_trans_retries,
1391 pohmelfs_opt_mcache_timeout,
1392};
1393
1394static struct match_token pohmelfs_tokens[] = {
1395 {pohmelfs_opt_idx, "idx=%u"},
1396 {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
1397 {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
1398 {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
1399 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"},
1400 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"},
1401 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"},
1402 {pohmelfs_opt_trans_retries, "trans_retries=%u"},
1403 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"},
1404};
1405
 1406static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount)
1407{
1408 char *p;
1409 substring_t args[MAX_OPT_ARGS];
1410 int option, err;
1411
1412 if (!options)
1413 return 0;
1414
1415 while ((p = strsep(&options, ",")) != NULL) {
1416 int token;
1417 if (!*p)
1418 continue;
1419
1420 token = match_token(p, pohmelfs_tokens, args);
1421
1422 err = match_int(&args[0], &option);
1423 if (err)
1424 return err;
1425
1426 if (remount && token <= pohmelfs_opt_crypto_fail_unsupported)
1427 continue;
1428
1429 switch (token) {
1430 case pohmelfs_opt_idx:
1431 psb->idx = option;
1432 break;
1433 case pohmelfs_opt_trans_scan_timeout:
1434 psb->trans_scan_timeout = msecs_to_jiffies(option);
1435 break;
1436 case pohmelfs_opt_drop_scan_timeout:
1437 psb->drop_scan_timeout = msecs_to_jiffies(option);
1438 break;
1439 case pohmelfs_opt_wait_on_page_timeout:
1440 psb->wait_on_page_timeout = msecs_to_jiffies(option);
1441 break;
1442 case pohmelfs_opt_mcache_timeout:
1443 psb->mcache_timeout = msecs_to_jiffies(option);
1444 break;
1445 case pohmelfs_opt_trans_retries:
1446 psb->trans_retries = option;
1447 break;
1448 case pohmelfs_opt_crypto_thread_num:
1449 psb->crypto_thread_num = option;
1450 break;
1451 case pohmelfs_opt_trans_max_pages:
1452 psb->trans_max_pages = option;
1453 break;
1454 case pohmelfs_opt_crypto_fail_unsupported:
1455 psb->crypto_fail_unsupported = 1;
1456 break;
1457 default:
1458 return -EINVAL;
1459 }
1460 }
1461
1462 return 0;
1463}
1464
1465static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
1466{
1467 int err;
1468 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1469 unsigned long old_sb_flags = sb->s_flags;
1470
1471 err = pohmelfs_parse_options(data, psb, 1);
1472 if (err)
1473 goto err_out_restore;
1474
1475 if (!(*flags & MS_RDONLY))
1476 sb->s_flags &= ~MS_RDONLY;
1477 return 0;
1478
1479err_out_restore:
1480 sb->s_flags = old_sb_flags;
1481 return err;
1482}
1483
1484static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count)
1485{
1486 struct inode *inode = &pi->vfs_inode;
1487
1488 dprintk("%s: %p: ino: %llu, owned: %d.\n",
1489 __func__, inode, pi->ino, test_bit(NETFS_INODE_OWNED, &pi->state));
1490
1491 mutex_lock(&inode->i_mutex);
1492 if (test_and_clear_bit(NETFS_INODE_OWNED, &pi->state)) {
1493 filemap_fdatawrite(inode->i_mapping);
1494 inode->i_sb->s_op->write_inode(inode, 0);
1495 }
1496
 1497#ifdef POHMELFS_TRUNCATE_ON_INODE_FLUSH
 1498 truncate_inode_pages(inode->i_mapping, 0);
 1499#endif
1500
1501 pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1502 mutex_unlock(&inode->i_mutex);
1503}
1504
1505static void pohmelfs_put_inode_count(struct pohmelfs_inode *pi, unsigned int count)
1506{
1507 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1508 __func__, pi->ino, pi, &pi->vfs_inode, count);
1509
1510 if (test_and_clear_bit(NETFS_INODE_NEED_FLUSH, &pi->state))
1511 pohmelfs_flush_inode(pi, count);
1512
1513 while (count--)
1514 iput(&pi->vfs_inode);
1515}
1516
1517static void pohmelfs_drop_scan(struct work_struct *work)
1518{
1519 struct pohmelfs_sb *psb =
1520 container_of(work, struct pohmelfs_sb, drop_dwork.work);
1521 struct pohmelfs_inode *pi;
1522 unsigned int count = 0;
1523
 1524 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count)))
 1525 pohmelfs_put_inode_count(pi, count);
 1526
1527 pohmelfs_check_states(psb);
1528
1529 if (psb->drop_scan_timeout)
1530 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1531}
1532
1533/*
1534 * Run through all transactions starting from the oldest,
 1535 * drop each transaction from its current state and try to send it
 1536 * to all currently installed remote nodes.
1537 */
1538static void pohmelfs_trans_scan_state(struct netfs_state *st)
1539{
1540 struct rb_node *rb_node;
1541 struct netfs_trans_dst *dst;
1542 struct pohmelfs_sb *psb = st->psb;
1543 unsigned int timeout = psb->trans_scan_timeout;
1544 struct netfs_trans *t;
1545 int err;
1546
1547 mutex_lock(&st->trans_lock);
1548 for (rb_node = rb_first(&st->trans_root); rb_node; ) {
1549 dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
1550 t = dst->trans;
1551
1552 if (timeout && time_after(dst->send_time + timeout, jiffies)
1553 && dst->retries == 0)
1554 break;
1555
1556 dprintk("%s: t: %p, gen: %u, st: %p, retries: %u, max: %u.\n",
1557 __func__, t, t->gen, st, dst->retries, psb->trans_retries);
1558 netfs_trans_get(t);
1559
1560 rb_node = rb_next(rb_node);
1561
1562 err = -ETIMEDOUT;
 1563 if (timeout && (++dst->retries < psb->trans_retries))
 1564 err = netfs_trans_resend(t, psb);
1565
1566 if (err || (t->flags & NETFS_TRANS_SINGLE_DST)) {
1567 if (netfs_trans_remove_nolock(dst, st))
1568 netfs_trans_drop_dst_nostate(dst);
1569 }
1570
1571 t->result = err;
1572 netfs_trans_put(t);
1573 }
1574 mutex_unlock(&st->trans_lock);
1575}
1576
1577/*
1578 * Walk through all installed network states and resend all
1579 * transactions, which are old enough.
1580 */
1581static void pohmelfs_trans_scan(struct work_struct *work)
1582{
1583 struct pohmelfs_sb *psb =
1584 container_of(work, struct pohmelfs_sb, dwork.work);
1585 struct netfs_state *st;
1586 struct pohmelfs_config *c;
1587
1588 mutex_lock(&psb->state_lock);
1589 list_for_each_entry(c, &psb->state_list, config_entry) {
1590 st = &c->state;
1591
1592 pohmelfs_trans_scan_state(st);
1593 }
1594 mutex_unlock(&psb->state_lock);
1595
1596 /*
 1597 * If no timeout is specified, the system is in the middle of the umount
 1598 * process, so there is no need to reschedule the scan.
1599 */
1600 if (psb->trans_scan_timeout)
1601 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1602}
1603
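/*
 * Builds and sends a generic metadata command (@cmd_op) for the object: the
 * transaction carries the object's path, an optional extra path component
 * (@addon) and, for NETFS_INODE_INFO, a netfs_inode_info block snapshotted
 * from the in-core inode.
 */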
1604int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
1605 unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start)
1606{
1607 struct inode *inode = &pi->vfs_inode;
1608 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1609 int err = 0, sz;
1610 struct netfs_trans *t;
1611 int path_len, addon_len = 0;
1612 void *data;
1613 struct netfs_inode_info *info;
1614 struct netfs_cmd *cmd;
1615
1616 dprintk("%s: ino: %llu, cmd: %u, addon: %p.\n", __func__, pi->ino, cmd_op, addon);
1617
1618 path_len = pohmelfs_path_length(pi);
1619 if (path_len < 0) {
1620 err = path_len;
1621 goto err_out_exit;
1622 }
1623
1624 if (addon)
1625 addon_len = strlen(addon) + 1; /* 0-byte */
1626 sz = addon_len;
1627
1628 if (cmd_op == NETFS_INODE_INFO)
1629 sz += sizeof(struct netfs_inode_info);
1630
1631 t = netfs_trans_alloc(psb, sz + path_len, flags, 0);
1632 if (!t) {
1633 err = -ENOMEM;
1634 goto err_out_exit;
1635 }
1636 t->complete = complete;
1637 t->private = priv;
1638
1639 cmd = netfs_trans_current(t);
1640 data = (void *)(cmd + 1);
1641
1642 if (cmd_op == NETFS_INODE_INFO) {
1643 info = (struct netfs_inode_info *)(cmd + 1);
1644 data = (void *)(info + 1);
1645
1646 /*
1647 * We are under i_mutex, can read and change whatever we want...
1648 */
1649 info->mode = inode->i_mode;
1650 info->nlink = inode->i_nlink;
1651 info->uid = inode->i_uid;
1652 info->gid = inode->i_gid;
1653 info->blocks = inode->i_blocks;
1654 info->rdev = inode->i_rdev;
1655 info->size = inode->i_size;
1656 info->version = inode->i_version;
1657
1658 netfs_convert_inode_info(info);
1659 }
1660
1661 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1662 if (path_len < 0)
1663 goto err_out_free;
1664
1665 dprintk("%s: path_len: %d.\n", __func__, path_len);
1666
1667 if (addon) {
1668 path_len--; /* Do not place null-byte before the addon */
1669 path_len += sprintf(data + path_len, "/%s", addon) + 1; /* 0 - byte */
1670 }
1671
1672 sz += path_len;
1673
1674 cmd->cmd = cmd_op;
1675 cmd->ext = path_len;
1676 cmd->size = sz;
1677 cmd->id = id;
1678 cmd->start = start;
1679
1680 netfs_convert_cmd(cmd);
1681 netfs_trans_update(cmd, t, sz);
1682
1683 /*
 1684 * Note that it is possible to leak an error here: the transaction callback
 1685 * will not be invoked on allocation-path failure.
1686 */
1687 return netfs_trans_finish(t, psb);
1688
1689err_out_free:
1690 netfs_trans_free(t);
1691err_out_exit:
1692 if (complete)
1693 complete(NULL, 0, priv, err);
1694 return err;
1695}
1696
1697int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
1698 netfs_trans_complete_t complete, void *priv, u64 start)
1699{
1700 return pohmelfs_meta_command_data(pi, pi->ino, cmd_op, NULL, flags, complete, priv, start);
1701}
1702
1703/*
 1704 * Send a request and wait for the POHMELFS root capabilities response,
 1705 * which updates information about the export: its size,
 1706 * permissions, number of objects, available space and so on.
1707 */
1708static int pohmelfs_root_handshake(struct pohmelfs_sb *psb)
1709{
1710 struct netfs_trans *t;
1711 struct netfs_cmd *cmd;
1712 int err = -ENOMEM;
1713
1714 t = netfs_trans_alloc(psb, 0, 0, 0);
1715 if (!t)
1716 goto err_out_exit;
1717
1718 cmd = netfs_trans_current(t);
1719
1720 cmd->cmd = NETFS_CAPABILITIES;
1721 cmd->id = POHMELFS_ROOT_CAPABILITIES;
1722 cmd->size = 0;
1723 cmd->start = 0;
1724 cmd->ext = 0;
1725 cmd->csize = 0;
1726
1727 netfs_convert_cmd(cmd);
1728 netfs_trans_update(cmd, t, 0);
1729
1730 err = netfs_trans_finish(t, psb);
1731 if (err)
1732 goto err_out_exit;
1733
1734 psb->flags = ~0;
1735 err = wait_event_interruptible_timeout(psb->wait,
1736 (psb->flags != ~0),
1737 psb->wait_on_page_timeout);
 1738 if (!err)
 1739 err = -ETIMEDOUT;
 1740 else if (err > 0)
 1741 err = -psb->flags;
1742
1743 if (err)
1744 goto err_out_exit;
1745
1746 return 0;
1747
1748err_out_exit:
1749 return err;
1750}
1751
1752static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
1753{
1754 struct netfs_state *st;
1755 struct pohmelfs_ctl *ctl;
1756 struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
1757 struct pohmelfs_config *c;
1758
1759 mutex_lock(&psb->state_lock);
1760
1761 seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n");
1762
1763 list_for_each_entry(c, &psb->state_list, config_entry) {
1764 st = &c->state;
1765 ctl = &st->ctl;
1766
1767 seq_printf(m, "%u ", ctl->idx);
1768 if (ctl->addr.sa_family == AF_INET) {
1769 struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
 1770 seq_printf(m, "%pI4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port));
1771 } else if (ctl->addr.sa_family == AF_INET6) {
1772 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
1773 seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
1774 } else {
1775 unsigned int i;
1776 for (i=0; i<ctl->addrlen; ++i)
1777 seq_printf(m, "%02x.", ctl->addr.addr[i]);
1778 }
1779
1780 seq_printf(m, " %u %u %d %u %x\n",
1781 ctl->type, ctl->proto,
1782 st->socket != NULL,
1783 ctl->prio, ctl->perm);
1784 }
1785 mutex_unlock(&psb->state_lock);
1786
1787 return 0;
1788}
1789
1790static const struct super_operations pohmelfs_sb_ops = {
1791 .alloc_inode = pohmelfs_alloc_inode,
1792 .destroy_inode = pohmelfs_destroy_inode,
1793 .drop_inode = pohmelfs_drop_inode,
1794 .write_inode = pohmelfs_write_inode,
1795 .put_super = pohmelfs_put_super,
1796 .remount_fs = pohmelfs_remount,
1797 .statfs = pohmelfs_statfs,
1798 .show_options = pohmelfs_show_options,
 1799 .show_stats = pohmelfs_show_stats,
1800};
1801
1802/*
1803 * Allocate private superblock and create root dir.
1804 */
1805static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
1806{
1807 struct pohmelfs_sb *psb;
1808 int err = -ENOMEM;
1809 struct inode *root;
1810 struct pohmelfs_inode *npi;
1811 struct qstr str;
1812
1813 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL);
1814 if (!psb)
1815 goto err_out_exit;
1816
1817 sb->s_fs_info = psb;
1818 sb->s_op = &pohmelfs_sb_ops;
1819 sb->s_magic = POHMELFS_MAGIC_NUM;
1820 sb->s_maxbytes = MAX_LFS_FILESIZE;
1821 sb->s_blocksize = PAGE_SIZE;
1822
1823 psb->sb = sb;
1824
1825 psb->ino = 2;
1826 psb->idx = 0;
1827 psb->active_state = NULL;
1828 psb->trans_retries = 5;
1829 psb->trans_data_size = PAGE_SIZE;
1830 psb->drop_scan_timeout = msecs_to_jiffies(1000);
1831 psb->trans_scan_timeout = msecs_to_jiffies(5000);
1832 psb->wait_on_page_timeout = msecs_to_jiffies(5000);
1833 init_waitqueue_head(&psb->wait);
1834
1835 spin_lock_init(&psb->ino_lock);
1836
1837 INIT_LIST_HEAD(&psb->drop_list);
1838
1839 mutex_init(&psb->mcache_lock);
1840 psb->mcache_root = RB_ROOT;
1841 psb->mcache_timeout = msecs_to_jiffies(5000);
1842 atomic_long_set(&psb->mcache_gen, 0);
1843
1844 psb->trans_max_pages = 100;
1845
1846 psb->crypto_align_size = 16;
1847 psb->crypto_attached_size = 0;
1848 psb->hash_strlen = 0;
1849 psb->cipher_strlen = 0;
1850 psb->perform_crypto = 0;
1851 psb->crypto_thread_num = 2;
1852 psb->crypto_fail_unsupported = 0;
1853 mutex_init(&psb->crypto_thread_lock);
1854 INIT_LIST_HEAD(&psb->crypto_ready_list);
1855 INIT_LIST_HEAD(&psb->crypto_active_list);
1856
1857 atomic_set(&psb->trans_gen, 1);
 1858 atomic_long_set(&psb->total_inodes, 0);
1859
1860 mutex_init(&psb->state_lock);
1861 INIT_LIST_HEAD(&psb->state_list);
1862
 1863 err = pohmelfs_parse_options((char *) data, psb, 0);
1864 if (err)
1865 goto err_out_free_sb;
1866
1867 err = pohmelfs_copy_crypto(psb);
1868 if (err)
1869 goto err_out_free_sb;
1870
1871 err = pohmelfs_state_init(psb);
1872 if (err)
1873 goto err_out_free_strings;
1874
1875 err = pohmelfs_crypto_init(psb);
1876 if (err)
1877 goto err_out_state_exit;
1878
1879 err = pohmelfs_root_handshake(psb);
1880 if (err)
1881 goto err_out_crypto_exit;
1882
1883 str.name = "/";
1884 str.hash = jhash("/", 1, 0);
1885 str.len = 1;
1886
1887 npi = pohmelfs_create_entry_local(psb, NULL, &str, 0, 0755|S_IFDIR);
1888 if (IS_ERR(npi)) {
1889 err = PTR_ERR(npi);
1890 goto err_out_crypto_exit;
1891 }
1892 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
1893 clear_bit(NETFS_INODE_OWNED, &npi->state);
1894
1895 root = &npi->vfs_inode;
1896
1897 sb->s_root = d_alloc_root(root);
1898 if (!sb->s_root)
1899 goto err_out_put_root;
1900
1901 INIT_DELAYED_WORK(&psb->drop_dwork, pohmelfs_drop_scan);
1902 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1903
1904 INIT_DELAYED_WORK(&psb->dwork, pohmelfs_trans_scan);
1905 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1906
1907 return 0;
1908
1909err_out_put_root:
1910 iput(root);
1911err_out_crypto_exit:
1912 pohmelfs_crypto_exit(psb);
1913err_out_state_exit:
1914 pohmelfs_state_exit(psb);
1915err_out_free_strings:
1916 kfree(psb->cipher_string);
1917 kfree(psb->hash_string);
1918err_out_free_sb:
1919 kfree(psb);
1920err_out_exit:
1921
1922 dprintk("%s: err: %d.\n", __func__, err);
1923 return err;
1924}
1925
1926/*
1927 * Some VFS magic here...
1928 */
1929static int pohmelfs_get_sb(struct file_system_type *fs_type,
1930 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
1931{
1932 return get_sb_nodev(fs_type, flags, data, pohmelfs_fill_super,
1933 mnt);
1934}
1935
1936/*
 1937 * We need this to sync all inodes earlier, since when writeback
 1938 * is invoked from the umount/mntput path the dcache is already shrunk
 1939 * (see generic_shutdown_super()) and no inode can access its path.
1940 */
1941static void pohmelfs_kill_super(struct super_block *sb)
1942{
 1943 sync_inodes_sb(sb);
1944 kill_anon_super(sb);
1945}
1946
1947static struct file_system_type pohmel_fs_type = {
1948 .owner = THIS_MODULE,
1949 .name = "pohmel",
1950 .get_sb = pohmelfs_get_sb,
 1951 .kill_sb = pohmelfs_kill_super,
1952};
1953
1954/*
 1955 * Cache and module initialization and freeing routines.
1956 */
1957static void pohmelfs_init_once(void *data)
1958{
1959 struct pohmelfs_inode *pi = data;
1960
1961 inode_init_once(&pi->vfs_inode);
1962}
1963
1964static int __init pohmelfs_init_inodecache(void)
1965{
1966 pohmelfs_inode_cache = kmem_cache_create("pohmelfs_inode_cache",
1967 sizeof(struct pohmelfs_inode),
1968 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
1969 pohmelfs_init_once);
1970 if (!pohmelfs_inode_cache)
1971 return -ENOMEM;
1972
1973 return 0;
1974}
1975
1976static void pohmelfs_destroy_inodecache(void)
1977{
1978 kmem_cache_destroy(pohmelfs_inode_cache);
1979}
1980
1981static int __init init_pohmel_fs(void)
1982{
1983 int err;
1984
1985 err = pohmelfs_config_init();
1986 if (err)
1987 goto err_out_exit;
1988
1989 err = pohmelfs_init_inodecache();
1990 if (err)
1991 goto err_out_config_exit;
1992
1993 err = pohmelfs_mcache_init();
1994 if (err)
1995 goto err_out_destroy;
1996
1997 err = netfs_trans_init();
1998 if (err)
1999 goto err_out_mcache_exit;
2000
2001 err = register_filesystem(&pohmel_fs_type);
2002 if (err)
2003 goto err_out_trans;
2004
2005 return 0;
2006
2007err_out_trans:
2008 netfs_trans_exit();
2009err_out_mcache_exit:
2010 pohmelfs_mcache_exit();
2011err_out_destroy:
2012 pohmelfs_destroy_inodecache();
2013err_out_config_exit:
2014 pohmelfs_config_exit();
2015err_out_exit:
2016 return err;
2017}
2018
2019static void __exit exit_pohmel_fs(void)
2020{
2021 unregister_filesystem(&pohmel_fs_type);
2022 pohmelfs_destroy_inodecache();
2023 pohmelfs_mcache_exit();
2024 pohmelfs_config_exit();
2025 netfs_trans_exit();
2026}
2027
2028module_init(init_pohmel_fs);
2029module_exit(exit_pohmel_fs);
2030
2031MODULE_LICENSE("GPL");
2032MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
2033MODULE_DESCRIPTION("Pohmel filesystem");