drivers/staging/pohmelfs/inode.c
1 /*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/module.h>
17 #include <linux/backing-dev.h>
18 #include <linux/crypto.h>
19 #include <linux/fs.h>
20 #include <linux/jhash.h>
21 #include <linux/hash.h>
22 #include <linux/ktime.h>
23 #include <linux/mm.h>
24 #include <linux/mount.h>
25 #include <linux/pagemap.h>
26 #include <linux/pagevec.h>
27 #include <linux/parser.h>
28 #include <linux/swap.h>
29 #include <linux/slab.h>
30 #include <linux/statfs.h>
31 #include <linux/writeback.h>
32 #include <linux/prefetch.h>
33
34 #include "netfs.h"
35
36 #define POHMELFS_MAGIC_NUM 0x504f482e
37
38 static struct kmem_cache *pohmelfs_inode_cache;
39 static atomic_t psb_bdi_num = ATOMIC_INIT(0);
40
41 /*
42 * Removes inode from all trees, drops local name cache and removes all queued
43 * requests for object removal.
44 */
45 void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi)
46 {
47 mutex_lock(&pi->offset_lock);
48 pohmelfs_free_names(pi);
49 mutex_unlock(&pi->offset_lock);
50
51 dprintk("%s: deleted stuff in ino: %llu.\n", __func__, pi->ino);
52 }
53
54 /*
55 * Sync inode to server.
56 * Returns zero on success and a negative error value otherwise.
57 * It gathers the path to the root directory into structures containing
58 * creation mode, permissions and names, so that the whole path
59 * to the given inode can be created using only a single network command.
60 */
61 int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans)
62 {
63 struct pohmelfs_inode *pi = POHMELFS_I(inode);
64 int err = -ENOMEM, size;
65 struct netfs_cmd *cmd;
66 void *data;
67 int cur_len = netfs_trans_cur_len(trans);
68
69 if (unlikely(cur_len < 0))
70 return -ETOOSMALL;
71
72 cmd = netfs_trans_current(trans);
73 cur_len -= sizeof(struct netfs_cmd);
74
75 data = (void *)(cmd + 1);
76
77 err = pohmelfs_construct_path_string(pi, data, cur_len);
78 if (err < 0)
79 goto err_out_exit;
80
81 size = err;
82
83 cmd->start = i_size_read(inode);
84 cmd->cmd = NETFS_CREATE;
85 cmd->size = size;
86 cmd->id = pi->ino;
87 cmd->ext = inode->i_mode;
88
89 netfs_convert_cmd(cmd);
90
91 netfs_trans_update(cmd, trans, size);
92
93 return 0;
94
95 err_out_exit:
96 printk("%s: completed ino: %llu, err: %d.\n", __func__, pi->ino, err);
97 return err;
98 }
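/*
 * Illustrative sketch (not driver code; the example path is hypothetical):
 * the NETFS_CREATE request built above is placed at the transaction's
 * current position as a command header followed directly by the path
 * string produced by pohmelfs_construct_path_string():
 *
 *   +------------------+-----------------------------------------+
 *   | struct netfs_cmd | "/dir/subdir/file\0"  (cmd->size bytes)  |
 *   +------------------+-----------------------------------------+
 *
 *   cmd->cmd   = NETFS_CREATE
 *   cmd->id    = pi->ino            (object id on the server)
 *   cmd->start = i_size_read(inode) (current file size)
 *   cmd->ext   = inode->i_mode      (mode used when creating the path)
 *   cmd->size  = length of the path string, including the trailing 0
 */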
99
100 static int pohmelfs_write_trans_complete(struct page **pages, unsigned int page_num,
101 void *private, int err)
102 {
103 unsigned i;
104
105 dprintk("%s: pages: %lu-%lu, page_num: %u, err: %d.\n",
106 __func__, pages[0]->index, pages[page_num-1]->index,
107 page_num, err);
108
109 for (i = 0; i < page_num; i++) {
110 struct page *page = pages[i];
111
112 if (!page)
113 continue;
114
115 end_page_writeback(page);
116
117 if (err < 0) {
118 SetPageError(page);
119 set_page_dirty(page);
120 }
121
122 unlock_page(page);
123 page_cache_release(page);
124
125 /* dprintk("%s: %3u/%u: page: %p.\n", __func__, i, page_num, page); */
126 }
127 return err;
128 }
129
130 static int pohmelfs_inode_has_dirty_pages(struct address_space *mapping, pgoff_t index)
131 {
132 int ret;
133 struct page *page;
134
135 rcu_read_lock();
136 ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
137 (void **)&page, index, 1, PAGECACHE_TAG_DIRTY);
138 rcu_read_unlock();
139 return ret;
140 }
141
142 static int pohmelfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
143 {
144 struct inode *inode = mapping->host;
145 struct pohmelfs_inode *pi = POHMELFS_I(inode);
146 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
147 int err = 0;
148 int done = 0;
149 int nr_pages;
150 pgoff_t index;
151 pgoff_t end; /* Inclusive */
152 int scanned = 0;
153 int range_whole = 0;
154
155 if (wbc->range_cyclic) {
156 index = mapping->writeback_index; /* Start from prev offset */
157 end = -1;
158 } else {
159 index = wbc->range_start >> PAGE_CACHE_SHIFT;
160 end = wbc->range_end >> PAGE_CACHE_SHIFT;
161 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
162 range_whole = 1;
163 scanned = 1;
164 }
165 retry:
166 while (!done && (index <= end)) {
167 unsigned int i = min(end - index, (pgoff_t)psb->trans_max_pages);
168 int path_len;
169 struct netfs_trans *trans;
170
171 err = pohmelfs_inode_has_dirty_pages(mapping, index);
172 if (!err)
173 break;
174
175 err = pohmelfs_path_length(pi);
176 if (err < 0)
177 break;
178
179 path_len = err;
180
181 if (path_len <= 2) {
182 err = -ENOENT;
183 break;
184 }
185
186 trans = netfs_trans_alloc(psb, path_len, 0, i);
187 if (!trans) {
188 err = -ENOMEM;
189 break;
190 }
191 trans->complete = &pohmelfs_write_trans_complete;
192
193 trans->page_num = nr_pages = find_get_pages_tag(mapping, &index,
194 PAGECACHE_TAG_DIRTY, trans->page_num,
195 trans->pages);
196
197 dprintk("%s: t: %p, nr_pages: %u, end: %lu, index: %lu, max: %u.\n",
198 __func__, trans, nr_pages, end, index, trans->page_num);
199
200 if (!nr_pages)
201 goto err_out_reset;
202
203 err = pohmelfs_write_inode_create(inode, trans);
204 if (err)
205 goto err_out_reset;
206
207 err = 0;
208 scanned = 1;
209
210 for (i = 0; i < trans->page_num; i++) {
211 struct page *page = trans->pages[i];
212
213 lock_page(page);
214
215 if (unlikely(page->mapping != mapping))
216 goto out_continue;
217
218 if (!wbc->range_cyclic && page->index > end) {
219 done = 1;
220 goto out_continue;
221 }
222
223 if (wbc->sync_mode != WB_SYNC_NONE)
224 wait_on_page_writeback(page);
225
226 if (PageWriteback(page) ||
227 !clear_page_dirty_for_io(page)) {
228 dprintk("%s: not clear for io page: %p, writeback: %d.\n",
229 __func__, page, PageWriteback(page));
230 goto out_continue;
231 }
232
233 set_page_writeback(page);
234
235 trans->attached_size += page_private(page);
236 trans->attached_pages++;
237 #if 0
238 dprintk("%s: %u/%u added trans: %p, gen: %u, page: %p, [High: %d], size: %lu, idx: %lu.\n",
239 __func__, i, trans->page_num, trans, trans->gen, page,
240 !!PageHighMem(page), page_private(page), page->index);
241 #endif
242 wbc->nr_to_write--;
243
244 if (wbc->nr_to_write <= 0)
245 done = 1;
246
247 continue;
248 out_continue:
249 unlock_page(page);
250 trans->pages[i] = NULL;
251 }
252
253 err = netfs_trans_finish(trans, psb);
254 if (err)
255 break;
256
257 continue;
258
259 err_out_reset:
260 trans->result = err;
261 netfs_trans_reset(trans);
262 netfs_trans_put(trans);
263 break;
264 }
265
266 if (!scanned && !done) {
267 /*
268 * We hit the last page and there is more work to be done: wrap
269 * back to the start of the file
270 */
271 scanned = 1;
272 index = 0;
273 goto retry;
274 }
275
276 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
277 mapping->writeback_index = index;
278
279 return err;
280 }
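/*
 * A minimal sketch of how the writeback window above is derived from the
 * writeback_control, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12):
 *
 *   wbc->range_cyclic:
 *     index = mapping->writeback_index;    start where the last pass ended
 *     end   = -1;                          effectively "until the end"
 *
 *   wbc->range_start == 0 && wbc->range_end == LLONG_MAX:
 *     index = 0 >> 12 = 0;
 *     end   = LLONG_MAX >> 12;             whole file, range_whole = 1
 *
 * Each loop iteration then packs at most
 *   min(end - index, psb->trans_max_pages)
 * dirty pages into one transaction (trans_max_pages defaults to 100,
 * see pohmelfs_fill_super()).
 */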
281
282 /*
283 * Inode writeback creation completion callback.
284 * Invoked only for freshly created inodes which do not have pages attached,
285 * such as directories and empty files.
286 */
287 static int pohmelfs_write_inode_complete(struct page **pages, unsigned int page_num,
288 void *private, int err)
289 {
290 struct inode *inode = private;
291 struct pohmelfs_inode *pi = POHMELFS_I(inode);
292
293 if (inode) {
294 if (err) {
295 mark_inode_dirty(inode);
296 clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
297 } else {
298 set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
299 }
300
301 pohmelfs_put_inode(pi);
302 }
303
304 return err;
305 }
306
307 int pohmelfs_write_create_inode(struct pohmelfs_inode *pi)
308 {
309 struct netfs_trans *t;
310 struct inode *inode = &pi->vfs_inode;
311 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
312 int err;
313
314 if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
315 return 0;
316
317 dprintk("%s: started ino: %llu.\n", __func__, pi->ino);
318
319 err = pohmelfs_path_length(pi);
320 if (err < 0)
321 goto err_out_exit;
322
323 t = netfs_trans_alloc(psb, err + 1, 0, 0);
324 if (!t) {
325 err = -ENOMEM;
326 goto err_out_exit;
327 }
328 t->complete = pohmelfs_write_inode_complete;
329 t->private = igrab(inode);
330 if (!t->private) {
331 err = -ENOENT;
332 goto err_out_put;
333 }
334
335 err = pohmelfs_write_inode_create(inode, t);
336 if (err)
337 goto err_out_put;
338
339 netfs_trans_finish(t, POHMELFS_SB(inode->i_sb));
340
341 return 0;
342
343 err_out_put:
344 t->result = err;
345 netfs_trans_put(t);
346 err_out_exit:
347 return err;
348 }
349
350 /*
351 * Sync all not-yet-created children in given directory to the server.
352 */
353 static int pohmelfs_write_inode_create_children(struct inode *inode)
354 {
355 struct pohmelfs_inode *parent = POHMELFS_I(inode);
356 struct super_block *sb = inode->i_sb;
357 struct pohmelfs_name *n;
358
359 while (!list_empty(&parent->sync_create_list)) {
360 n = NULL;
361 mutex_lock(&parent->offset_lock);
362 if (!list_empty(&parent->sync_create_list)) {
363 n = list_first_entry(&parent->sync_create_list,
364 struct pohmelfs_name, sync_create_entry);
365 list_del_init(&n->sync_create_entry);
366 }
367 mutex_unlock(&parent->offset_lock);
368
369 if (!n)
370 break;
371
372 inode = ilookup(sb, n->ino);
373
374 dprintk("%s: parent: %llu, ino: %llu, inode: %p.\n",
375 __func__, parent->ino, n->ino, inode);
376
377 if (inode && (inode->i_state & I_DIRTY)) {
378 struct pohmelfs_inode *pi = POHMELFS_I(inode);
379 pohmelfs_write_create_inode(pi);
380 /* pohmelfs_meta_command(pi, NETFS_INODE_INFO, 0, NULL, NULL, 0); */
381 iput(inode);
382 }
383 }
384
385 return 0;
386 }
387
388 /*
389 * Removes given child from given inode on server.
390 */
391 int pohmelfs_remove_child(struct pohmelfs_inode *pi, struct pohmelfs_name *n)
392 {
393 return pohmelfs_meta_command_data(pi, pi->ino, NETFS_REMOVE, NULL, 0, NULL, NULL, 0);
394 }
395
396 /*
397 * Writeback for given inode.
398 */
399 static int pohmelfs_write_inode(struct inode *inode,
400 struct writeback_control *wbc)
401 {
402 struct pohmelfs_inode *pi = POHMELFS_I(inode);
403
404 pohmelfs_write_create_inode(pi);
405 pohmelfs_write_inode_create_children(inode);
406
407 return 0;
408 }
409
410 /*
411 * It is not exported, sorry...
412 */
413 static inline wait_queue_head_t *page_waitqueue(struct page *page)
414 {
415 const struct zone *zone = page_zone(page);
416
417 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
418 }
419
420 static int pohmelfs_wait_on_page_locked(struct page *page)
421 {
422 struct pohmelfs_sb *psb = POHMELFS_SB(page->mapping->host->i_sb);
423 long ret = psb->wait_on_page_timeout;
424 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
425 int err = 0;
426
427 if (!PageLocked(page))
428 return 0;
429
430 for (;;) {
431 prepare_to_wait(page_waitqueue(page),
432 &wait.wait, TASK_INTERRUPTIBLE);
433
434 dprintk("%s: page: %p, locked: %d, uptodate: %d, error: %d, flags: %lx.\n",
435 __func__, page, PageLocked(page), PageUptodate(page),
436 PageError(page), page->flags);
437
438 if (!PageLocked(page))
439 break;
440
441 if (!signal_pending(current)) {
442 ret = schedule_timeout(ret);
443 if (!ret)
444 break;
445 continue;
446 }
447 ret = -ERESTARTSYS;
448 break;
449 }
450 finish_wait(page_waitqueue(page), &wait.wait);
451
452 if (!ret)
453 err = -ETIMEDOUT;
454
455
456 if (!err)
457 SetPageUptodate(page);
458
459 if (err)
460 printk("%s: page: %p, uptodate: %d, locked: %d, err: %d.\n",
461 __func__, page, PageUptodate(page), PageLocked(page), err);
462
463 return err;
464 }
465
466 static int pohmelfs_read_page_complete(struct page **pages, unsigned int page_num,
467 void *private, int err)
468 {
469 struct page *page = private;
470
471 if (PageChecked(page))
472 return err;
473
474 if (err < 0) {
475 dprintk("%s: page: %p, err: %d.\n", __func__, page, err);
476 SetPageError(page);
477 }
478
479 unlock_page(page);
480
481 return err;
482 }
483
484 /*
485 * Read a page from remote server.
486 * Function will wait until page is unlocked.
487 */
488 static int pohmelfs_readpage(struct file *file, struct page *page)
489 {
490 struct inode *inode = page->mapping->host;
491 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
492 struct pohmelfs_inode *pi = POHMELFS_I(inode);
493 struct netfs_trans *t;
494 struct netfs_cmd *cmd;
495 int err, path_len;
496 void *data;
497 u64 isize;
498
499 err = pohmelfs_data_lock(pi, page->index << PAGE_CACHE_SHIFT,
500 PAGE_SIZE, POHMELFS_READ_LOCK);
501 if (err)
502 goto err_out_exit;
503
504 isize = i_size_read(inode);
505 if (isize <= page->index << PAGE_CACHE_SHIFT) {
506 SetPageUptodate(page);
507 unlock_page(page);
508 return 0;
509 }
510
511 path_len = pohmelfs_path_length(pi);
512 if (path_len < 0) {
513 err = path_len;
514 goto err_out_exit;
515 }
516
517 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
518 if (!t) {
519 err = -ENOMEM;
520 goto err_out_exit;
521 }
522
523 t->complete = pohmelfs_read_page_complete;
524 t->private = page;
525
526 cmd = netfs_trans_current(t);
527 data = (void *)(cmd + 1);
528
529 err = pohmelfs_construct_path_string(pi, data, path_len);
530 if (err < 0)
531 goto err_out_free;
532
533 path_len = err;
534
535 cmd->id = pi->ino;
536 cmd->start = page->index;
537 cmd->start <<= PAGE_CACHE_SHIFT;
538 cmd->size = PAGE_CACHE_SIZE + path_len;
539 cmd->cmd = NETFS_READ_PAGE;
540 cmd->ext = path_len;
541
542 dprintk("%s: path: '%s', page: %p, ino: %llu, start: %llu, size: %lu.\n",
543 __func__, (char *)data, page, pi->ino, cmd->start, PAGE_CACHE_SIZE);
544
545 netfs_convert_cmd(cmd);
546 netfs_trans_update(cmd, t, path_len);
547
548 err = netfs_trans_finish(t, psb);
549 if (err)
550 goto err_out_return;
551
552 return pohmelfs_wait_on_page_locked(page);
553
554 err_out_free:
555 t->result = err;
556 netfs_trans_put(t);
557 err_out_exit:
558 SetPageError(page);
559 if (PageLocked(page))
560 unlock_page(page);
561 err_out_return:
562 printk("%s: page: %p, start: %lu, size: %lu, err: %d.\n",
563 __func__, page, page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, err);
564
565 return err;
566 }
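/*
 * Worked example of the request arithmetic above (illustrative only),
 * assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12) and page->index == 3:
 *
 *   cmd->start = 3 << 12 = 12288;             byte offset of the page
 *   cmd->size  = PAGE_CACHE_SIZE + path_len;  payload: one page + the path
 *   cmd->ext   = path_len;                    where the path string ends
 *
 * The reply is matched back to this page through t->private, and
 * pohmelfs_wait_on_page_locked() blocks until the page is unlocked by the
 * reply handling or wait_on_page_timeout expires.
 */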
567
568 /*
569 * Write begin/end magic.
570 * Allocates a page and writes inode if it was not synced to server before.
571 */
572 static int pohmelfs_write_begin(struct file *file, struct address_space *mapping,
573 loff_t pos, unsigned len, unsigned flags,
574 struct page **pagep, void **fsdata)
575 {
576 struct inode *inode = mapping->host;
577 struct page *page;
578 pgoff_t index;
579 unsigned start, end;
580 int err;
581
582 *pagep = NULL;
583
584 index = pos >> PAGE_CACHE_SHIFT;
585 start = pos & (PAGE_CACHE_SIZE - 1);
586 end = start + len;
587
588 page = grab_cache_page(mapping, index);
589 #if 0
590 dprintk("%s: page: %p pos: %llu, len: %u, index: %lu, start: %u, end: %u, uptodate: %d.\n",
591 __func__, page, pos, len, index, start, end, PageUptodate(page));
592 #endif
593 if (!page) {
594 err = -ENOMEM;
595 goto err_out_exit;
596 }
597
598 while (!PageUptodate(page)) {
599 if (start && test_bit(NETFS_INODE_REMOTE_SYNCED, &POHMELFS_I(inode)->state)) {
600 err = pohmelfs_readpage(file, page);
601 if (err)
602 goto err_out_exit;
603
604 lock_page(page);
605 continue;
606 }
607
608 if (len != PAGE_CACHE_SIZE) {
609 void *kaddr = kmap_atomic(page, KM_USER0);
610
611 memset(kaddr + start, 0, PAGE_CACHE_SIZE - start);
612 flush_dcache_page(page);
613 kunmap_atomic(kaddr, KM_USER0);
614 }
615 SetPageUptodate(page);
616 }
617
618 set_page_private(page, end);
619
620 *pagep = page;
621
622 return 0;
623
624 err_out_exit:
625 page_cache_release(page);
626 *pagep = NULL;
627
628 return err;
629 }
630
631 static int pohmelfs_write_end(struct file *file, struct address_space *mapping,
632 loff_t pos, unsigned len, unsigned copied,
633 struct page *page, void *fsdata)
634 {
635 struct inode *inode = mapping->host;
636
637 if (copied != len) {
638 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
639 void *kaddr = kmap_atomic(page, KM_USER0);
640
641 memset(kaddr + from + copied, 0, len - copied);
642 flush_dcache_page(page);
643 kunmap_atomic(kaddr, KM_USER0);
644 }
645
646 SetPageUptodate(page);
647 set_page_dirty(page);
648 #if 0
649 dprintk("%s: page: %p [U: %d, D: %d, L: %d], pos: %llu, len: %u, copied: %u.\n",
650 __func__, page,
651 PageUptodate(page), PageDirty(page), PageLocked(page),
652 pos, len, copied);
653 #endif
654 flush_dcache_page(page);
655
656 unlock_page(page);
657 page_cache_release(page);
658
659 if (pos + copied > inode->i_size) {
660 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
661
662 psb->avail_size -= pos + copied - inode->i_size;
663
664 i_size_write(inode, pos + copied);
665 }
666
667 return copied;
668 }
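/*
 * A short sketch of how write_begin/write_end tie into writeback
 * (illustrative numbers): write_begin() stores the end offset of the
 * copied region via set_page_private(page, end), and pohmelfs_writepages()
 * later adds exactly that value to trans->attached_size.  For a 100-byte
 * write at pos == 8192 with 4 KiB pages:
 *
 *   index = 8192 >> 12 = 2;   start = 8192 & 4095 = 0;   end = 0 + 100;
 *   set_page_private(page, 100);
 *   ...
 *   trans->attached_size += page_private(page);    adds 100 at writeback
 *
 * so the transaction accounts only for the bytes actually written into the
 * tail page instead of a full PAGE_CACHE_SIZE.
 */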
669
670 static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int page_num,
671 void *private, int err)
672 {
673 struct pohmelfs_inode *pi = private;
674 unsigned int i, num;
675 struct page **pages, *page = (struct page *)__pages;
676 loff_t index = page->index;
677
678 pages = kzalloc(sizeof(void *) * page_num, GFP_NOIO);
679 if (!pages)
680 return -ENOMEM;
681
682 num = find_get_pages_contig(pi->vfs_inode.i_mapping, index, page_num, pages);
683 if (num <= 0) {
684 err = num;
685 goto err_out_free;
686 }
687
688 for (i = 0; i < num; ++i) {
689 page = pages[i];
690
691 if (err)
692 printk("%s: %u/%u: page: %p, index: %lu, uptodate: %d, locked: %d, err: %d.\n",
693 __func__, i, num, page, page->index,
694 PageUptodate(page), PageLocked(page), err);
695
696 if (!PageChecked(page)) {
697 if (err < 0)
698 SetPageError(page);
699 unlock_page(page);
700 }
701 page_cache_release(page);
702 page_cache_release(page);
703 }
704
705 err_out_free:
706 kfree(pages);
707 return err;
708 }
709
710 static int pohmelfs_send_readpages(struct pohmelfs_inode *pi, struct page *first, unsigned int num)
711 {
712 struct netfs_trans *t;
713 struct netfs_cmd *cmd;
714 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
715 int err, path_len;
716 void *data;
717
718 err = pohmelfs_data_lock(pi, first->index << PAGE_CACHE_SHIFT,
719 num * PAGE_SIZE, POHMELFS_READ_LOCK);
720 if (err)
721 goto err_out_exit;
722
723 path_len = pohmelfs_path_length(pi);
724 if (path_len < 0) {
725 err = path_len;
726 goto err_out_exit;
727 }
728
729 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
730 if (!t) {
731 err = -ENOMEM;
732 goto err_out_exit;
733 }
734
735 cmd = netfs_trans_current(t);
736 data = (void *)(cmd + 1);
737
738 t->complete = pohmelfs_readpages_trans_complete;
739 t->private = pi;
740 t->page_num = num;
741 t->pages = (struct page **)first;
742
743 err = pohmelfs_construct_path_string(pi, data, path_len);
744 if (err < 0)
745 goto err_out_put;
746
747 path_len = err;
748
749 cmd->cmd = NETFS_READ_PAGES;
750 cmd->start = first->index;
751 cmd->start <<= PAGE_CACHE_SHIFT;
752 cmd->size = (num << 8 | PAGE_CACHE_SHIFT);
753 cmd->id = pi->ino;
754 cmd->ext = path_len;
755
756 dprintk("%s: t: %p, gen: %u, path: '%s', path_len: %u, "
757 "start: %lu, num: %u.\n",
758 __func__, t, t->gen, (char *)data, path_len,
759 first->index, num);
760
761 netfs_convert_cmd(cmd);
762 netfs_trans_update(cmd, t, path_len);
763
764 return netfs_trans_finish(t, psb);
765
766 err_out_put:
767 netfs_trans_free(t);
768 err_out_exit:
769 pohmelfs_readpages_trans_complete((struct page **)first, num, pi, err);
770 return err;
771 }
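/*
 * Worked example of the NETFS_READ_PAGES size packing above: the page
 * count and the page shift share one field,
 *
 *   cmd->size = (num << 8) | PAGE_CACHE_SHIFT;
 *
 * e.g. num == 32 contiguous pages with 4 KiB pages (shift 12):
 *
 *   cmd->size = (32 << 8) | 12 = 0x200c
 *   shift     = size & 0xff;     // 12   (assuming the receiver mirrors
 *   num       = size >> 8;       // 32    this packing)
 *
 * cmd->start is the byte offset of the first page
 * (first->index << PAGE_CACHE_SHIFT) and cmd->ext holds the path length.
 */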
772
773 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
774
775 static int pohmelfs_readpages(struct file *file, struct address_space *mapping,
776 struct list_head *pages, unsigned nr_pages)
777 {
778 unsigned int page_idx, num = 0;
779 struct page *page = NULL, *first = NULL;
780
781 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
782 page = list_to_page(pages);
783
784 prefetchw(&page->flags);
785 list_del(&page->lru);
786
787 if (!add_to_page_cache_lru(page, mapping,
788 page->index, GFP_KERNEL)) {
789
790 if (!num) {
791 num = 1;
792 first = page;
793 continue;
794 }
795
796 dprintk("%s: added to lru page: %p, page_index: %lu, first_index: %lu.\n",
797 __func__, page, page->index, first->index);
798
799 if (unlikely(first->index + num != page->index) || (num > 500)) {
800 pohmelfs_send_readpages(POHMELFS_I(mapping->host),
801 first, num);
802 first = page;
803 num = 0;
804 }
805
806 num++;
807 }
808 }
809 pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num);
810
811 /*
812 * This is a synchronous read, so when the last page is processed,
813 * all previous ones are already unlocked and ready to be used.
814 */
815 return 0;
816 }
817
818 /*
819 * Small address space operations for POHMELFS.
820 */
821 const struct address_space_operations pohmelfs_aops = {
822 .readpage = pohmelfs_readpage,
823 .readpages = pohmelfs_readpages,
824 .writepages = pohmelfs_writepages,
825 .write_begin = pohmelfs_write_begin,
826 .write_end = pohmelfs_write_end,
827 .set_page_dirty = __set_page_dirty_nobuffers,
828 };
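/*
 * How these address_space operations are typically reached (assumed
 * mapping, based on the generic VFS paths of this kernel generation):
 *
 *   .readpage      - read faults and generic_file_aio_read() on a cache miss
 *   .readpages     - readahead, batched into NETFS_READ_PAGES requests
 *   .writepages    - periodic writeback, sync(2) and pohmelfs_fsync()
 *   .write_begin / .write_end
 *                  - the buffered write path used by pohmelfs_write()
 *                    through __generic_file_aio_write()
 */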
829
830 static void pohmelfs_i_callback(struct rcu_head *head)
831 {
832 struct inode *inode = container_of(head, struct inode, i_rcu);
833 INIT_LIST_HEAD(&inode->i_dentry);
834 kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
835 }
836
837 /*
838 * ->destroy_inode() callback. Deletes inode from the caches
839 * and frees private data.
840 */
841 static void pohmelfs_destroy_inode(struct inode *inode)
842 {
843 struct super_block *sb = inode->i_sb;
844 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
845 struct pohmelfs_inode *pi = POHMELFS_I(inode);
846
847 /* pohmelfs_data_unlock(pi, 0, inode->i_size, POHMELFS_READ_LOCK); */
848
849 pohmelfs_inode_del_inode(psb, pi);
850
851 dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
852 __func__, pi, &pi->vfs_inode, pi->ino);
853 atomic_long_dec(&psb->total_inodes);
854 call_rcu(&inode->i_rcu, pohmelfs_i_callback);
855 }
856
857 /*
858 * ->alloc_inode() callback. Allocates inode and initializes private data.
859 */
860 static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
861 {
862 struct pohmelfs_inode *pi;
863
864 pi = kmem_cache_alloc(pohmelfs_inode_cache, GFP_NOIO);
865 if (!pi)
866 return NULL;
867
868 pi->hash_root = RB_ROOT;
869 mutex_init(&pi->offset_lock);
870
871 INIT_LIST_HEAD(&pi->sync_create_list);
872
873 INIT_LIST_HEAD(&pi->inode_entry);
874
875 pi->lock_type = 0;
876 pi->state = 0;
877 pi->total_len = 0;
878 pi->drop_count = 0;
879
880 dprintk("%s: pi: %p, inode: %p.\n", __func__, pi, &pi->vfs_inode);
881
882 atomic_long_inc(&POHMELFS_SB(sb)->total_inodes);
883
884 return &pi->vfs_inode;
885 }
886
887 /*
888 * We want fsync() to work on POHMELFS.
889 */
890 static int pohmelfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
891 {
892 struct inode *inode = file->f_mapping->host;
893 int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
894 if (!err) {
895 mutex_lock(&inode->i_mutex);
896 err = sync_inode_metadata(inode, 1);
897 mutex_unlock(&inode->i_mutex);
898 }
899 return err;
900 }
901
902 ssize_t pohmelfs_write(struct file *file, const char __user *buf,
903 size_t len, loff_t *ppos)
904 {
905 struct address_space *mapping = file->f_mapping;
906 struct inode *inode = mapping->host;
907 struct pohmelfs_inode *pi = POHMELFS_I(inode);
908 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
909 struct kiocb kiocb;
910 ssize_t ret;
911 loff_t pos = *ppos;
912
913 init_sync_kiocb(&kiocb, file);
914 kiocb.ki_pos = pos;
915 kiocb.ki_left = len;
916
917 dprintk("%s: len: %zu, pos: %llu.\n", __func__, len, pos);
918
919 mutex_lock(&inode->i_mutex);
920 ret = pohmelfs_data_lock(pi, pos, len, POHMELFS_WRITE_LOCK);
921 if (ret)
922 goto err_out_unlock;
923
924 ret = __generic_file_aio_write(&kiocb, &iov, 1, &kiocb.ki_pos);
925 *ppos = kiocb.ki_pos;
926
927 mutex_unlock(&inode->i_mutex);
928 WARN_ON(ret < 0);
929
930 if (ret > 0) {
931 ssize_t err;
932
933 err = generic_write_sync(file, pos, ret);
934 if (err < 0)
935 ret = err;
936 WARN_ON(ret < 0);
937 }
938
939 return ret;
940
941 err_out_unlock:
942 mutex_unlock(&inode->i_mutex);
943 return ret;
944 }
945
946 static const struct file_operations pohmelfs_file_ops = {
947 .open = generic_file_open,
948 .fsync = pohmelfs_fsync,
949
950 .llseek = generic_file_llseek,
951
952 .read = do_sync_read,
953 .aio_read = generic_file_aio_read,
954
955 .mmap = generic_file_mmap,
956
957 .splice_read = generic_file_splice_read,
958 .splice_write = generic_file_splice_write,
959
960 .write = pohmelfs_write,
961 .aio_write = generic_file_aio_write,
962 };
963
964 const struct inode_operations pohmelfs_symlink_inode_operations = {
965 .readlink = generic_readlink,
966 .follow_link = page_follow_link_light,
967 .put_link = page_put_link,
968 };
969
970 int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
971 {
972 int err;
973
974 err = inode_change_ok(inode, attr);
975 if (err) {
976 dprintk("%s: ino: %llu, inode changes are not allowed.\n", __func__, POHMELFS_I(inode)->ino);
977 goto err_out_exit;
978 }
979
980 if ((attr->ia_valid & ATTR_SIZE) &&
981 attr->ia_size != i_size_read(inode)) {
982 err = vmtruncate(inode, attr->ia_size);
983 if (err) {
984 dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
985 goto err_out_exit;
986 }
987 }
988
989 setattr_copy(inode, attr);
990 mark_inode_dirty(inode);
991
992 dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n",
993 __func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode,
994 inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size);
995
996 return 0;
997
998 err_out_exit:
999 return err;
1000 }
1001
1002 int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr)
1003 {
1004 struct inode *inode = dentry->d_inode;
1005 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1006 int err;
1007
1008 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1009 if (err)
1010 goto err_out_exit;
1011
1012 err = security_inode_setattr(dentry, attr);
1013 if (err)
1014 goto err_out_exit;
1015
1016 err = pohmelfs_setattr_raw(inode, attr);
1017 if (err)
1018 goto err_out_exit;
1019
1020 return 0;
1021
1022 err_out_exit:
1023 return err;
1024 }
1025
1026 static int pohmelfs_send_xattr_req(struct pohmelfs_inode *pi, u64 id, u64 start,
1027 const char *name, const void *value, size_t attrsize, int command)
1028 {
1029 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
1030 int err, path_len, namelen = strlen(name) + 1; /* 0-byte */
1031 struct netfs_trans *t;
1032 struct netfs_cmd *cmd;
1033 void *data;
1034
1035 dprintk("%s: id: %llu, start: %llu, name: '%s', attrsize: %zu, cmd: %d.\n",
1036 __func__, id, start, name, attrsize, command);
1037
1038 path_len = pohmelfs_path_length(pi);
1039 if (path_len < 0) {
1040 err = path_len;
1041 goto err_out_exit;
1042 }
1043
1044 t = netfs_trans_alloc(psb, namelen + path_len + attrsize, 0, 0);
1045 if (!t) {
1046 err = -ENOMEM;
1047 goto err_out_exit;
1048 }
1049
1050 cmd = netfs_trans_current(t);
1051 data = cmd + 1;
1052
1053 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1054 if (path_len < 0) {
1055 err = path_len;
1056 goto err_out_put;
1057 }
1058 data += path_len;
1059
1060 /*
1061 * 'name' is a NUL-terminated string already and
1062 * 'namelen' includes 0-byte.
1063 */
1064 memcpy(data, name, namelen);
1065 data += namelen;
1066
1067 memcpy(data, value, attrsize);
1068
1069 cmd->cmd = command;
1070 cmd->id = id;
1071 cmd->start = start;
1072 cmd->size = attrsize + namelen + path_len;
1073 cmd->ext = path_len;
1074 cmd->csize = 0;
1075 cmd->cpad = 0;
1076
1077 netfs_convert_cmd(cmd);
1078 netfs_trans_update(cmd, t, namelen + path_len + attrsize);
1079
1080 return netfs_trans_finish(t, psb);
1081
1082 err_out_put:
1083 t->result = err;
1084 netfs_trans_put(t);
1085 err_out_exit:
1086 return err;
1087 }
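/*
 * Illustrative layout of the xattr request built above (example path and
 * attribute name are hypothetical):
 *
 *   +------------------+-------------+----------------+---------------+
 *   | struct netfs_cmd | "/file\0"   | "user.test\0"  | value bytes   |
 *   +------------------+-------------+----------------+---------------+
 *                        path_len      namelen          attrsize
 *
 *   cmd->ext  = path_len
 *   cmd->size = attrsize + namelen + path_len
 *
 * For NETFS_XATTR_SET the caller passes the flags in cmd->id and the value
 * size in cmd->start; for NETFS_XATTR_GET no value bytes are attached
 * (attrsize is passed as 0), cmd->id carries the mcache generation used to
 * match the reply and cmd->start the size of the caller's buffer.
 */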
1088
1089 static int pohmelfs_setxattr(struct dentry *dentry, const char *name,
1090 const void *value, size_t attrsize, int flags)
1091 {
1092 struct inode *inode = dentry->d_inode;
1093 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1094 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1095
1096 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1097 return -EOPNOTSUPP;
1098
1099 return pohmelfs_send_xattr_req(pi, flags, attrsize, name,
1100 value, attrsize, NETFS_XATTR_SET);
1101 }
1102
1103 static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name,
1104 void *value, size_t attrsize)
1105 {
1106 struct inode *inode = dentry->d_inode;
1107 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1108 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1109 struct pohmelfs_mcache *m;
1110 int err;
1111 long timeout = psb->mcache_timeout;
1112
1113 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1114 return -EOPNOTSUPP;
1115
1116 m = pohmelfs_mcache_alloc(psb, 0, attrsize, value);
1117 if (IS_ERR(m))
1118 return PTR_ERR(m);
1119
1120 dprintk("%s: ino: %llu, name: '%s', size: %zu.\n",
1121 __func__, pi->ino, name, attrsize);
1122
1123 err = pohmelfs_send_xattr_req(pi, m->gen, attrsize, name, value, 0, NETFS_XATTR_GET);
1124 if (err)
1125 goto err_out_put;
1126
1127 do {
1128 err = wait_for_completion_timeout(&m->complete, timeout);
1129 if (err) {
1130 err = m->err;
1131 break;
1132 }
1133
1134 /*
1135 * This loop is a bit ugly: it waits until the reference counter
1136 * hits 1 and only then puts the object. The goal is to prevent a race with
1137 * the network thread, which may start processing the given request (i.e.
1138 * increase its reference counter) without yet completing it, while
1139 * we exit from ->getxattr() with a timeout. Although the request itself
1140 * will not be freed (its reference counter was increased by the network
1141 * thread), the data pointer provided by the user may be released, so the
1142 * network thread would overwrite an already freed area.
1143 *
1144 * So, after the timeout we remove the request from the cache, so it cannot be
1145 * found by the network thread, and wait for its reference counter to hit 1,
1146 * i.e. if the network thread has already started to process this request, we wait
1147 * for it to finish and then free the object locally. If the reference counter is
1148 * already 1, i.e. the request is not used by anyone else, we can free it without
1149 * problem.
1150 */
1151 err = -ETIMEDOUT;
1152 timeout = HZ;
1153
1154 pohmelfs_mcache_remove_locked(psb, m);
1155 } while (atomic_read(&m->refcnt) != 1);
1156
1157 pohmelfs_mcache_put(psb, m);
1158
1159 dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
1160
1161 return err;
1162
1163 err_out_put:
1164 pohmelfs_mcache_put(psb, m);
1165 return err;
1166 }
1167
1168 static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1169 {
1170 struct inode *inode = dentry->d_inode;
1171 #if 0
1172 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1173 int err;
1174
1175 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
1176 if (err)
1177 return err;
1178 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n",
1179 __func__, pi->ino, inode->i_mode, inode->i_uid,
1180 inode->i_gid, inode->i_size);
1181 #endif
1182
1183 generic_fillattr(inode, stat);
1184 return 0;
1185 }
1186
1187 const struct inode_operations pohmelfs_file_inode_operations = {
1188 .setattr = pohmelfs_setattr,
1189 .getattr = pohmelfs_getattr,
1190 .setxattr = pohmelfs_setxattr,
1191 .getxattr = pohmelfs_getxattr,
1192 };
1193
1194 /*
1195 * Fill inode data: mode, size, operation callbacks and so on...
1196 */
1197 void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
1198 {
1199 inode->i_mode = info->mode;
1200 set_nlink(inode, info->nlink);
1201 inode->i_uid = info->uid;
1202 inode->i_gid = info->gid;
1203 inode->i_blocks = info->blocks;
1204 inode->i_rdev = info->rdev;
1205 inode->i_size = info->size;
1206 inode->i_version = info->version;
1207 inode->i_blkbits = ffs(info->blocksize);
1208
1209 dprintk("%s: inode: %p, num: %lu/%llu inode is regular: %d, dir: %d, link: %d, mode: %o, size: %llu.\n",
1210 __func__, inode, inode->i_ino, info->ino,
1211 S_ISREG(inode->i_mode), S_ISDIR(inode->i_mode),
1212 S_ISLNK(inode->i_mode), inode->i_mode, inode->i_size);
1213
1214 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
1215
1216 /*
1217 * i_mapping is a pointer to i_data during inode initialization.
1218 */
1219 inode->i_data.a_ops = &pohmelfs_aops;
1220
1221 if (S_ISREG(inode->i_mode)) {
1222 inode->i_fop = &pohmelfs_file_ops;
1223 inode->i_op = &pohmelfs_file_inode_operations;
1224 } else if (S_ISDIR(inode->i_mode)) {
1225 inode->i_fop = &pohmelfs_dir_fops;
1226 inode->i_op = &pohmelfs_dir_inode_ops;
1227 } else if (S_ISLNK(inode->i_mode)) {
1228 inode->i_op = &pohmelfs_symlink_inode_operations;
1229 inode->i_fop = &pohmelfs_file_ops;
1230 } else {
1231 inode->i_fop = &generic_ro_fops;
1232 }
1233 }
1234
1235 static int pohmelfs_drop_inode(struct inode *inode)
1236 {
1237 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1238 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1239
1240 spin_lock(&psb->ino_lock);
1241 list_del_init(&pi->inode_entry);
1242 spin_unlock(&psb->ino_lock);
1243
1244 return generic_drop_inode(inode);
1245 }
1246
1247 static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb,
1248 struct list_head *head, unsigned int *count)
1249 {
1250 struct pohmelfs_inode *pi = NULL;
1251
1252 spin_lock(&psb->ino_lock);
1253 if (!list_empty(head)) {
1254 pi = list_entry(head->next, struct pohmelfs_inode,
1255 inode_entry);
1256 list_del_init(&pi->inode_entry);
1257 *count = pi->drop_count;
1258 pi->drop_count = 0;
1259 }
1260 spin_unlock(&psb->ino_lock);
1261
1262 return pi;
1263 }
1264
1265 static void pohmelfs_flush_transactions(struct pohmelfs_sb *psb)
1266 {
1267 struct pohmelfs_config *c;
1268
1269 mutex_lock(&psb->state_lock);
1270 list_for_each_entry(c, &psb->state_list, config_entry) {
1271 pohmelfs_state_flush_transactions(&c->state);
1272 }
1273 mutex_unlock(&psb->state_lock);
1274 }
1275
1276 /*
1277 * ->put_super() callback. Invoked before superblock is destroyed,
1278 * so it has to clean all private data.
1279 */
1280 static void pohmelfs_put_super(struct super_block *sb)
1281 {
1282 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1283 struct pohmelfs_inode *pi;
1284 unsigned int count = 0;
1285 unsigned int in_drop_list = 0;
1286 struct inode *inode, *tmp;
1287
1288 dprintk("%s.\n", __func__);
1289
1290 /*
1291 * Kill pending transactions, which could affect inodes in-flight.
1292 */
1293 pohmelfs_flush_transactions(psb);
1294
1295 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) {
1296 inode = &pi->vfs_inode;
1297
1298 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1299 __func__, pi->ino, pi, inode, count);
1300
1301 if (atomic_read(&inode->i_count) != count) {
1302 printk("%s: ino: %llu, pi: %p, inode: %p, count: %u, i_count: %d.\n",
1303 __func__, pi->ino, pi, inode, count,
1304 atomic_read(&inode->i_count));
1305 count = atomic_read(&inode->i_count);
1306 in_drop_list++;
1307 }
1308
1309 while (count--)
1310 iput(&pi->vfs_inode);
1311 }
1312
1313 list_for_each_entry_safe(inode, tmp, &sb->s_inodes, i_sb_list) {
1314 pi = POHMELFS_I(inode);
1315
1316 dprintk("%s: ino: %llu, pi: %p, inode: %p, i_count: %u.\n",
1317 __func__, pi->ino, pi, inode, atomic_read(&inode->i_count));
1318
1319 /*
1320 * These are special inodes: they were created during
1321 * directory reading or lookup and were never bound to a dentry,
1322 * so they live here with a reference counter of 1 and prevent
1323 * umount from succeeding, since it believes they are busy.
1324 */
1325 count = atomic_read(&inode->i_count);
1326 if (count) {
1327 list_del_init(&inode->i_sb_list);
1328 while (count--)
1329 iput(&pi->vfs_inode);
1330 }
1331 }
1332
1333 psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
1334 cancel_delayed_work_sync(&psb->dwork);
1335 cancel_delayed_work_sync(&psb->drop_dwork);
1336 flush_scheduled_work();
1337
1338 dprintk("%s: stopped workqueues.\n", __func__);
1339
1340 pohmelfs_crypto_exit(psb);
1341 pohmelfs_state_exit(psb);
1342
1343 bdi_destroy(&psb->bdi);
1344
1345 kfree(psb);
1346 sb->s_fs_info = NULL;
1347 }
1348
1349 static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1350 {
1351 struct super_block *sb = dentry->d_sb;
1352 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1353
1354 /*
1355 * There are no filesystem size limits yet.
1356 */
1357 memset(buf, 0, sizeof(struct kstatfs));
1358
1359 buf->f_type = POHMELFS_MAGIC_NUM; /* 'POH.' */
1360 buf->f_bsize = sb->s_blocksize;
1361 buf->f_files = psb->ino;
1362 buf->f_namelen = 255;
1363 buf->f_files = atomic_long_read(&psb->total_inodes);
1364 buf->f_bfree = buf->f_bavail = psb->avail_size >> PAGE_SHIFT;
1365 buf->f_blocks = psb->total_size >> PAGE_SHIFT;
1366
1367 dprintk("%s: total: %llu, avail: %llu, inodes: %llu, bsize: %lu.\n",
1368 __func__, psb->total_size, psb->avail_size, buf->f_files, sb->s_blocksize);
1369
1370 return 0;
1371 }
1372
1373 static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
1374 {
1375 struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
1376
1377 seq_printf(seq, ",idx=%u", psb->idx);
1378 seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
1379 seq_printf(seq, ",drop_scan_timeout=%u", jiffies_to_msecs(psb->drop_scan_timeout));
1380 seq_printf(seq, ",wait_on_page_timeout=%u", jiffies_to_msecs(psb->wait_on_page_timeout));
1381 seq_printf(seq, ",trans_retries=%u", psb->trans_retries);
1382 seq_printf(seq, ",crypto_thread_num=%u", psb->crypto_thread_num);
1383 seq_printf(seq, ",trans_max_pages=%u", psb->trans_max_pages);
1384 seq_printf(seq, ",mcache_timeout=%u", jiffies_to_msecs(psb->mcache_timeout));
1385 if (psb->crypto_fail_unsupported)
1386 seq_printf(seq, ",crypto_fail_unsupported");
1387
1388 return 0;
1389 }
1390
1391 enum {
1392 pohmelfs_opt_idx,
1393 pohmelfs_opt_crypto_thread_num,
1394 pohmelfs_opt_trans_max_pages,
1395 pohmelfs_opt_crypto_fail_unsupported,
1396
1397 /* Remountable options */
1398 pohmelfs_opt_trans_scan_timeout,
1399 pohmelfs_opt_drop_scan_timeout,
1400 pohmelfs_opt_wait_on_page_timeout,
1401 pohmelfs_opt_trans_retries,
1402 pohmelfs_opt_mcache_timeout,
1403 };
1404
1405 static struct match_token pohmelfs_tokens[] = {
1406 {pohmelfs_opt_idx, "idx=%u"},
1407 {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
1408 {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
1409 {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
1410 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"},
1411 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"},
1412 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"},
1413 {pohmelfs_opt_trans_retries, "trans_retries=%u"},
1414 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"},
1415 };
1416
1417 static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount)
1418 {
1419 char *p;
1420 substring_t args[MAX_OPT_ARGS];
1421 int option, err;
1422
1423 if (!options)
1424 return 0;
1425
1426 while ((p = strsep(&options, ",")) != NULL) {
1427 int token;
1428 if (!*p)
1429 continue;
1430
1431 token = match_token(p, pohmelfs_tokens, args);
1432
1433 err = match_int(&args[0], &option);
1434 if (err)
1435 return err;
1436
1437 if (remount && token <= pohmelfs_opt_crypto_fail_unsupported)
1438 continue;
1439
1440 switch (token) {
1441 case pohmelfs_opt_idx:
1442 psb->idx = option;
1443 break;
1444 case pohmelfs_opt_trans_scan_timeout:
1445 psb->trans_scan_timeout = msecs_to_jiffies(option);
1446 break;
1447 case pohmelfs_opt_drop_scan_timeout:
1448 psb->drop_scan_timeout = msecs_to_jiffies(option);
1449 break;
1450 case pohmelfs_opt_wait_on_page_timeout:
1451 psb->wait_on_page_timeout = msecs_to_jiffies(option);
1452 break;
1453 case pohmelfs_opt_mcache_timeout:
1454 psb->mcache_timeout = msecs_to_jiffies(option);
1455 break;
1456 case pohmelfs_opt_trans_retries:
1457 psb->trans_retries = option;
1458 break;
1459 case pohmelfs_opt_crypto_thread_num:
1460 psb->crypto_thread_num = option;
1461 break;
1462 case pohmelfs_opt_trans_max_pages:
1463 psb->trans_max_pages = option;
1464 break;
1465 case pohmelfs_opt_crypto_fail_unsupported:
1466 psb->crypto_fail_unsupported = 1;
1467 break;
1468 default:
1469 return -EINVAL;
1470 }
1471 }
1472
1473 return 0;
1474 }
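/*
 * A hypothetical mount invocation exercising the options parsed above (the
 * server connection itself is configured through the separate pohmelfs
 * configuration channel set up by pohmelfs_config_init(), not via mount
 * options):
 *
 *   mount -t pohmel -o idx=1,trans_scan_timeout=5000,drop_scan_timeout=1000,\
 *         trans_max_pages=256,crypto_thread_num=2 none /mnt/pohmel
 *
 * Options up to and including crypto_fail_unsupported are honoured only at
 * mount time; the timeout, retry and mcache options may also be changed on
 * remount (see the "Remountable options" split in the enum above and
 * pohmelfs_remount()).
 */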
1475
1476 static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
1477 {
1478 int err;
1479 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1480 unsigned long old_sb_flags = sb->s_flags;
1481
1482 err = pohmelfs_parse_options(data, psb, 1);
1483 if (err)
1484 goto err_out_restore;
1485
1486 if (!(*flags & MS_RDONLY))
1487 sb->s_flags &= ~MS_RDONLY;
1488 return 0;
1489
1490 err_out_restore:
1491 sb->s_flags = old_sb_flags;
1492 return err;
1493 }
1494
1495 static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count)
1496 {
1497 struct inode *inode = &pi->vfs_inode;
1498
1499 dprintk("%s: %p: ino: %llu, owned: %d.\n",
1500 __func__, inode, pi->ino, test_bit(NETFS_INODE_OWNED, &pi->state));
1501
1502 mutex_lock(&inode->i_mutex);
1503 if (test_and_clear_bit(NETFS_INODE_OWNED, &pi->state)) {
1504 filemap_fdatawrite(inode->i_mapping);
1505 inode->i_sb->s_op->write_inode(inode, 0);
1506 }
1507
1508 #ifdef POHMELFS_TRUNCATE_ON_INODE_FLUSH
1509 truncate_inode_pages(inode->i_mapping, 0);
1510 #endif
1511
1512 pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1513 mutex_unlock(&inode->i_mutex);
1514 }
1515
1516 static void pohmelfs_put_inode_count(struct pohmelfs_inode *pi, unsigned int count)
1517 {
1518 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1519 __func__, pi->ino, pi, &pi->vfs_inode, count);
1520
1521 if (test_and_clear_bit(NETFS_INODE_NEED_FLUSH, &pi->state))
1522 pohmelfs_flush_inode(pi, count);
1523
1524 while (count--)
1525 iput(&pi->vfs_inode);
1526 }
1527
1528 static void pohmelfs_drop_scan(struct work_struct *work)
1529 {
1530 struct pohmelfs_sb *psb =
1531 container_of(work, struct pohmelfs_sb, drop_dwork.work);
1532 struct pohmelfs_inode *pi;
1533 unsigned int count = 0;
1534
1535 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count)))
1536 pohmelfs_put_inode_count(pi, count);
1537
1538 pohmelfs_check_states(psb);
1539
1540 if (psb->drop_scan_timeout)
1541 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1542 }
1543
1544 /*
1545 * Run through all transactions starting from the oldest,
1546 * drop each transaction from the current state and try to send it
1547 * to all remote nodes that are currently installed.
1548 */
1549 static void pohmelfs_trans_scan_state(struct netfs_state *st)
1550 {
1551 struct rb_node *rb_node;
1552 struct netfs_trans_dst *dst;
1553 struct pohmelfs_sb *psb = st->psb;
1554 unsigned int timeout = psb->trans_scan_timeout;
1555 struct netfs_trans *t;
1556 int err;
1557
1558 mutex_lock(&st->trans_lock);
1559 for (rb_node = rb_first(&st->trans_root); rb_node; ) {
1560 dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
1561 t = dst->trans;
1562
1563 if (timeout && time_after(dst->send_time + timeout, jiffies)
1564 && dst->retries == 0)
1565 break;
1566
1567 dprintk("%s: t: %p, gen: %u, st: %p, retries: %u, max: %u.\n",
1568 __func__, t, t->gen, st, dst->retries, psb->trans_retries);
1569 netfs_trans_get(t);
1570
1571 rb_node = rb_next(rb_node);
1572
1573 err = -ETIMEDOUT;
1574 if (timeout && (++dst->retries < psb->trans_retries))
1575 err = netfs_trans_resend(t, psb);
1576
1577 if (err || (t->flags & NETFS_TRANS_SINGLE_DST)) {
1578 if (netfs_trans_remove_nolock(dst, st))
1579 netfs_trans_drop_dst_nostate(dst);
1580 }
1581
1582 t->result = err;
1583 netfs_trans_put(t);
1584 }
1585 mutex_unlock(&st->trans_lock);
1586 }
1587
1588 /*
1589 * Walk through all installed network states and resend all
1590 * transactions that are old enough.
1591 */
1592 static void pohmelfs_trans_scan(struct work_struct *work)
1593 {
1594 struct pohmelfs_sb *psb =
1595 container_of(work, struct pohmelfs_sb, dwork.work);
1596 struct netfs_state *st;
1597 struct pohmelfs_config *c;
1598
1599 mutex_lock(&psb->state_lock);
1600 list_for_each_entry(c, &psb->state_list, config_entry) {
1601 st = &c->state;
1602
1603 pohmelfs_trans_scan_state(st);
1604 }
1605 mutex_unlock(&psb->state_lock);
1606
1607 /*
1608 * If no timeout is specified, the system is in the middle of the umount
1609 * process, so there is no need to reschedule the scanning work.
1610 */
1611 if (psb->trans_scan_timeout)
1612 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1613 }
1614
1615 int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
1616 unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start)
1617 {
1618 struct inode *inode = &pi->vfs_inode;
1619 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1620 int err = 0, sz;
1621 struct netfs_trans *t;
1622 int path_len, addon_len = 0;
1623 void *data;
1624 struct netfs_inode_info *info;
1625 struct netfs_cmd *cmd;
1626
1627 dprintk("%s: ino: %llu, cmd: %u, addon: %p.\n", __func__, pi->ino, cmd_op, addon);
1628
1629 path_len = pohmelfs_path_length(pi);
1630 if (path_len < 0) {
1631 err = path_len;
1632 goto err_out_exit;
1633 }
1634
1635 if (addon)
1636 addon_len = strlen(addon) + 1; /* 0-byte */
1637 sz = addon_len;
1638
1639 if (cmd_op == NETFS_INODE_INFO)
1640 sz += sizeof(struct netfs_inode_info);
1641
1642 t = netfs_trans_alloc(psb, sz + path_len, flags, 0);
1643 if (!t) {
1644 err = -ENOMEM;
1645 goto err_out_exit;
1646 }
1647 t->complete = complete;
1648 t->private = priv;
1649
1650 cmd = netfs_trans_current(t);
1651 data = (void *)(cmd + 1);
1652
1653 if (cmd_op == NETFS_INODE_INFO) {
1654 info = (struct netfs_inode_info *)(cmd + 1);
1655 data = (void *)(info + 1);
1656
1657 /*
1658 * We are under i_mutex, can read and change whatever we want...
1659 */
1660 info->mode = inode->i_mode;
1661 info->nlink = inode->i_nlink;
1662 info->uid = inode->i_uid;
1663 info->gid = inode->i_gid;
1664 info->blocks = inode->i_blocks;
1665 info->rdev = inode->i_rdev;
1666 info->size = inode->i_size;
1667 info->version = inode->i_version;
1668
1669 netfs_convert_inode_info(info);
1670 }
1671
1672 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1673 if (path_len < 0)
1674 goto err_out_free;
1675
1676 dprintk("%s: path_len: %d.\n", __func__, path_len);
1677
1678 if (addon) {
1679 path_len--; /* Do not place null-byte before the addon */
1680 path_len += sprintf(data + path_len, "/%s", addon) + 1; /* 0 - byte */
1681 }
1682
1683 sz += path_len;
1684
1685 cmd->cmd = cmd_op;
1686 cmd->ext = path_len;
1687 cmd->size = sz;
1688 cmd->id = id;
1689 cmd->start = start;
1690
1691 netfs_convert_cmd(cmd);
1692 netfs_trans_update(cmd, t, sz);
1693
1694 /*
1695 * Note that it is possible to leak an error here: the transaction callback
1696 * will not be invoked for an allocation path failure.
1697 */
1698 return netfs_trans_finish(t, psb);
1699
1700 err_out_free:
1701 netfs_trans_free(t);
1702 err_out_exit:
1703 if (complete)
1704 complete(NULL, 0, priv, err);
1705 return err;
1706 }
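/*
 * Worked example of the addon handling above (hypothetical names): assume
 * the constructed path is "/dir" and addon is "file", so
 * pohmelfs_construct_path_string() returned path_len == 5 ("/dir" plus the
 * terminating zero).  Then:
 *
 *   path_len--;                                   now 4, points at the old 0
 *   path_len += sprintf(data + 4, "/%s", "file") + 1;
 *                                sprintf writes "/file" plus a zero and
 *                                returns 5, so path_len becomes 10
 *
 * leaving "/dir/file\0" (10 bytes including the terminator) in the buffer,
 * which is what cmd->ext and cmd->size account for.
 */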
1707
1708 int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
1709 netfs_trans_complete_t complete, void *priv, u64 start)
1710 {
1711 return pohmelfs_meta_command_data(pi, pi->ino, cmd_op, NULL, flags, complete, priv, start);
1712 }
1713
1714 /*
1715 * Send a request and wait for the POHMELFS root capabilities response,
1716 * which updates our information about the server's export: its size,
1717 * permissions, number of objects, available space and so on.
1718 */
1719 static int pohmelfs_root_handshake(struct pohmelfs_sb *psb)
1720 {
1721 struct netfs_trans *t;
1722 struct netfs_cmd *cmd;
1723 int err = -ENOMEM;
1724
1725 t = netfs_trans_alloc(psb, 0, 0, 0);
1726 if (!t)
1727 goto err_out_exit;
1728
1729 cmd = netfs_trans_current(t);
1730
1731 cmd->cmd = NETFS_CAPABILITIES;
1732 cmd->id = POHMELFS_ROOT_CAPABILITIES;
1733 cmd->size = 0;
1734 cmd->start = 0;
1735 cmd->ext = 0;
1736 cmd->csize = 0;
1737
1738 netfs_convert_cmd(cmd);
1739 netfs_trans_update(cmd, t, 0);
1740
1741 err = netfs_trans_finish(t, psb);
1742 if (err)
1743 goto err_out_exit;
1744
1745 psb->flags = ~0;
1746 err = wait_event_interruptible_timeout(psb->wait,
1747 (psb->flags != ~0),
1748 psb->wait_on_page_timeout);
1749 if (!err)
1750 err = -ETIMEDOUT;
1751 else if (err > 0)
1752 err = -psb->flags;
1753
1754 if (err)
1755 goto err_out_exit;
1756
1757 return 0;
1758
1759 err_out_exit:
1760 return err;
1761 }
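/*
 * A note on the error handling above: psb->flags is set to ~0 before the
 * request goes out and is overwritten with the server's status (0 on
 * success) when the NETFS_CAPABILITIES reply arrives and psb->wait is
 * woken (the reply handler lives elsewhere in the driver).
 * wait_event_interruptible_timeout() returns 0 when the timeout expires
 * with the condition still false, a positive value when the condition
 * becomes true, and a negative value when a signal arrives, hence:
 *
 *   err == 0  ->  err = -ETIMEDOUT
 *   err  > 0  ->  err = -psb->flags   (0 for success, -Exxx otherwise)
 *   err  < 0  ->  returned as is (-ERESTARTSYS)
 */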
1762
1763 static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
1764 {
1765 struct netfs_state *st;
1766 struct pohmelfs_ctl *ctl;
1767 struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
1768 struct pohmelfs_config *c;
1769
1770 mutex_lock(&psb->state_lock);
1771
1772 seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n");
1773
1774 list_for_each_entry(c, &psb->state_list, config_entry) {
1775 st = &c->state;
1776 ctl = &st->ctl;
1777
1778 seq_printf(m, "%u ", ctl->idx);
1779 if (ctl->addr.sa_family == AF_INET) {
1780 struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
1781 seq_printf(m, "%pI4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port));
1782 } else if (ctl->addr.sa_family == AF_INET6) {
1783 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
1784 seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
1785 } else {
1786 unsigned int i;
1787 for (i = 0; i < ctl->addrlen; ++i)
1788 seq_printf(m, "%02x.", ctl->addr.addr[i]);
1789 }
1790
1791 seq_printf(m, " %u %u %d %u %x\n",
1792 ctl->type, ctl->proto,
1793 st->socket != NULL,
1794 ctl->prio, ctl->perm);
1795 }
1796 mutex_unlock(&psb->state_lock);
1797
1798 return 0;
1799 }
1800
1801 static const struct super_operations pohmelfs_sb_ops = {
1802 .alloc_inode = pohmelfs_alloc_inode,
1803 .destroy_inode = pohmelfs_destroy_inode,
1804 .drop_inode = pohmelfs_drop_inode,
1805 .write_inode = pohmelfs_write_inode,
1806 .put_super = pohmelfs_put_super,
1807 .remount_fs = pohmelfs_remount,
1808 .statfs = pohmelfs_statfs,
1809 .show_options = pohmelfs_show_options,
1810 .show_stats = pohmelfs_show_stats,
1811 };
1812
1813 /*
1814 * Allocate private superblock and create root dir.
1815 */
1816 static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
1817 {
1818 struct pohmelfs_sb *psb;
1819 int err = -ENOMEM;
1820 struct inode *root;
1821 struct pohmelfs_inode *npi;
1822 struct qstr str;
1823
1824 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL);
1825 if (!psb)
1826 goto err_out_exit;
1827
1828 err = bdi_init(&psb->bdi);
1829 if (err)
1830 goto err_out_free_sb;
1831
1832 err = bdi_register(&psb->bdi, NULL, "pfs-%d", atomic_inc_return(&psb_bdi_num));
1833 if (err) {
1834 bdi_destroy(&psb->bdi);
1835 goto err_out_free_sb;
1836 }
1837
1838 sb->s_fs_info = psb;
1839 sb->s_op = &pohmelfs_sb_ops;
1840 sb->s_magic = POHMELFS_MAGIC_NUM;
1841 sb->s_maxbytes = MAX_LFS_FILESIZE;
1842 sb->s_blocksize = PAGE_SIZE;
1843 sb->s_bdi = &psb->bdi;
1844
1845 psb->sb = sb;
1846
1847 psb->ino = 2;
1848 psb->idx = 0;
1849 psb->active_state = NULL;
1850 psb->trans_retries = 5;
1851 psb->trans_data_size = PAGE_SIZE;
1852 psb->drop_scan_timeout = msecs_to_jiffies(1000);
1853 psb->trans_scan_timeout = msecs_to_jiffies(5000);
1854 psb->wait_on_page_timeout = msecs_to_jiffies(5000);
1855 init_waitqueue_head(&psb->wait);
1856
1857 spin_lock_init(&psb->ino_lock);
1858
1859 INIT_LIST_HEAD(&psb->drop_list);
1860
1861 mutex_init(&psb->mcache_lock);
1862 psb->mcache_root = RB_ROOT;
1863 psb->mcache_timeout = msecs_to_jiffies(5000);
1864 atomic_long_set(&psb->mcache_gen, 0);
1865
1866 psb->trans_max_pages = 100;
1867
1868 psb->crypto_align_size = 16;
1869 psb->crypto_attached_size = 0;
1870 psb->hash_strlen = 0;
1871 psb->cipher_strlen = 0;
1872 psb->perform_crypto = 0;
1873 psb->crypto_thread_num = 2;
1874 psb->crypto_fail_unsupported = 0;
1875 mutex_init(&psb->crypto_thread_lock);
1876 INIT_LIST_HEAD(&psb->crypto_ready_list);
1877 INIT_LIST_HEAD(&psb->crypto_active_list);
1878
1879 atomic_set(&psb->trans_gen, 1);
1880 atomic_long_set(&psb->total_inodes, 0);
1881
1882 mutex_init(&psb->state_lock);
1883 INIT_LIST_HEAD(&psb->state_list);
1884
1885 err = pohmelfs_parse_options((char *) data, psb, 0);
1886 if (err)
1887 goto err_out_free_bdi;
1888
1889 err = pohmelfs_copy_crypto(psb);
1890 if (err)
1891 goto err_out_free_bdi;
1892
1893 err = pohmelfs_state_init(psb);
1894 if (err)
1895 goto err_out_free_strings;
1896
1897 err = pohmelfs_crypto_init(psb);
1898 if (err)
1899 goto err_out_state_exit;
1900
1901 err = pohmelfs_root_handshake(psb);
1902 if (err)
1903 goto err_out_crypto_exit;
1904
1905 str.name = "/";
1906 str.hash = jhash("/", 1, 0);
1907 str.len = 1;
1908
1909 npi = pohmelfs_create_entry_local(psb, NULL, &str, 0, 0755|S_IFDIR);
1910 if (IS_ERR(npi)) {
1911 err = PTR_ERR(npi);
1912 goto err_out_crypto_exit;
1913 }
1914 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
1915 clear_bit(NETFS_INODE_OWNED, &npi->state);
1916
1917 root = &npi->vfs_inode;
1918
1919 sb->s_root = d_alloc_root(root);
1920 if (!sb->s_root)
1921 goto err_out_put_root;
1922
1923 INIT_DELAYED_WORK(&psb->drop_dwork, pohmelfs_drop_scan);
1924 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1925
1926 INIT_DELAYED_WORK(&psb->dwork, pohmelfs_trans_scan);
1927 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
1928
1929 return 0;
1930
1931 err_out_put_root:
1932 iput(root);
1933 err_out_crypto_exit:
1934 pohmelfs_crypto_exit(psb);
1935 err_out_state_exit:
1936 pohmelfs_state_exit(psb);
1937 err_out_free_strings:
1938 kfree(psb->cipher_string);
1939 kfree(psb->hash_string);
1940 err_out_free_bdi:
1941 bdi_destroy(&psb->bdi);
1942 err_out_free_sb:
1943 kfree(psb);
1944 err_out_exit:
1945
1946 dprintk("%s: err: %d.\n", __func__, err);
1947 return err;
1948 }
1949
1950 /*
1951 * Some VFS magic here...
1952 */
1953 static struct dentry *pohmelfs_mount(struct file_system_type *fs_type,
1954 int flags, const char *dev_name, void *data)
1955 {
1956 return mount_nodev(fs_type, flags, data, pohmelfs_fill_super);
1957 }
1958
1959 /*
1960 * We need this to sync all inodes early: when writeback is invoked
1961 * from the umount/mntput path, the dcache has already been shrunk
1962 * (see generic_shutdown_super()) and no inode can construct its path anymore.
1963 */
1964 static void pohmelfs_kill_super(struct super_block *sb)
1965 {
1966 sync_inodes_sb(sb);
1967 kill_anon_super(sb);
1968 }
1969
1970 static struct file_system_type pohmel_fs_type = {
1971 .owner = THIS_MODULE,
1972 .name = "pohmel",
1973 .mount = pohmelfs_mount,
1974 .kill_sb = pohmelfs_kill_super,
1975 };
1976
1977 /*
1978 * Cache and module initialization and freeing routines.
1979 */
1980 static void pohmelfs_init_once(void *data)
1981 {
1982 struct pohmelfs_inode *pi = data;
1983
1984 inode_init_once(&pi->vfs_inode);
1985 }
1986
1987 static int __init pohmelfs_init_inodecache(void)
1988 {
1989 pohmelfs_inode_cache = kmem_cache_create("pohmelfs_inode_cache",
1990 sizeof(struct pohmelfs_inode),
1991 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
1992 pohmelfs_init_once);
1993 if (!pohmelfs_inode_cache)
1994 return -ENOMEM;
1995
1996 return 0;
1997 }
1998
1999 static void pohmelfs_destroy_inodecache(void)
2000 {
2001 kmem_cache_destroy(pohmelfs_inode_cache);
2002 }
2003
2004 static int __init init_pohmel_fs(void)
2005 {
2006 int err;
2007
2008 err = pohmelfs_config_init();
2009 if (err)
2010 goto err_out_exit;
2011
2012 err = pohmelfs_init_inodecache();
2013 if (err)
2014 goto err_out_config_exit;
2015
2016 err = pohmelfs_mcache_init();
2017 if (err)
2018 goto err_out_destroy;
2019
2020 err = netfs_trans_init();
2021 if (err)
2022 goto err_out_mcache_exit;
2023
2024 err = register_filesystem(&pohmel_fs_type);
2025 if (err)
2026 goto err_out_trans;
2027
2028 return 0;
2029
2030 err_out_trans:
2031 netfs_trans_exit();
2032 err_out_mcache_exit:
2033 pohmelfs_mcache_exit();
2034 err_out_destroy:
2035 pohmelfs_destroy_inodecache();
2036 err_out_config_exit:
2037 pohmelfs_config_exit();
2038 err_out_exit:
2039 return err;
2040 }
2041
2042 static void __exit exit_pohmel_fs(void)
2043 {
2044 unregister_filesystem(&pohmel_fs_type);
2045 pohmelfs_destroy_inodecache();
2046 pohmelfs_mcache_exit();
2047 pohmelfs_config_exit();
2048 netfs_trans_exit();
2049 }
2050
2051 module_init(init_pohmel_fs);
2052 module_exit(exit_pohmel_fs);
2053
2054 MODULE_LICENSE("GPL");
2055 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
2056 MODULE_DESCRIPTION("Pohmel filesystem");