
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

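/*
 * noop_backing_dev_info is for devices and filesystems that need a valid
 * bdi but do no writeback of their own (hence, presumably, no capabilities,
 * no readahead window and no unplug hook).
 */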
struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, and also
 * provides the reader-side protection for bdi_pending_list. bdi_list
 * has RCU reader-side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
	struct inode *inode;

	/*
	 * inode_lock is enough here; the bdi->wb_list is protected by
	 * RCU on the reader side.
	 */
	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&inode_lock);
	list_for_each_entry(wb, &bdi->wb_list, list) {
		nr_wb++;
		list_for_each_entry(inode, &wb->b_dirty, i_list)
			nr_dirty++;
		list_for_each_entry(inode, &wb->b_io, i_list)
			nr_io++;
		list_for_each_entry(inode, &wb->b_more_io, i_list)
			nr_more_io++;
	}
	spin_unlock(&inode_lock);

	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:     %8lu kB\n"
		   "BdiReclaimable:   %8lu kB\n"
		   "BdiDirtyThresh:   %8lu kB\n"
		   "DirtyThresh:      %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n"
		   "WritebackThreads: %8lu\n"
		   "b_dirty:          %8lu\n"
		   "b_io:             %8lu\n"
		   "b_more_io:        %8lu\n"
		   "bdi_list:         %8u\n"
		   "state:            %8lx\n"
		   "wb_mask:          %8lx\n"
		   "wb_list:          %8u\n"
		   "wb_cnt:           %8u\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
		   !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

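/*
 * BDI_SHOW() generates the sysfs "show" half of a bdi attribute. As a
 * rough sketch, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) below expands
 * to approximately:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *
 * The matching "store" routines are written out by hand above and below.
 */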
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

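/*
 * min_ratio reserves, and max_ratio caps, this bdi's share of the global
 * dirty threshold (in percent); the range checking lives in
 * bdi_set_min_ratio() and bdi_set_max_ratio() in mm/page-writeback.c.
 */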
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");

	return err;
}
subsys_initcall(default_bdi_init);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
}

static void bdi_task_init(struct backing_dev_info *bdi,
			  struct bdi_writeback *wb)
{
	struct task_struct *tsk = current;

	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&wb->list, &bdi->wb_list);
	spin_unlock(&bdi->wb_lock);

	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority; just set us to normal.
	 */
	set_user_nice(tsk, 0);
}

static int bdi_start_fn(void *ptr)
{
	struct bdi_writeback *wb = ptr;
	struct backing_dev_info *bdi = wb->bdi;
	int ret;

	/*
	 * Add us to the active bdi_list
	 */
	spin_lock_bh(&bdi_lock);
	list_add_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi_task_init(bdi, wb);

	/*
	 * Clear the pending bit and wake up anybody waiting to tear us down.
	 */
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);

	ret = bdi_writeback_task(wb);

	/*
	 * Remove us from the list
	 */
	spin_lock(&bdi->wb_lock);
	list_del_rcu(&wb->list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * Flush any work that raced with us exiting. No new work
	 * will be added, since this bdi isn't discoverable anymore.
	 */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	wb->task = NULL;
	return ret;
}

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

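/*
 * Kick off a modest chunk of non-blocking writeback (up to 1024 pages)
 * against this bdi. Used by the forker thread below when it fails to
 * create a flusher thread, in the hope of freeing enough memory for the
 * next attempt to succeed.
 */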
static void bdi_flush_io(struct backing_dev_info *bdi)
{
	struct writeback_control wbc = {
		.bdi			= bdi,
		.sync_mode		= WB_SYNC_NONE,
		.older_than_this	= NULL,
		.range_cyclic		= 1,
		.nr_to_write		= 1024,
	};

	writeback_inodes_wbc(&wbc);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

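/*
 * Arm the sync_supers timer to fire dirty_writeback_interval centisecs
 * from now (the sysctl is in hundredths of a second, hence the "* 10"
 * to get milliseconds). A zero interval disables the periodic sync.
 */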
void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}

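/*
 * Main loop of the forker thread, run on behalf of the default bdi: it
 * scans bdi_list for bdis that have dirty data but no flusher thread yet,
 * queues them onto bdi_pending_list via an RCU callback, and then forks a
 * "flush-<dev>" thread for each pending entry.
 */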
static int bdi_forker_task(void *ptr)
{
	struct bdi_writeback *me = ptr;

	bdi_task_init(me->bdi, me);

	for (;;) {
		struct backing_dev_info *bdi, *tmp;
		struct bdi_writeback *wb;

		/*
		 * Temporary measure: make sure we don't see any dirty
		 * data on the default backing_dev_info.
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
			wb_do_writeback(me, 0);

		spin_lock_bh(&bdi_lock);

		/*
		 * Check if any existing bdi's have dirty data without
		 * a thread registered. If so, set that up.
		 */
		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
			if (bdi->wb.task)
				continue;
			if (list_empty(&bdi->work_list) &&
			    !bdi_has_dirty_io(bdi))
				continue;

			bdi_add_default_flusher_task(bdi);
		}

		set_current_state(TASK_INTERRUPTIBLE);

		if (list_empty(&bdi_pending_list)) {
			unsigned long wait;

			spin_unlock_bh(&bdi_lock);
			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
			if (wait)
				schedule_timeout(wait);
			else
				schedule();
			try_to_freeze();
			continue;
		}

		__set_current_state(TASK_RUNNING);

		/*
		 * This is our real job - check for pending entries in
		 * bdi_pending_list, and create the tasks that got added
		 */
		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
				 bdi_list);
		list_del_init(&bdi->bdi_list);
		spin_unlock_bh(&bdi_lock);

		wb = &bdi->wb;
		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
					dev_name(bdi->dev));
		/*
		 * If task creation fails, then re-add the bdi to
		 * the pending list and force writeout of the bdi
		 * from this forker thread. That will free some memory
		 * and we can try again.
		 */
		if (IS_ERR(wb->task)) {
			wb->task = NULL;

			/*
			 * Add this 'bdi' to the back, so we get
			 * a chance to flush other bdi's to free
			 * memory.
			 */
			spin_lock_bh(&bdi_lock);
			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
			spin_unlock_bh(&bdi_lock);

			bdi_flush_io(bdi);
		}
	}

	return 0;
}

static void bdi_add_to_pending(struct rcu_head *head)
{
	struct backing_dev_info *bdi;

	bdi = container_of(head, struct backing_dev_info, rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);

	spin_lock(&bdi_lock);
	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
	spin_unlock(&bdi_lock);

	/*
	 * We are now on the pending list; wake up bdi_forker_task()
	 * to finish the job and add us back to the active bdi_list.
	 */
	wake_up_process(default_backing_dev_info.wb.task);
}

/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
		printk(KERN_ERR "bdi %p/%s is not registered!\n",
		       bdi, bdi->name);
		return;
	}

	/*
	 * Check with the helper whether to proceed adding a task. Will only
	 * abort if two or more simultaneous calls to
	 * bdi_add_default_flusher_task() occurred; further additions will
	 * block waiting for previous additions to finish.
	 */
	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
		list_del_rcu(&bdi->bdi_list);

		/*
		 * We must wait for the current RCU period to end before
		 * moving to the pending list. So schedule that operation
		 * from an RCU callback.
		 */
		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
	}
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();
}

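/*
 * Register a bdi device with a printf-style name and add the bdi to
 * bdi_list. Only the default bdi passes bdi_cap_flush_forker(), so only
 * it gets the "bdi-default" forker thread started here; all other bdis
 * get a flusher thread on demand. Returns 0 or a negative errno.
 */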
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	int ret = 0;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		goto exit;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
					dev_name(dev));
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			ret = -ENOMEM;

			bdi_remove_from_list(bdi);
			goto exit;
		}
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);
exit:
	return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * Finally, kill the kernel threads. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility. Force
	 * unfreeze of the thread before calling kthread_stop(), otherwise
	 * it would never exit if it is currently stuck in the refrigerator.
	 */
	list_for_each_entry(wb, &bdi->wb_list, list) {
		thaw_process(wb->task);
		kthread_stop(wb->task);
	}
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = NULL;
	}
	spin_unlock(&sb_lock);
}

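/*
 * Tear down everything bdi_register() set up: detach any super_blocks,
 * stop the flusher thread (the default bdi's forker is left alone),
 * remove the debugfs entries and unregister the device.
 */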
void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		bdi_prune_sb(bdi);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

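/*
 * Initialize the in-memory state of a bdi: ratios, locks, lists, the
 * (for now, single) embedded writeback context and the per-cpu counters.
 * On failure, any counters already created are unwound before returning.
 */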
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_RCU_HEAD(&bdi->rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	/*
	 * Just one thread support for now, hard code mask and count
	 */
	bdi->wb_mask = 1;
	bdi->wb_cnt = 1;

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		spin_lock(&inode_lock);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&inode_lock);
	}

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
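
/*
 * A minimal usage sketch (a hypothetical filesystem, not taken from any
 * in-tree caller): at mount time one might do
 *
 *	err = bdi_setup_and_register(&my_bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &my_bdi;
 *
 * which registers a device named "myfs-<N>", with N taken from bdi_seq,
 * and bdi_destroy() undoes the whole thing at unmount.
 */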

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};

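/*
 * Per-direction congestion state: congestion_wqh[0] serves async (write)
 * waiters and congestion_wqh[1] sync (read) waiters, matching the
 * BLK_RW_ASYNC/BLK_RW_SYNC values that callers pass as 'sync' below.
 */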
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);
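
/*
 * Typical caller pattern (a sketch of how reclaim-style code backs off;
 * the timeout is illustrative): throttle for up to 100ms, or until some
 * async write completes, before retrying:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */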