/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

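/*
 * The writeback rate is driven by a PD (proportional-derivative)
 * controller.  Each backing device gets a dirty-data target that is its
 * share of writeback_percent of the cache, proportional to its size; the
 * proportional term is how far the current dirty count is from that
 * target, and the derivative term is how fast the dirty count is
 * changing, smoothed with an EWMA.  Their sum adjusts
 * writeback_rate.rate (in sectors per second) once per update interval.
 */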
static void __update_writeback_rate(struct cached_dev *dc)
{
        struct cache_set *c = dc->disk.c;
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
        uint64_t cache_dirty_target =
                div_u64(cache_sectors * dc->writeback_percent, 100);

        int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
                                   c->cached_dev_sectors);

        /* PD controller */

        int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
        int64_t derivative = dirty - dc->disk.sectors_dirty_last;
        int64_t proportional = dirty - target;
        int64_t change;

        dc->disk.sectors_dirty_last = dirty;

        /* Scale to sectors per second */

        proportional *= dc->writeback_rate_update_seconds;
        proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

        derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

        derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
                              (dc->writeback_rate_d_term /
                               dc->writeback_rate_update_seconds) ?: 1, 0);

        derivative *= dc->writeback_rate_d_term;
        derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

        change = proportional + derivative;

        /* Don't increase writeback rate if the device isn't keeping up */
        if (change > 0 &&
            time_after64(local_clock(),
                         dc->writeback_rate.next + NSEC_PER_MSEC))
                change = 0;

        dc->writeback_rate.rate =
                clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
                        1, NSEC_PER_MSEC);

        dc->writeback_rate_proportional = proportional;
        dc->writeback_rate_derivative = derivative;
        dc->writeback_rate_change = change;
        dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);

        down_read(&dc->writeback_lock);

        if (atomic_read(&dc->has_dirty) &&
            dc->writeback_percent)
                __update_writeback_rate(dc);

        up_read(&dc->writeback_lock);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
}

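/*
 * How long to wait before issuing the next writeback IO: zero (no
 * throttling) while detaching or when no writeback_percent target is
 * set, otherwise whatever the rate limiter says is needed to stay at
 * the current rate after writing @sectors sectors.
 */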
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            !dc->writeback_percent)
                return 0;

        return bch_next_delay(&dc->writeback_rate, sectors);
}

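/*
 * Writeback IO pipeline: each dirty extent gets a struct dirty_io (a
 * closure plus a bio whose inline bio_vecs are allocated right after
 * it).  The data is first read from the cache device
 * (read_dirty_submit), then written to the backing device
 * (write_dirty), and finally the dirty bit is cleared in the btree
 * (write_dirty_finish).
 */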
struct dirty_io {
        struct closure cl;
        struct cached_dev *dc;
        struct bio bio;
};

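/*
 * (Re)initialize io->bio to cover w->key, using the inline bio_vecs.
 * When there is no writeback_percent target the IO is issued at idle
 * priority so background flushing doesn't compete with foreground
 * requests.
 */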
static void dirty_init(struct keybuf_key *w)
{
        struct dirty_io *io = w->private;
        struct bio *bio = &io->bio;

        bio_init(bio);
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
        bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
        bio->bi_private = w;
        bio->bi_io_vec = bio->bi_inline_vecs;
        bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        kfree(io);
}

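/*
 * Final stage: the data is on the backing device.  Free the bounce
 * pages, and if the key is still marked dirty (i.e. neither the read
 * nor the write failed), insert a copy of it with the dirty bit
 * cleared.  The buckets it points into are pinned so they can't be
 * reused while the insert is in flight, and the insert is done as a
 * replace of the original key so a collision (the extent was
 * overwritten in the meantime) is detected and counted instead of
 * clobbering newer data.
 */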
static void write_dirty_finish(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, &io->bio, i)
                __free_page(bv->bv_page);

        /* This is kind of a dumb way of signalling errors. */
        if (KEY_DIRTY(&w->key)) {
                int ret;
                unsigned i;
                struct keylist keys;

                bch_keylist_init(&keys);

                bkey_copy(keys.top, &w->key);
                SET_KEY_DIRTY(keys.top, false);
                bch_keylist_push(&keys);

                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

                ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

                if (ret)
                        trace_bcache_writeback_collision(&w->key);

                atomic_long_inc(ret
                                ? &dc->disk.c->writeback_keys_failed
                                : &dc->disk.c->writeback_keys_done);
        }

        bch_keybuf_del(&dc->writeback_keys, w);
        up(&dc->in_flight);

        closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        if (bio->bi_error)
                SET_KEY_DIRTY(&w->key, false);

        closure_put(&io->cl);
}

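/*
 * Second stage: the dirty data has been read from the cache.  Reuse the
 * same bio, but point it at the backing device at the key's start
 * offset, and submit it as a write; write_dirty_finish runs once it
 * completes.
 */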
static void write_dirty(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;

        dirty_init(w);
        io->bio.bi_rw = WRITE;
        io->bio.bi_iter.bi_sector = KEY_START(&w->key);
        io->bio.bi_bdev = io->dc->bdev;
        io->bio.bi_end_io = dirty_endio;

        closure_bio_submit(&io->bio, cl);

        continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
                            bio->bi_error, "reading dirty data from cache");

        dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        closure_bio_submit(&io->bio, cl);

        continue_at(cl, write_dirty, system_wq);
}

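/*
 * Main writeback loop: pull dirty keys off the keybuf, read each extent
 * from the cache and hand it to the write pipeline above.  In-flight IO
 * is capped by dc->in_flight, and writeback_delay() throttles how fast
 * new IO is issued; the delay is skipped while consecutive keys are
 * contiguous (up to 50ms of accumulated delay) so sequential dirty data
 * is written back in one streak.
 */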
static void read_dirty(struct cached_dev *dc)
{
        unsigned delay = 0;
        struct keybuf_key *w;
        struct dirty_io *io;
        struct closure cl;

        closure_init_stack(&cl);

        /*
         * XXX: if we error, background writeback just spins. Should use some
         * mempools.
         */

        while (!kthread_should_stop()) {

                w = bch_keybuf_next(&dc->writeback_keys);
                if (!w)
                        break;

                BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

                if (KEY_START(&w->key) != dc->last_read ||
                    jiffies_to_msecs(delay) > 50)
                        while (!kthread_should_stop() && delay)
                                delay = schedule_timeout_interruptible(delay);

                dc->last_read = KEY_OFFSET(&w->key);

                io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private = io;
                io->dc = dc;

                dirty_init(w);
                io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
                                            &w->key, 0)->bdev;
                io->bio.bi_rw = READ;
                io->bio.bi_end_io = read_dirty_endio;

                if (bio_alloc_pages(&io->bio, GFP_KERNEL))
                        goto err_free;

                trace_bcache_writeback(&w->key);

                down(&dc->in_flight);
                closure_call(&io->cl, read_dirty_submit, NULL, &cl);

                delay = writeback_delay(dc, KEY_SIZE(&w->key));
        }

        if (0) {
err_free:
                kfree(w->private);
err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }

        /*
         * Wait for outstanding writeback IOs to finish (and keybuf slots to be
         * freed) before refilling again
         */
        closure_sync(&cl);
}

/* Scan for dirty data */

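/*
 * Dirty sector accounting is kept per stripe (fixed-size chunks of the
 * backing device) so that, on devices where partial stripe writes are
 * expensive (dc->partial_stripes_expensive), fully dirty stripes can be
 * written back first.  A stripe's bit in full_dirty_stripes is set only
 * while every sector in it is dirty.
 */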
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
                                  uint64_t offset, int nr_sectors)
{
        struct bcache_device *d = c->devices[inode];
        unsigned stripe_offset, stripe, sectors_dirty;

        if (!d)
                return;

        stripe = offset_to_stripe(d, offset);
        stripe_offset = offset & (d->stripe_size - 1);

        while (nr_sectors) {
                int s = min_t(unsigned, abs(nr_sectors),
                              d->stripe_size - stripe_offset);

                if (nr_sectors < 0)
                        s = -s;

                if (stripe >= d->nr_stripes)
                        return;

                sectors_dirty = atomic_add_return(s,
                                        d->stripe_sectors_dirty + stripe);
                if (sectors_dirty == d->stripe_size)
                        set_bit(stripe, d->full_dirty_stripes);
                else
                        clear_bit(stripe, d->full_dirty_stripes);

                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
        struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

        BUG_ON(KEY_INODE(k) != dc->disk.id);

        return KEY_DIRTY(k);
}

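/*
 * Refill the writeback keybuf only from stripes that are completely
 * dirty: walk the full_dirty_stripes bitmap starting at the stripe
 * containing last_scanned, wrapping around once, and stop early when
 * the keybuf runs out of free slots.
 */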
static void refill_full_stripes(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        unsigned start_stripe, stripe, next_stripe;
        bool wrapped = false;

        stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

        if (stripe >= dc->disk.nr_stripes)
                stripe = 0;

        start_stripe = stripe;

        while (1) {
                stripe = find_next_bit(dc->disk.full_dirty_stripes,
                                       dc->disk.nr_stripes, stripe);

                if (stripe == dc->disk.nr_stripes)
                        goto next;

                next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
                                                 dc->disk.nr_stripes, stripe);

                buf->last_scanned = KEY(dc->disk.id,
                                        stripe * dc->disk.stripe_size, 0);

                bch_refill_keybuf(dc->disk.c, buf,
                                  &KEY(dc->disk.id,
                                       next_stripe * dc->disk.stripe_size, 0),
                                  dirty_pred);

                if (array_freelist_empty(&buf->freelist))
                        return;

                stripe = next_stripe;
next:
                if (wrapped && stripe > start_stripe)
                        return;

                if (stripe == dc->disk.nr_stripes) {
                        stripe = 0;
                        wrapped = true;
                }
        }
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        struct bkey start = KEY(dc->disk.id, 0, 0);
        struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
        struct bkey start_pos;

        /*
         * make sure keybuf pos is inside the range for this disk - at bringup
         * we might not be attached yet so this disk's inode nr isn't
         * initialized then
         */
        if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
            bkey_cmp(&buf->last_scanned, &end) > 0)
                buf->last_scanned = start;

        if (dc->partial_stripes_expensive) {
                refill_full_stripes(dc);
                if (array_freelist_empty(&buf->freelist))
                        return false;
        }

        start_pos = buf->last_scanned;
        bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

        if (bkey_cmp(&buf->last_scanned, &end) < 0)
                return false;

        /*
         * If we get to the end start scanning again from the beginning, and
         * only scan up to where we initially started scanning from:
         */
        buf->last_scanned = start;
        bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

        return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

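/*
 * The per-device writeback thread: sleep while there is no dirty data,
 * or while writeback is disabled and we're not detaching; otherwise
 * refill the keybuf with dirty keys and write them out via read_dirty().
 * When a full scan of the index finds nothing left dirty, clear
 * has_dirty and mark the backing device clean in its superblock.  After
 * a complete scan, wait writeback_delay seconds before scanning again.
 */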
static int bch_writeback_thread(void *arg)
{
        struct cached_dev *dc = arg;
        bool searched_full_index;

        while (!kthread_should_stop()) {
                down_write(&dc->writeback_lock);
                if (!atomic_read(&dc->has_dirty) ||
                    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
                     !dc->writeback_running)) {
                        up_write(&dc->writeback_lock);
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop())
                                return 0;

                        schedule();
                        continue;
                }

                searched_full_index = refill_dirty(dc);

                if (searched_full_index &&
                    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
                        atomic_set(&dc->has_dirty, 0);
                        cached_dev_put(dc);
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                        bch_write_bdev_super(dc, NULL);
                }

                up_write(&dc->writeback_lock);

                bch_ratelimit_reset(&dc->writeback_rate);
                read_dirty(dc);

                if (searched_full_index) {
                        unsigned delay = dc->writeback_delay * HZ;

                        while (delay &&
                               !kthread_should_stop() &&
                               !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                                delay = schedule_timeout_interruptible(delay);
                }
        }

        return 0;
}

/* Init */

struct sectors_dirty_init {
        struct btree_op op;
        unsigned inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
                                 struct bkey *k)
{
        struct sectors_dirty_init *op = container_of(_op,
                                        struct sectors_dirty_init, op);
        if (KEY_INODE(k) > op->inode)
                return MAP_DONE;

        if (KEY_DIRTY(k))
                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                             KEY_START(k), KEY_SIZE(k));

        return MAP_CONTINUE;
}

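/*
 * At registration time, walk this device's keys in the btree and add up
 * the dirty extents so the per-stripe counters (and the PD controller's
 * sectors_dirty_last baseline) reflect what is already in the cache.
 */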
void bch_sectors_dirty_init(struct cached_dev *dc)
{
        struct sectors_dirty_init op;

        bch_btree_op_init(&op.op, -1);
        op.inode = dc->disk.id;

        bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
                           sectors_dirty_init_fn, 0);

        dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}

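/*
 * Defaults: up to 64 writeback IOs in flight, a 10% dirty target, a 30
 * second pause between full scans, an initial rate of 1024 sectors/sec,
 * and PD controller terms re-evaluated every 5 seconds.
 */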
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
        sema_init(&dc->in_flight, 64);
        init_rwsem(&dc->writeback_lock);
        bch_keybuf_init(&dc->writeback_keys);

        dc->writeback_metadata = true;
        dc->writeback_running = true;
        dc->writeback_percent = 10;
        dc->writeback_delay = 30;
        dc->writeback_rate.rate = 1024;

        dc->writeback_rate_update_seconds = 5;
        dc->writeback_rate_d_term = 30;
        dc->writeback_rate_p_term_inverse = 6000;

        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
        dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread))
                return PTR_ERR(dc->writeback_thread);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);

        bch_writeback_queue(dc);

        return 0;
}