/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};
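
/*
 * moving_pred() tells the keybuf which keys are worth moving: a key
 * qualifies if any of its pointers lands in a bucket whose live data
 * (GC_SECTORS_USED) has dropped below that cache's gc_move_threshold.
 */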
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct cache *ca = PTR_CACHE(c, k, i);
		struct bucket *g = PTR_BUCKET(c, k, i);

		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
			return true;
	}

	return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	kfree(io);
}
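
/*
 * Runs after the write half of a move completes: free the pages backing
 * the bio, note any replace collision for tracing, drop the key from
 * moving_gc_keys and release one moving_in_flight slot.
 */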
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}
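
/*
 * Read completion: stash any error in the insert op so write_moving()
 * can skip the write, then let the bbio layer finish accounting for
 * the bio.
 */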
static void read_moving_endio(struct bio *bio, int error)
{
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (error)
		io->op.error = error;

	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
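
/*
 * (Re)initialize the embedded bio to span exactly the key being moved.
 * KEY_SIZE() is in 512-byte sectors, hence the << 9 when sizing the
 * bio; the inline bio_vecs allocated along with the moving_io serve as
 * the bio's vector.
 */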
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}
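
/*
 * Write half of a move: if the read succeeded, reinitialize the bio
 * (now holding the data just read) and insert it via bch_data_insert()
 * as a replace operation keyed on the original key, so the move is
 * quietly dropped if the key changed while the data was in flight.
 */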
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->error) {
		moving_init(io);

		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
		op->write_prio		= 1;
		op->bio			= &io->bio.bio;

		op->writeback		= KEY_DIRTY(&io->w->key);
		op->csum		= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace		= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, system_wq);
}
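
/*
 * Submit the read for one key. The bio's completion holds the last ref
 * on the closure, so write_moving() runs out of system_wq once the
 * read actually finishes.
 */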
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, system_wq);
}
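
/*
 * Main moving GC loop: pull keys that moving_pred() accepted out of
 * moving_gc_keys, allocate a moving_io with enough inline bio_vecs to
 * cover each key, and kick off the reads. The moving_in_flight
 * semaphore bounds how many copies are outstanding at once.
 */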
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}
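
/*
 * Heap ordering: with this comparison the heap keeps the emptiest
 * (cheapest to move) buckets, and heap_peek() returns the fullest
 * bucket among those collected.
 */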
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
	return GC_SECTORS_USED(heap_peek(&ca->heap));
}
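
/*
 * Decide which buckets to evacuate on each cache: gather the emptiest
 * buckets into ca->heap, trim the candidate set so the data to be
 * moved fits in the buckets we can spare from the free lists, then
 * publish the heap top as gc_move_threshold for moving_pred() to test
 * against.
 */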
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		ca->gc_move_threshold = bucket_heap_top(ca);

		pr_debug("threshold %u", ca->gc_move_threshold);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}
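
/*
 * Called at cache set registration: set up the moving GC keybuf and
 * allow up to 64 moves in flight.
 */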
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}