Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / md / persistent-data / dm-transaction-manager.c
1 /*
2 * Copyright (C) 2011 Red Hat, Inc.
3 *
4 * This file is released under the GPL.
5 */
6 #include "dm-transaction-manager.h"
7 #include "dm-space-map.h"
8 #include "dm-space-map-checker.h"
9 #include "dm-space-map-disk.h"
10 #include "dm-space-map-metadata.h"
11 #include "dm-persistent-data-internal.h"
12
13 #include <linux/export.h>
14 #include <linux/slab.h>
15 #include <linux/device-mapper.h>
16
17 #define DM_MSG_PREFIX "transaction manager"
18
19 /*----------------------------------------------------------------*/
20
/*
 * One entry in the per-transaction "shadow table": records that block
 * 'where' has already been shadowed in the current transaction, so it
 * does not need copying again (see is_shadow()/insert_shadow()).
 */
struct shadow_info {
	struct hlist_node hlist;	/* link in dm_transaction_manager::buckets[] */
	dm_block_t where;		/* location of the already-shadowed block */
};
25
26 /*
27 * It would be nice if we scaled with the size of transaction.
28 */
29 #define HASH_SIZE 256
30 #define HASH_MASK (HASH_SIZE - 1)
31
/*
 * A transaction manager is either a "real" instance, which owns the
 * shadow table and uses its own block manager and space map, or a
 * non-blocking clone, which forwards reads to the real instance (see
 * dm_tm_create_non_blocking_clone() and dm_tm_read_lock()).
 */
struct dm_transaction_manager {
	int is_clone;			/* non-zero for non-blocking clones */
	struct dm_transaction_manager *real;	/* backing tm; only used when is_clone */

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;		/* protects buckets[] */
	struct hlist_head buckets[HASH_SIZE];	/* shadow_info hash table */
};
42
43 /*----------------------------------------------------------------*/
44
45 static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
46 {
47 int r = 0;
48 unsigned bucket = dm_hash_block(b, HASH_MASK);
49 struct shadow_info *si;
50 struct hlist_node *n;
51
52 spin_lock(&tm->lock);
53 hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
54 if (si->where == b) {
55 r = 1;
56 break;
57 }
58 spin_unlock(&tm->lock);
59
60 return r;
61 }
62
63 /*
64 * This can silently fail if there's no memory. We're ok with this since
65 * creating redundant shadows causes no harm.
66 */
67 static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
68 {
69 unsigned bucket;
70 struct shadow_info *si;
71
72 si = kmalloc(sizeof(*si), GFP_NOIO);
73 if (si) {
74 si->where = b;
75 bucket = dm_hash_block(b, HASH_MASK);
76 spin_lock(&tm->lock);
77 hlist_add_head(&si->hlist, tm->buckets + bucket);
78 spin_unlock(&tm->lock);
79 }
80 }
81
82 static void wipe_shadow_table(struct dm_transaction_manager *tm)
83 {
84 struct shadow_info *si;
85 struct hlist_node *n, *tmp;
86 struct hlist_head *bucket;
87 int i;
88
89 spin_lock(&tm->lock);
90 for (i = 0; i < HASH_SIZE; i++) {
91 bucket = tm->buckets + i;
92 hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
93 kfree(si);
94
95 INIT_HLIST_HEAD(bucket);
96 }
97
98 spin_unlock(&tm->lock);
99 }
100
101 /*----------------------------------------------------------------*/
102
103 static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
104 struct dm_space_map *sm)
105 {
106 int i;
107 struct dm_transaction_manager *tm;
108
109 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
110 if (!tm)
111 return ERR_PTR(-ENOMEM);
112
113 tm->is_clone = 0;
114 tm->real = NULL;
115 tm->bm = bm;
116 tm->sm = sm;
117
118 spin_lock_init(&tm->lock);
119 for (i = 0; i < HASH_SIZE; i++)
120 INIT_HLIST_HEAD(tm->buckets + i);
121
122 return tm;
123 }
124
125 struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
126 {
127 struct dm_transaction_manager *tm;
128
129 tm = kmalloc(sizeof(*tm), GFP_KERNEL);
130 if (tm) {
131 tm->is_clone = 1;
132 tm->real = real;
133 }
134
135 return tm;
136 }
137 EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
138
/*
 * Free a transaction manager.  Real instances also free any remaining
 * shadow records; clones have no shadow table to wipe.  The block
 * manager and space map are not destroyed here — they belong to the
 * caller.
 */
void dm_tm_destroy(struct dm_transaction_manager *tm)
{
	if (!tm->is_clone)
		wipe_shadow_table(tm);

	kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);
147
148 int dm_tm_pre_commit(struct dm_transaction_manager *tm)
149 {
150 int r;
151
152 if (tm->is_clone)
153 return -EWOULDBLOCK;
154
155 r = dm_sm_commit(tm->sm);
156 if (r < 0)
157 return r;
158
159 return 0;
160 }
161 EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
162
/*
 * Complete the transaction: the shadow table is cleared (so every
 * block must be shadowed afresh in the next transaction) and the
 * superblock @root is flushed and unlocked via the block manager.
 * Not supported on non-blocking clones.
 */
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	/* Clear shadow records before flushing, ready for the next transaction. */
	wipe_shadow_table(tm);

	return dm_bm_flush_and_unlock(tm->bm, root);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);
173
174 int dm_tm_new_block(struct dm_transaction_manager *tm,
175 struct dm_block_validator *v,
176 struct dm_block **result)
177 {
178 int r;
179 dm_block_t new_block;
180
181 if (tm->is_clone)
182 return -EWOULDBLOCK;
183
184 r = dm_sm_new_block(tm->sm, &new_block);
185 if (r < 0)
186 return r;
187
188 r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
189 if (r < 0) {
190 dm_sm_dec_block(tm->sm, new_block);
191 return r;
192 }
193
194 /*
195 * New blocks count as shadows in that they don't need to be
196 * shadowed again.
197 */
198 insert_shadow(tm, new_block);
199
200 return 0;
201 }
202
203 static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
204 struct dm_block_validator *v,
205 struct dm_block **result)
206 {
207 int r;
208 dm_block_t new;
209 struct dm_block *orig_block;
210
211 r = dm_sm_new_block(tm->sm, &new);
212 if (r < 0)
213 return r;
214
215 r = dm_sm_dec_block(tm->sm, orig);
216 if (r < 0)
217 return r;
218
219 r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
220 if (r < 0)
221 return r;
222
223 r = dm_bm_unlock_move(orig_block, new);
224 if (r < 0) {
225 dm_bm_unlock(orig_block);
226 return r;
227 }
228
229 return dm_bm_write_lock(tm->bm, new, v, result);
230 }
231
/*
 * Obtain a writeable version of @orig for this transaction (copy on
 * write).  If @orig was already shadowed in this transaction and is
 * not shared (reference count not above one) it is simply
 * write-locked in place; otherwise a fresh copy is made via
 * __shadow_block() and recorded in the shadow table.
 *
 * *inc_children is set when orig's reference count exceeds one —
 * presumably so the caller increments the blocks referenced by the
 * copy, which are now shared with the original; confirm against
 * callers.
 */
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
		       struct dm_block_validator *v, struct dm_block **result,
		       int *inc_children)
{
	int r;

	/* Non-blocking clones are read-only. */
	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
	if (r < 0)
		return r;

	if (is_shadow(tm, orig) && !*inc_children)
		return dm_bm_write_lock(tm->bm, orig, v, result);

	r = __shadow_block(tm, orig, v, result);
	if (r < 0)
		return r;
	insert_shadow(tm, dm_block_location(*result));

	return r;
}
EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
256
257 int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
258 struct dm_block_validator *v,
259 struct dm_block **blk)
260 {
261 if (tm->is_clone)
262 return dm_bm_read_try_lock(tm->real->bm, b, v, blk);
263
264 return dm_bm_read_lock(tm->bm, b, v, blk);
265 }
266 EXPORT_SYMBOL_GPL(dm_tm_read_lock);
267
/*
 * Release a lock taken through this transaction manager.  The tm
 * argument is unused; unlocking goes straight to the block manager.
 */
int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	return dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);
273
/*
 * Increment the reference count of block @b in the space map.
 */
void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);
284
/*
 * Decrement the reference count of block @b in the space map.
 */
void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);
295
296 int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
297 uint32_t *result)
298 {
299 if (tm->is_clone)
300 return -EWOULDBLOCK;
301
302 return dm_sm_get_count(tm->sm, b, result);
303 }
304
/*
 * Expose the underlying block manager (used e.g. for locking the
 * superblock in dm_tm_create_internal()).
 */
struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}
310 /*----------------------------------------------------------------*/
311
312 static int dm_tm_create_internal(struct dm_block_manager *bm,
313 dm_block_t sb_location,
314 struct dm_block_validator *sb_validator,
315 size_t root_offset, size_t root_max_len,
316 struct dm_transaction_manager **tm,
317 struct dm_space_map **sm,
318 struct dm_block **sblock,
319 int create)
320 {
321 int r;
322 struct dm_space_map *inner;
323
324 inner = dm_sm_metadata_init();
325 if (IS_ERR(inner))
326 return PTR_ERR(inner);
327
328 *tm = dm_tm_create(bm, inner);
329 if (IS_ERR(*tm)) {
330 dm_sm_destroy(inner);
331 return PTR_ERR(*tm);
332 }
333
334 if (create) {
335 r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
336 sb_validator, sblock);
337 if (r < 0) {
338 DMERR("couldn't lock superblock");
339 goto bad1;
340 }
341
342 r = dm_sm_metadata_create(inner, *tm, dm_bm_nr_blocks(bm),
343 sb_location);
344 if (r) {
345 DMERR("couldn't create metadata space map");
346 goto bad2;
347 }
348
349 *sm = dm_sm_checker_create(inner);
350 if (IS_ERR(*sm)) {
351 r = PTR_ERR(*sm);
352 goto bad2;
353 }
354
355 } else {
356 r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
357 sb_validator, sblock);
358 if (r < 0) {
359 DMERR("couldn't lock superblock");
360 goto bad1;
361 }
362
363 r = dm_sm_metadata_open(inner, *tm,
364 dm_block_data(*sblock) + root_offset,
365 root_max_len);
366 if (r) {
367 DMERR("couldn't open metadata space map");
368 goto bad2;
369 }
370
371 *sm = dm_sm_checker_create(inner);
372 if (IS_ERR(*sm)) {
373 r = PTR_ERR(*sm);
374 goto bad2;
375 }
376 }
377
378 return 0;
379
380 bad2:
381 dm_tm_unlock(*tm, *sblock);
382 bad1:
383 dm_tm_destroy(*tm);
384 dm_sm_destroy(inner);
385 return r;
386 }
387
/*
 * Create (format) a fresh transaction manager plus its checked
 * metadata space map; the superblock at @sb_location is returned
 * write-locked and zeroed in *sblock.
 */
int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_block_validator *sb_validator,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm, struct dm_block **sblock)
{
	return dm_tm_create_internal(bm, sb_location, sb_validator,
				     0, 0, tm, sm, sblock, 1);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
397
/*
 * Open an existing transaction manager and metadata space map; the
 * space map root is read from the superblock data at @root_offset
 * (at most @root_max_len bytes).  *sblock is returned write-locked.
 */
int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       struct dm_block_validator *sb_validator,
		       size_t root_offset, size_t root_max_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm, struct dm_block **sblock)
{
	return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
				     root_max_len, tm, sm, sblock, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
408
409 /*----------------------------------------------------------------*/