Merge branch 'for-linville' of git://github.com/kvalo/ath6kl
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / md / persistent-data / dm-space-map-common.c
1 /*
2 * Copyright (C) 2011 Red Hat, Inc.
3 *
4 * This file is released under the GPL.
5 */
6
7 #include "dm-space-map-common.h"
8 #include "dm-transaction-manager.h"
9
10 #include <linux/bitops.h>
11 #include <linux/device-mapper.h>
12
13 #define DM_MSG_PREFIX "space map common"
14
15 /*----------------------------------------------------------------*/
16
17 /*
18 * Index validator.
19 */
20 #define INDEX_CSUM_XOR 160478
21
22 static void index_prepare_for_write(struct dm_block_validator *v,
23 struct dm_block *b,
24 size_t block_size)
25 {
26 struct disk_metadata_index *mi_le = dm_block_data(b);
27
28 mi_le->blocknr = cpu_to_le64(dm_block_location(b));
29 mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
30 block_size - sizeof(__le32),
31 INDEX_CSUM_XOR));
32 }
33
34 static int index_check(struct dm_block_validator *v,
35 struct dm_block *b,
36 size_t block_size)
37 {
38 struct disk_metadata_index *mi_le = dm_block_data(b);
39 __le32 csum_disk;
40
41 if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
42 DMERR("index_check failed blocknr %llu wanted %llu",
43 le64_to_cpu(mi_le->blocknr), dm_block_location(b));
44 return -ENOTBLK;
45 }
46
47 csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
48 block_size - sizeof(__le32),
49 INDEX_CSUM_XOR));
50 if (csum_disk != mi_le->csum) {
51 DMERR("index_check failed csum %u wanted %u",
52 le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
53 return -EILSEQ;
54 }
55
56 return 0;
57 }
58
/* Validator hooked up for all metadata index blocks. */
static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};
64
65 /*----------------------------------------------------------------*/
66
67 /*
68 * Bitmap validator
69 */
70 #define BITMAP_CSUM_XOR 240779
71
72 static void bitmap_prepare_for_write(struct dm_block_validator *v,
73 struct dm_block *b,
74 size_t block_size)
75 {
76 struct disk_bitmap_header *disk_header = dm_block_data(b);
77
78 disk_header->blocknr = cpu_to_le64(dm_block_location(b));
79 disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
80 block_size - sizeof(__le32),
81 BITMAP_CSUM_XOR));
82 }
83
84 static int bitmap_check(struct dm_block_validator *v,
85 struct dm_block *b,
86 size_t block_size)
87 {
88 struct disk_bitmap_header *disk_header = dm_block_data(b);
89 __le32 csum_disk;
90
91 if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
92 DMERR("bitmap check failed blocknr %llu wanted %llu",
93 le64_to_cpu(disk_header->blocknr), dm_block_location(b));
94 return -ENOTBLK;
95 }
96
97 csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
98 block_size - sizeof(__le32),
99 BITMAP_CSUM_XOR));
100 if (csum_disk != disk_header->csum) {
101 DMERR("bitmap check failed csum %u wanted %u",
102 le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
103 return -EILSEQ;
104 }
105
106 return 0;
107 }
108
/* Validator hooked up for all space-map bitmap blocks. */
static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = bitmap_prepare_for_write,
	.check = bitmap_check
};
114
115 /*----------------------------------------------------------------*/
116
117 #define ENTRIES_PER_WORD 32
118 #define ENTRIES_SHIFT 5
119
120 static void *dm_bitmap_data(struct dm_block *b)
121 {
122 return dm_block_data(b) + sizeof(struct disk_bitmap_header);
123 }
124
125 #define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
126
/*
 * Returns non-zero if the 64-bit word holding entry b contains no free
 * (zero) 2-bit entries, letting sm_find_free() skip a whole word at a
 * time.
 *
 * NOTE(review): this is a SWAR-style carry trick across the 2-bit
 * pairs (WORD_MASK_HIGH selects the high bit of each pair); the carry
 * propagation is subtle — verify before altering the constants.
 */
static unsigned bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}
137
138 static unsigned sm_lookup_bitmap(void *addr, unsigned b)
139 {
140 __le64 *words_le = addr;
141 __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
142 unsigned hi, lo;
143
144 b = (b & (ENTRIES_PER_WORD - 1)) << 1;
145 hi = !!test_bit_le(b, (void *) w_le);
146 lo = !!test_bit_le(b + 1, (void *) w_le);
147 return (hi << 1) | lo;
148 }
149
150 static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
151 {
152 __le64 *words_le = addr;
153 __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
154
155 b = (b & (ENTRIES_PER_WORD - 1)) << 1;
156
157 if (val & 2)
158 __set_bit_le(b, (void *) w_le);
159 else
160 __clear_bit_le(b, (void *) w_le);
161
162 if (val & 1)
163 __set_bit_le(b + 1, (void *) w_le);
164 else
165 __clear_bit_le(b + 1, (void *) w_le);
166 }
167
168 static int sm_find_free(void *addr, unsigned begin, unsigned end,
169 unsigned *result)
170 {
171 while (begin < end) {
172 if (!(begin & (ENTRIES_PER_WORD - 1)) &&
173 bitmap_word_used(addr, begin)) {
174 begin += ENTRIES_PER_WORD;
175 continue;
176 }
177
178 if (!sm_lookup_bitmap(addr, begin)) {
179 *result = begin;
180 return 0;
181 }
182
183 begin++;
184 }
185
186 return -ENOSPC;
187 }
188
189 /*----------------------------------------------------------------*/
190
191 static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
192 {
193 ll->tm = tm;
194
195 ll->bitmap_info.tm = tm;
196 ll->bitmap_info.levels = 1;
197
198 /*
199 * Because the new bitmap blocks are created via a shadow
200 * operation, the old entry has already had its reference count
201 * decremented and we don't need the btree to do any bookkeeping.
202 */
203 ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
204 ll->bitmap_info.value_type.inc = NULL;
205 ll->bitmap_info.value_type.dec = NULL;
206 ll->bitmap_info.value_type.equal = NULL;
207
208 ll->ref_count_info.tm = tm;
209 ll->ref_count_info.levels = 1;
210 ll->ref_count_info.value_type.size = sizeof(uint32_t);
211 ll->ref_count_info.value_type.inc = NULL;
212 ll->ref_count_info.value_type.dec = NULL;
213 ll->ref_count_info.value_type.equal = NULL;
214
215 ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));
216
217 if (ll->block_size > (1 << 30)) {
218 DMERR("block size too big to hold bitmaps");
219 return -EINVAL;
220 }
221
222 ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
223 ENTRIES_PER_BYTE;
224 ll->nr_blocks = 0;
225 ll->bitmap_root = 0;
226 ll->ref_count_root = 0;
227
228 return 0;
229 }
230
231 int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
232 {
233 int r;
234 dm_block_t i, nr_blocks, nr_indexes;
235 unsigned old_blocks, blocks;
236
237 nr_blocks = ll->nr_blocks + extra_blocks;
238 old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
239 blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);
240
241 nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
242 if (nr_indexes > ll->max_entries(ll)) {
243 DMERR("space map too large");
244 return -EINVAL;
245 }
246
247 for (i = old_blocks; i < blocks; i++) {
248 struct dm_block *b;
249 struct disk_index_entry idx;
250
251 r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
252 if (r < 0)
253 return r;
254 idx.blocknr = cpu_to_le64(dm_block_location(b));
255
256 r = dm_tm_unlock(ll->tm, b);
257 if (r < 0)
258 return r;
259
260 idx.nr_free = cpu_to_le32(ll->entries_per_block);
261 idx.none_free_before = 0;
262
263 r = ll->save_ie(ll, i, &idx);
264 if (r < 0)
265 return r;
266 }
267
268 ll->nr_blocks = nr_blocks;
269 return 0;
270 }
271
/*
 * Reads the raw 2-bit bitmap value for block b into *result (0-2 are
 * literal counts; 3 means "overflow, consult the ref count btree").
 */
int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	/* index becomes the bitmap number, b the offset within it. */
	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	return dm_tm_unlock(ll->tm, blk);
}
293
294 int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
295 {
296 __le32 le_rc;
297 int r = sm_ll_lookup_bitmap(ll, b, result);
298
299 if (r)
300 return r;
301
302 if (*result != 3)
303 return r;
304
305 r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
306 if (r < 0)
307 return r;
308
309 *result = le32_to_cpu(le_rc);
310
311 return r;
312 }
313
/*
 * Finds the first free block in [begin, end), returning it in *result.
 * Walks the index entries, skipping bitmaps that advertise no free
 * space, and uses none_free_before to avoid rescanning the start of a
 * bitmap.  Returns -ENOSPC when nothing is free in the range.
 */
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	/* Split the range into (bitmap index, offset) coordinates. */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);

	/* After the first bitmap, every search starts at offset 0. */
	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		/* Skip bitmaps that have no free entries at all. */
		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		/* Only the final bitmap may be partially in range. */
		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;

		} else if (r < 0) {
			dm_tm_unlock(ll->tm, blk);
			return r;
		}

		r = dm_tm_unlock(ll->tm, blk);
		if (r < 0)
			return r;

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}
373
/*
 * Sets the reference count of block b to ref_count.  Counts 0-2 are
 * stored inline in the 2-bit bitmap; anything larger is recorded as the
 * sentinel 3 plus an entry in the ref count btree.  *ev reports whether
 * the change amounted to an allocation (0 -> nonzero) or a free
 * (nonzero -> 0), and the index entry's free-space bookkeeping is
 * updated accordingly.
 */
int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	/* Split b into (bitmap index, bit offset within that bitmap). */
	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	/* Shadow the bitmap block so it can be modified transactionally. */
	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	/* The shadow may live at a new location; track it. */
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		r = dm_tm_unlock(ll->tm, nb);
		if (r < 0)
			return r;

		/* Count left the overflow range: drop the btree entry. */
		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		/* Sentinel 3 means "real count is in the btree". */
		sm_set_bitmap(bm_le, bit, 3);
		r = dm_tm_unlock(ll->tm, nb);
		if (r < 0)
			return r;

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	/* Keep nr_free / none_free_before in step with the transition. */
	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	}

	return ll->save_ie(ll, index, &ie_disk);
}
449
450 int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
451 {
452 int r;
453 uint32_t rc;
454
455 r = sm_ll_lookup(ll, b, &rc);
456 if (r)
457 return r;
458
459 return sm_ll_insert(ll, b, rc + 1, ev);
460 }
461
462 int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
463 {
464 int r;
465 uint32_t rc;
466
467 r = sm_ll_lookup(ll, b, &rc);
468 if (r)
469 return r;
470
471 if (!rc)
472 return -EINVAL;
473
474 return sm_ll_insert(ll, b, rc - 1, ev);
475 }
476
/* Commits outstanding space-map state via the flavour-specific hook. */
int sm_ll_commit(struct ll_disk *ll)
{
	return ll->commit(ll);
}
481
482 /*----------------------------------------------------------------*/
483
484 static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
485 struct disk_index_entry *ie)
486 {
487 memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
488 return 0;
489 }
490
491 static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
492 struct disk_index_entry *ie)
493 {
494 memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
495 return 0;
496 }
497
498 static int metadata_ll_init_index(struct ll_disk *ll)
499 {
500 int r;
501 struct dm_block *b;
502
503 r = dm_tm_new_block(ll->tm, &index_validator, &b);
504 if (r < 0)
505 return r;
506
507 memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
508 ll->bitmap_root = dm_block_location(b);
509
510 return dm_tm_unlock(ll->tm, b);
511 }
512
513 static int metadata_ll_open(struct ll_disk *ll)
514 {
515 int r;
516 struct dm_block *block;
517
518 r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
519 &index_validator, &block);
520 if (r)
521 return r;
522
523 memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
524 return dm_tm_unlock(ll->tm, block);
525 }
526
/* The metadata index is a single block, so its capacity is fixed. */
static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}
531
532 static int metadata_ll_commit(struct ll_disk *ll)
533 {
534 int r, inc;
535 struct dm_block *b;
536
537 r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
538 if (r)
539 return r;
540
541 memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
542 ll->bitmap_root = dm_block_location(b);
543
544 return dm_tm_unlock(ll->tm, b);
545 }
546
547 int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
548 {
549 int r;
550
551 r = sm_ll_init(ll, tm);
552 if (r < 0)
553 return r;
554
555 ll->load_ie = metadata_ll_load_ie;
556 ll->save_ie = metadata_ll_save_ie;
557 ll->init_index = metadata_ll_init_index;
558 ll->open_index = metadata_ll_open;
559 ll->max_entries = metadata_ll_max_entries;
560 ll->commit = metadata_ll_commit;
561
562 ll->nr_blocks = 0;
563 ll->nr_allocated = 0;
564
565 r = ll->init_index(ll);
566 if (r < 0)
567 return r;
568
569 r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
570 if (r < 0)
571 return r;
572
573 return 0;
574 }
575
576 int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
577 void *root_le, size_t len)
578 {
579 int r;
580 struct disk_sm_root *smr = root_le;
581
582 if (len < sizeof(struct disk_sm_root)) {
583 DMERR("sm_metadata root too small");
584 return -ENOMEM;
585 }
586
587 r = sm_ll_init(ll, tm);
588 if (r < 0)
589 return r;
590
591 ll->load_ie = metadata_ll_load_ie;
592 ll->save_ie = metadata_ll_save_ie;
593 ll->init_index = metadata_ll_init_index;
594 ll->open_index = metadata_ll_open;
595 ll->max_entries = metadata_ll_max_entries;
596 ll->commit = metadata_ll_commit;
597
598 ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
599 ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
600 ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
601 ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
602
603 return ll->open_index(ll);
604 }
605
606 /*----------------------------------------------------------------*/
607
/* Disk flavour: index entries live in a btree rooted at bitmap_root. */
static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}
613
/*
 * Disk flavour: inserts/overwrites the entry in the index btree; the
 * root may move, so it is written back through &ll->bitmap_root.
 */
static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	__dm_bless_for_disk(ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &index, ie, &ll->bitmap_root);
}
621
/* Disk flavour: a new index is just an empty btree. */
static int disk_ll_init_index(struct ll_disk *ll)
{
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}
626
/* Disk flavour: the btree root is already loaded; nothing to read. */
static int disk_ll_open(struct ll_disk *ll)
{
	/* nothing to do */
	return 0;
}
632
/* Disk flavour: the btree-backed index has no practical size limit. */
static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}
637
/* Disk flavour: btree writes are already transactional; no-op. */
static int disk_ll_commit(struct ll_disk *ll)
{
	return 0;
}
642
643 int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
644 {
645 int r;
646
647 r = sm_ll_init(ll, tm);
648 if (r < 0)
649 return r;
650
651 ll->load_ie = disk_ll_load_ie;
652 ll->save_ie = disk_ll_save_ie;
653 ll->init_index = disk_ll_init_index;
654 ll->open_index = disk_ll_open;
655 ll->max_entries = disk_ll_max_entries;
656 ll->commit = disk_ll_commit;
657
658 ll->nr_blocks = 0;
659 ll->nr_allocated = 0;
660
661 r = ll->init_index(ll);
662 if (r < 0)
663 return r;
664
665 r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
666 if (r < 0)
667 return r;
668
669 return 0;
670 }
671
672 int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
673 void *root_le, size_t len)
674 {
675 int r;
676 struct disk_sm_root *smr = root_le;
677
678 if (len < sizeof(struct disk_sm_root)) {
679 DMERR("sm_metadata root too small");
680 return -ENOMEM;
681 }
682
683 r = sm_ll_init(ll, tm);
684 if (r < 0)
685 return r;
686
687 ll->load_ie = disk_ll_load_ie;
688 ll->save_ie = disk_ll_save_ie;
689 ll->init_index = disk_ll_init_index;
690 ll->open_index = disk_ll_open;
691 ll->max_entries = disk_ll_max_entries;
692 ll->commit = disk_ll_commit;
693
694 ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
695 ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
696 ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
697 ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
698
699 return ll->open_index(ll);
700 }
701
702 /*----------------------------------------------------------------*/