drivers: power: report battery voltage in AOSP compatible format
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / power / tuxonice_bio_chains.c
/*
 * kernel/power/tuxonice_bio_chains.c
 *
 * Copyright (C) 2009-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * Distributed under GPLv2.
 *
 */
9
10 #include <linux/mm_types.h>
11 #include "tuxonice_bio.h"
12 #include "tuxonice_bio_internal.h"
13 #include "tuxonice_alloc.h"
14 #include "tuxonice_ui.h"
15 #include "tuxonice.h"
16 #include "tuxonice_io.h"
17
/* Head of the list of storage chains, kept sorted by descending priority. */
static struct toi_bdev_info *prio_chain_head;
/* Number of chains currently linked from prio_chain_head. */
static int num_chains;

/* Pointer to current entry being loaded/saved. */
struct toi_extent_iterate_state toi_writer_posn;

/*
 * Number of bytes of a toi_bdev_info that get serialised in the image
 * header: everything from the uuid field to the end of the struct.
 */
#define metadata_size (sizeof(struct toi_bdev_info) - \
		offsetof(struct toi_bdev_info, uuid))

/*
 * After section 0 (header) comes 2 => next_section[0] = 2
 */
static int next_section[3] = { 2, 3, 1 };
31
/**
 * dump_block_chains - print the contents of the bdev info array.
 *
 * Emit (at KERN_DEBUG) each chain in priority order with its list of
 * block extents, then the four saved iterator slots and the per-chain
 * state (extent number, offset) recorded in each slot.
 **/
void dump_block_chains(void)
{
	int i = 0;
	int j;
	struct toi_bdev_info *cur_chain = prio_chain_head;

	while (cur_chain) {
		struct hibernate_extent *this = cur_chain->blocks.first;

		printk(KERN_DEBUG "Chain %d (prio %d):", i, cur_chain->prio);

		/* Extents continue on the same line (KERN_CONT). */
		while (this) {
			printk(KERN_CONT " [%lu-%lu]%s", this->start,
			       this->end, this->next ? "," : "");
			this = this->next;
		}

		printk("\n");
		cur_chain = cur_chain->next;
		i++;
	}

	printk(KERN_DEBUG "Saved states:\n");
	for (i = 0; i < 4; i++) {
		printk(KERN_DEBUG "Slot %d: Chain %d.\n", i, toi_writer_posn.saved_chain_number[i]);

		cur_chain = prio_chain_head;
		j = 0;
		while (cur_chain) {
			printk(KERN_DEBUG " Chain %d: Extent %d. Offset %lu.\n",
			       j, cur_chain->saved_state[i].extent_num,
			       cur_chain->saved_state[i].offset);
			cur_chain = cur_chain->next;
			j++;
		}
		printk(KERN_CONT "\n");
	}
}
73
/**
 * toi_extent_chain_next - advance the current chain's position one block
 *
 * Step toi_writer_posn.current_chain forward by one block offset. At the
 * end of the current extent, move to the start of the next extent; if
 * there is none, mark the chain exhausted (NULL extent, offset 0).
 * No-op when the chain has no current extent.
 **/
static void toi_extent_chain_next(void)
{
	struct toi_bdev_info *this = toi_writer_posn.current_chain;

	if (!this->blocks.current_extent)
		return;

	if (this->blocks.current_offset == this->blocks.current_extent->end) {
		if (this->blocks.current_extent->next) {
			this->blocks.current_extent = this->blocks.current_extent->next;
			this->blocks.current_offset = this->blocks.current_extent->start;
		} else {
			/* Chain exhausted. */
			this->blocks.current_extent = NULL;
			this->blocks.current_offset = 0;
		}
	} else
		this->blocks.current_offset++;
}
95
/**
 * __find_next_chain_same_prio - next usable chain of the same priority
 *
 * Walk the priority list circularly, starting after the current chain,
 * looking for another chain of the same priority that still has a
 * current extent. If we come all the way back around, return the chain
 * we started on (even if it is exhausted - the caller handles that).
 */
static struct toi_bdev_info *__find_next_chain_same_prio(void)
{
	struct toi_bdev_info *start_chain = toi_writer_posn.current_chain;
	struct toi_bdev_info *this = start_chain;
	int orig_prio = this->prio;

	do {
		this = this->next;

		/* Wrap to the head of the list. */
		if (!this)
			this = prio_chain_head;

		/* Back on original chain? Use it again. */
		if (this == start_chain)
			return start_chain;

	} while (!this->blocks.current_extent || this->prio != orig_prio);

	return this;
}
120
121 static void find_next_chain(void)
122 {
123 struct toi_bdev_info *this;
124
125 this = __find_next_chain_same_prio();
126
127 /*
128 * If we didn't get another chain of the same priority that we
129 * can use, look for the next priority.
130 */
131 while (this && !this->blocks.current_extent)
132 this = this->next;
133
134 toi_writer_posn.current_chain = this;
135 }
136
137 /**
138 * toi_extent_state_next - go to the next extent
139 * @blocks: The number of values to progress.
140 * @stripe_mode: Whether to spread usage across all chains.
141 *
142 * Given a state, progress to the next valid entry. We may begin in an
143 * invalid state, as we do when invoked after extent_state_goto_start below.
144 *
145 * When using compression and expected_compression > 0, we let the image size
146 * be larger than storage, so we can validly run out of data to return.
147 **/
148 static unsigned long toi_extent_state_next(int blocks, int current_stream)
149 {
150 int i;
151
152 if (!toi_writer_posn.current_chain)
153 return -ENOSPC;
154
155 /* Assume chains always have lengths that are multiples of @blocks */
156 for (i = 0; i < blocks; i++)
157 toi_extent_chain_next();
158
159 /* The header stream is not striped */
160 if (current_stream || !toi_writer_posn.current_chain->blocks.current_extent)
161 find_next_chain();
162
163 return toi_writer_posn.current_chain ? 0 : -ENOSPC;
164 }
165
166 static void toi_insert_chain_in_prio_list(struct toi_bdev_info *this)
167 {
168 struct toi_bdev_info **prev_ptr;
169 struct toi_bdev_info *cur;
170
171 /* Loop through the existing chain, finding where to insert it */
172 prev_ptr = &prio_chain_head;
173 cur = prio_chain_head;
174
175 while (cur && cur->prio >= this->prio) {
176 prev_ptr = &cur->next;
177 cur = cur->next;
178 }
179
180 this->next = *prev_ptr;
181 *prev_ptr = this;
182
183 this = prio_chain_head;
184 while (this)
185 this = this->next;
186 num_chains++;
187 }
188
189 /**
190 * toi_extent_state_goto_start - reinitialize an extent chain iterator
191 * @state: Iterator to reinitialize
192 **/
193 void toi_extent_state_goto_start(void)
194 {
195 struct toi_bdev_info *this = prio_chain_head;
196
197 while (this) {
198 toi_message(TOI_BIO, TOI_VERBOSE, 0,
199 "Setting current extent to %p.", this->blocks.first);
200 this->blocks.current_extent = this->blocks.first;
201 if (this->blocks.current_extent) {
202 toi_message(TOI_BIO, TOI_VERBOSE, 0,
203 "Setting current offset to %lu.",
204 this->blocks.current_extent->start);
205 this->blocks.current_offset = this->blocks.current_extent->start;
206 }
207
208 this = this->next;
209 }
210
211 toi_message(TOI_BIO, TOI_VERBOSE, 0, "Setting current chain to %p.", prio_chain_head);
212 toi_writer_posn.current_chain = prio_chain_head;
213 toi_message(TOI_BIO, TOI_VERBOSE, 0, "Leaving extent state goto start.");
214 }
215
/**
 * toi_extent_state_save - save state of the iterator
 * @slot: Saved-state slot (0-3) in each chain to populate.
 *
 * Given a state and a struct hibernate_extent_state_store, save the current
 * position in a format that can be used with relocated chains (at
 * resume time): for every chain, record the current offset and the
 * 1-based index of the current extent, and record which chain number the
 * iterator was on in saved_chain_number[slot] (-1 if there is none).
 **/
void toi_extent_state_save(int slot)
{
	struct toi_bdev_info *cur_chain = prio_chain_head;
	struct hibernate_extent *extent;
	struct hibernate_extent_saved_state *chain_state;
	int i = 0;

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "toi_extent_state_save, slot %d.", slot);

	if (!toi_writer_posn.current_chain) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "No current chain => " "chain_num = -1.");
		toi_writer_posn.saved_chain_number[slot] = -1;
		return;
	}

	while (cur_chain) {
		i++;
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "Saving chain %d (%p) "
			    "state, slot %d.", i, cur_chain, slot);

		chain_state = &cur_chain->saved_state[slot];

		chain_state->offset = cur_chain->blocks.current_offset;

		if (toi_writer_posn.current_chain == cur_chain) {
			toi_writer_posn.saved_chain_number[slot] = i;
			toi_message(TOI_BIO, TOI_VERBOSE, 0, "This is the chain "
				    "we were on => chain_num is %d.", i);
		}

		if (!cur_chain->blocks.current_extent) {
			/* Exhausted chain: extent_num 0 is the sentinel. */
			chain_state->extent_num = 0;
			toi_message(TOI_BIO, TOI_VERBOSE, 0, "No current extent "
				    "for this chain => extent_num %d is 0.", i);
			cur_chain = cur_chain->next;
			continue;
		}

		/* Count from the head: a relocation-safe 1-based index. */
		extent = cur_chain->blocks.first;
		chain_state->extent_num = 1;

		while (extent != cur_chain->blocks.current_extent) {
			chain_state->extent_num++;
			extent = extent->next;
		}

		toi_message(TOI_BIO, TOI_VERBOSE, 0, "extent num %d is %d.", i,
			    chain_state->extent_num);

		cur_chain = cur_chain->next;
	}
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Completed saving extent state slot %d.", slot);
}
278
/**
 * toi_extent_state_restore - restore the position saved by extent_state_save
 * @slot: Saved-state slot (0-3) to restore from.
 *
 * Walk every chain, converting the saved 1-based extent index back into
 * a current_extent pointer and restoring the saved offset. Also
 * re-establishes toi_writer_posn.current_chain and refreshes the cached
 * saved_chain_ptr[] entries for all four slots.
 **/
void toi_extent_state_restore(int slot)
{
	int i = 0;
	struct toi_bdev_info *cur_chain = prio_chain_head;
	struct hibernate_extent_saved_state *chain_state;

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "toi_extent_state_restore - slot %d.", slot);

	/* -1 was saved when there was no current chain. */
	if (toi_writer_posn.saved_chain_number[slot] == -1) {
		toi_writer_posn.current_chain = NULL;
		return;
	}

	while (cur_chain) {
		int posn;
		int j;
		i++;
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "Restoring chain %d (%p) "
			    "state, slot %d.", i, cur_chain, slot);

		chain_state = &cur_chain->saved_state[slot];

		posn = chain_state->extent_num;

		cur_chain->blocks.current_extent = cur_chain->blocks.first;
		cur_chain->blocks.current_offset = chain_state->offset;

		if (i == toi_writer_posn.saved_chain_number[slot]) {
			toi_writer_posn.current_chain = cur_chain;
			toi_message(TOI_BIO, TOI_VERBOSE, 0, "Found current chain.");
		}

		/* Rebuild the cached chain pointer for every slot. */
		for (j = 0; j < 4; j++)
			if (i == toi_writer_posn.saved_chain_number[j]) {
				toi_writer_posn.saved_chain_ptr[j] = cur_chain;
				toi_message(TOI_BIO, TOI_VERBOSE, 0,
					    "Found saved chain ptr %d (%p) (offset"
					    " %d).", j, cur_chain,
					    cur_chain->saved_state[j].offset);
			}

		/* posn is 1-based; 0 means the chain was exhausted. */
		if (posn) {
			while (--posn)
				cur_chain->blocks.current_extent =
				    cur_chain->blocks.current_extent->next;
		} else
			cur_chain->blocks.current_extent = NULL;

		cur_chain = cur_chain->next;
	}
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Done.");
	if (test_action_state(TOI_LOGALL))
		dump_block_chains();
}
338
339 /*
340 * Storage needed
341 *
342 * Returns amount of space in the image header required
343 * for the chain data. This ignores the links between
344 * pages, which we factor in when allocating the space.
345 */
346 int toi_bio_devinfo_storage_needed(void)
347 {
348 int result = sizeof(num_chains);
349 struct toi_bdev_info *chain = prio_chain_head;
350
351 while (chain) {
352 result += metadata_size;
353
354 /* Chain size */
355 result += sizeof(int);
356
357 /* Extents */
358 result += (2 * sizeof(unsigned long) * chain->blocks.num_extents);
359
360 chain = chain->next;
361 }
362
363 result += 4 * sizeof(int);
364 return result;
365 }
366
/*
 * chain_pages_used - how many pages of this chain the image consumed.
 *
 * Uses the state saved in slot 3: sum the sizes of all extents before
 * the saved one, then add the blocks used within it. extent_num 0 means
 * either an empty chain (0 pages) or a fully-used one (blocks.size).
 */
static unsigned long chain_pages_used(struct toi_bdev_info *chain)
{
	struct hibernate_extent *this = chain->blocks.first;
	struct hibernate_extent_saved_state *state = &chain->saved_state[3];
	unsigned long size = 0;
	int extent_idx = 1;

	if (!state->extent_num) {
		if (!this)
			return 0;
		else
			return chain->blocks.size;
	}

	while (extent_idx < state->extent_num) {
		size += (this->end - this->start + 1);
		this = this->next;
		extent_idx++;
	}

	/* We didn't use the one we're sitting on, so don't count it */
	return size + state->offset - this->start;
}
390
/**
 * toi_serialise_extent_chain - write a chain in the image
 * @chain: Chain to write.
 *
 * Write the chain's metadata (uuid onwards), its extent count, then
 * each extent's start/end pair via the active allocator's header
 * writer. Returns 0 on success or the first failing result from
 * rw_header_chunk.
 **/
static int toi_serialise_extent_chain(struct toi_bdev_info *chain)
{
	struct hibernate_extent *this;
	int ret;
	int i = 1;

	/* Record how much of this chain the image actually used. */
	chain->pages_used = chain_pages_used(chain);

	if (test_action_state(TOI_LOGALL))
		dump_block_chains();
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Serialising chain (dev_t %lx).", chain->dev_t);
	/* Device info - dev_t, prio, bmap_shift, blocks per page, positions */
	ret = toiActiveAllocator->rw_header_chunk(WRITE, &toi_blockwriter_ops,
			(char *)&chain->uuid, metadata_size);
	if (ret)
		return ret;

	/* Num extents */
	ret = toiActiveAllocator->rw_header_chunk(WRITE, &toi_blockwriter_ops,
			(char *)&chain->blocks.num_extents, sizeof(int));
	if (ret)
		return ret;

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "%d extents.", chain->blocks.num_extents);

	this = chain->blocks.first;
	while (this) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "Extent %d.", i);
		ret = toiActiveAllocator->rw_header_chunk(WRITE,
				&toi_blockwriter_ops,
				(char *)this, 2 * sizeof(this->start));
		if (ret)
			return ret;
		this = this->next;
		i++;
	}

	return ret;
}
434
/**
 * toi_serialise_extent_chains - write all chains to the image header
 *
 * Write the number of chains, each chain in turn, and finally the four
 * saved chain numbers recording which chain each section starts on.
 * Returns 0 on success or the first failing rw_header_chunk result.
 **/
int toi_serialise_extent_chains(void)
{
	struct toi_bdev_info *this = prio_chain_head;
	int result;

	/* Write the number of chains */
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Write number of chains (%d)", num_chains);
	result = toiActiveAllocator->rw_header_chunk(WRITE,
			&toi_blockwriter_ops, (char *)&num_chains,
			sizeof(int));
	if (result)
		return result;

	/* Then the chains themselves */
	while (this) {
		result = toi_serialise_extent_chain(this);
		if (result)
			return result;
		this = this->next;
	}

	/*
	 * Finally, the chain we should be on at the start of each
	 * section.
	 */
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Saved chain numbers.");
	result = toiActiveAllocator->rw_header_chunk(WRITE,
			&toi_blockwriter_ops,
			(char *)&toi_writer_posn.saved_chain_number[0],
			4 * sizeof(int));

	return result;
}
468
/**
 * toi_register_storage_chain - link a new chain into the priority list
 * @new: Chain to register.
 *
 * Always returns 0.
 */
int toi_register_storage_chain(struct toi_bdev_info *new)
{
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Inserting chain %p into list.", new);
	toi_insert_chain_in_prio_list(new);
	return 0;
}
475
476 static void free_bdev_info(struct toi_bdev_info *chain)
477 {
478 toi_message(TOI_BIO, TOI_VERBOSE, 0, "Free chain %p.", chain);
479
480 toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Block extents.");
481 toi_put_extent_chain(&chain->blocks);
482
483 /*
484 * The allocator may need to do more than just free the chains
485 * (swap_free, for example). Don't call from boot kernel.
486 */
487 toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Allocator extents.");
488 if (chain->allocator)
489 chain->allocator->bio_allocator_ops->free_storage(chain);
490
491 /*
492 * Dropping out of reading atomic copy? Need to undo
493 * toi_open_by_devnum.
494 */
495 toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Bdev.");
496 if (chain->bdev && !IS_ERR(chain->bdev) &&
497 chain->bdev != resume_block_device &&
498 chain->bdev != header_block_device && test_toi_state(TOI_TRYING_TO_RESUME))
499 toi_close_bdev(chain->bdev);
500
501 /* Poison */
502 toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Struct.");
503 toi_kfree(39, chain, sizeof(*chain));
504
505 if (prio_chain_head == chain)
506 prio_chain_head = NULL;
507
508 num_chains--;
509 }
510
511 void free_all_bdev_info(void)
512 {
513 struct toi_bdev_info *this = prio_chain_head;
514
515 while (this) {
516 struct toi_bdev_info *next = this->next;
517 free_bdev_info(this);
518 this = next;
519 }
520
521 memset((char *)&toi_writer_posn, 0, sizeof(toi_writer_posn));
522 prio_chain_head = NULL;
523 }
524
/*
 * Point the iterator at the highest-priority chain and advance one
 * page. Called once enough extents have been loaded (see
 * toi_load_extent_chain) for go_next_page() to be safe.
 */
static void set_up_start_position(void)
{
	toi_writer_posn.current_chain = prio_chain_head;
	go_next_page(0, 0);
}
530
531 /**
532 * toi_load_extent_chain - read back a chain saved in the image
533 * @chain: Chain to load
534 *
535 * The linked list of extents is reconstructed from the disk. chain will point
536 * to the first entry.
537 **/
538 int toi_load_extent_chain(int index, int *num_loaded)
539 {
540 struct toi_bdev_info *chain = toi_kzalloc(39,
541 sizeof(struct toi_bdev_info), GFP_ATOMIC);
542 struct hibernate_extent *this, *last = NULL;
543 int i, ret;
544
545 toi_message(TOI_BIO, TOI_VERBOSE, 0, "Loading extent chain %d.", index);
546 /* Get dev_t, prio, bmap_shift, blocks per page, positions */
547 ret = toiActiveAllocator->rw_header_chunk_noreadahead(READ, NULL,
548 (char *)&chain->uuid, metadata_size);
549
550 if (ret) {
551 printk(KERN_ERR "Failed to read the size of extent chain.\n");
552 toi_kfree(39, chain, sizeof(*chain));
553 return 1;
554 }
555
556 toi_bkd.pages_used[index] = chain->pages_used;
557
558 ret = toiActiveAllocator->rw_header_chunk_noreadahead(READ, NULL,
559 (char *)&chain->blocks.num_extents,
560 sizeof(int));
561 if (ret) {
562 printk(KERN_ERR "Failed to read the size of extent chain.\n");
563 toi_kfree(39, chain, sizeof(*chain));
564 return 1;
565 }
566
567 toi_message(TOI_BIO, TOI_VERBOSE, 0, "%d extents.", chain->blocks.num_extents);
568
569 for (i = 0; i < chain->blocks.num_extents; i++) {
570 toi_message(TOI_BIO, TOI_VERBOSE, 0, "Extent %d.", i + 1);
571
572 this = toi_kzalloc(2, sizeof(struct hibernate_extent), TOI_ATOMIC_GFP);
573 if (!this) {
574 printk(KERN_INFO "Failed to allocate a new extent.\n");
575 free_bdev_info(chain);
576 return -ENOMEM;
577 }
578 this->next = NULL;
579 /* Get the next page */
580 ret = toiActiveAllocator->rw_header_chunk_noreadahead(READ,
581 NULL, (char *)this,
582 2 * sizeof(this->start));
583 if (ret) {
584 printk(KERN_INFO "Failed to read an extent.\n");
585 toi_kfree(2, this, sizeof(struct hibernate_extent));
586 free_bdev_info(chain);
587 return 1;
588 }
589
590 if (last)
591 last->next = this;
592 else {
593 char b1[32], b2[32], b3[32];
594 /*
595 * Open the bdev
596 */
597 toi_message(TOI_BIO, TOI_VERBOSE, 0,
598 "Chain dev_t is %s. Resume dev t is %s. Header"
599 " bdev_t is %s.\n",
600 format_dev_t(b1, chain->dev_t),
601 format_dev_t(b2, resume_dev_t),
602 format_dev_t(b3, toi_sig_data->header_dev_t));
603
604 if (chain->dev_t == resume_dev_t)
605 chain->bdev = resume_block_device;
606 else if (chain->dev_t == toi_sig_data->header_dev_t)
607 chain->bdev = header_block_device;
608 else {
609 chain->bdev = toi_open_bdev(chain->uuid, chain->dev_t, 1);
610 if (IS_ERR(chain->bdev)) {
611 free_bdev_info(chain);
612 return -ENODEV;
613 }
614 }
615
616 toi_message(TOI_BIO, TOI_VERBOSE, 0, "Chain bmap shift "
617 "is %d and blocks per page is %d.",
618 chain->bmap_shift, chain->blocks_per_page);
619
620 chain->blocks.first = this;
621
622 /*
623 * Couldn't do this earlier, but can't do
624 * goto_start now - we may have already used blocks
625 * in the first chain.
626 */
627 chain->blocks.current_extent = this;
628 chain->blocks.current_offset = this->start;
629
630 /*
631 * Can't wait until we've read the whole chain
632 * before we insert it in the list. We might need
633 * this chain to read the next page in the header
634 */
635 toi_insert_chain_in_prio_list(chain);
636 }
637
638 /*
639 * We have to wait until 2 extents are loaded before setting up
640 * properly because if the first extent has only one page, we
641 * will need to put the position on the second extent. Sounds
642 * obvious, but it wasn't!
643 */
644 (*num_loaded)++;
645 if ((*num_loaded) == 2)
646 set_up_start_position();
647 last = this;
648 }
649
650 /*
651 * Shouldn't get empty chains, but it's not impossible. Link them in so
652 * they get freed properly later.
653 */
654 if (!chain->blocks.num_extents)
655 toi_insert_chain_in_prio_list(chain);
656
657 if (!chain->blocks.current_extent) {
658 chain->blocks.current_extent = chain->blocks.first;
659 if (chain->blocks.current_extent)
660 chain->blocks.current_offset = chain->blocks.current_extent->start;
661 }
662 return 0;
663 }
664
/**
 * toi_load_extent_chains - read back all extent chains from the header
 *
 * Counterpart of toi_serialise_extent_chains: read the chain count,
 * load each chain, ensure the start position gets set even for a
 * single-extent image, then read the saved chain numbers.
 * Returns 0 on success or the first failing result.
 **/
int toi_load_extent_chains(void)
{
	int result;
	int to_load;
	int i;
	int extents_loaded = 0;

	result = toiActiveAllocator->rw_header_chunk_noreadahead(READ, NULL,
			(char *)&to_load, sizeof(int));
	if (result)
		return result;
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "%d chains to read.", to_load);

	for (i = 0; i < to_load; i++) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0, " >> Loading chain %d/%d.", i, to_load);
		result = toi_load_extent_chain(i, &extents_loaded);
		if (result)
			return result;
	}

	/* If we never got to a second extent, we still need to do this. */
	if (extents_loaded == 1)
		set_up_start_position();

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Save chain numbers.");
	result = toiActiveAllocator->rw_header_chunk_noreadahead(READ,
			&toi_blockwriter_ops,
			(char *)&toi_writer_posn.
			saved_chain_number[0],
			4 * sizeof(int));

	return result;
}
698
/**
 * toi_end_of_stream - have we hit the boundary of the current stream?
 * @writing: Whether reading or writing the image.
 * @section_barrier: Whether section boundaries should be honoured.
 *
 * Compare the current position with the saved position at which the
 * next section (per next_section[]) begins. Returns 1 at end of stream
 * (or, when writing the header, after flagging a broken header), 0
 * otherwise.
 */
static int toi_end_of_stream(int writing, int section_barrier)
{
	struct toi_bdev_info *cur_chain = toi_writer_posn.current_chain;
	int compare_to = next_section[current_stream];
	struct toi_bdev_info *compare_chain = toi_writer_posn.saved_chain_ptr[compare_to];
	int compare_offset = compare_chain ? compare_chain->saved_state[compare_to].offset : 0;

	if (!section_barrier)
		return 0;

	/* No chain left at all => treat as end of stream. */
	if (!cur_chain)
		return 1;

	if (cur_chain == compare_chain && cur_chain->blocks.current_offset == compare_offset) {
		if (writing) {
			/* Running into the next section while writing the header. */
			if (!current_stream) {
				debug_broken_header();
				return 1;
			}
		} else {
			/* Normal end of stream when reading. */
			more_readahead = 0;
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "Reached the end of stream %d "
				    "(not an error).", current_stream);
			return 1;
		}
	}

	return 0;
}
729
/**
 * go_next_page - skip blocks to the start of the next page
 * @writing: Whether we're reading or writing the image.
 * @section_barrier: Whether to complain when we run out of space
 *	(only meaningful when writing).
 *
 * Go forward one page.
 *
 * Returns 0 on success or -ENOSPC when the extents are exhausted.
 **/
int go_next_page(int writing, int section_barrier)
{
	struct toi_bdev_info *cur_chain = toi_writer_posn.current_chain;
	/* Advance a full page's worth of blocks on the current chain. */
	int max = cur_chain ? cur_chain->blocks_per_page : 1;

	/* Nope. Go foward a page - or maybe two. Don't stripe the header,
	 * so that bad fragmentation doesn't put the extent data containing
	 * the location of the second page out of the first header page.
	 */
	if (toi_extent_state_next(max, current_stream)) {
		/* Don't complain if readahead falls off the end */
		if (writing && section_barrier) {
			toi_message(TOI_BIO, TOI_VERBOSE, 0, "Extent state eof. "
				    "Expected compression ratio too optimistic?");
			if (test_action_state(TOI_LOGALL))
				dump_block_chains();
		}
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "Ran out of extents to "
			    "read/write. (Not necessarily a fatal error.");
		return -ENOSPC;
	}

	return 0;
}
760
761 int devices_of_same_priority(struct toi_bdev_info *this)
762 {
763 struct toi_bdev_info *check = prio_chain_head;
764 int i = 0;
765
766 while (check) {
767 if (check->prio == this->prio)
768 i++;
769 check = check->next;
770 }
771
772 return i;
773 }
774
/**
 * toi_bio_rw_page - do i/o on the next disk page in the image
 * @writing: Whether reading or writing.
 * @page: Page to do i/o on.
 * @is_readahead: Whether we're doing readahead
 * @free_group: The group used in allocating the page
 *
 * Submit a page for reading or writing, possibly readahead.
 * Pass the group used in allocating the page as well, as it should
 * be freed on completion of the bio if we're writing the page.
 *
 * Returns the result of toi_do_io, or -ENOSPC at end of stream.
 **/
int toi_bio_rw_page(int writing, struct page *page, int is_readahead, int free_group)
{
	int result = toi_end_of_stream(writing, 1);
	struct toi_bdev_info *dev_info = toi_writer_posn.current_chain;

	if (result) {
		/* Running out of space is fatal only when writing. */
		if (writing)
			abort_hibernate(TOI_INSUFFICIENT_STORAGE,
					"Insufficient storage for your image.");
		else
			toi_message(TOI_BIO, TOI_VERBOSE, 0, "Seeking to "
				    "read/write another page when stream has " "ended.");
		return -ENOSPC;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "%s %lx:%ld",
		    writing ? "Write" : "Read", dev_info->dev_t, dev_info->blocks.current_offset);

	result = toi_do_io(writing, dev_info->bdev,
			   dev_info->blocks.current_offset << dev_info->bmap_shift,
			   page, is_readahead, 0, free_group);

	/* Ignore the result here - will check end of stream if come in again */
	go_next_page(writing, 1);

	if (result)
		printk(KERN_ERR "toi_do_io returned %d.\n", result);
	return result;
}
816
/* dev_t of the device holding the image header (highest-priority chain). */
dev_t get_header_dev_t(void)
{
	return prio_chain_head->dev_t;
}
821
/* Block device holding the image header (highest-priority chain). */
struct block_device *get_header_bdev(void)
{
	return prio_chain_head->bdev;
}
826
/* Device block address of the first header extent (start << bmap_shift). */
unsigned long get_headerblock(void)
{
	return prio_chain_head->blocks.first->start << prio_chain_head->bmap_shift;
}
831
832 int get_main_pool_phys_params(void)
833 {
834 struct toi_bdev_info *this = prio_chain_head;
835 int result;
836
837 while (this) {
838 result = this->allocator->bio_allocator_ops->bmap(this);
839 if (result)
840 return result;
841 this = this->next;
842 }
843
844 return 0;
845 }
846
/*
 * apply_header_reservation - step past the pages reserved for the header.
 *
 * Rewind the iterator to the start of storage, skip header_pages_reserved
 * pages, and save that position in slot 2 (the start of pageset 2).
 * Returns 0 on success or -ENOSPC if the reservation doesn't fit.
 */
static int apply_header_reservation(void)
{
	int i;

	if (!header_pages_reserved) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "No header pages reserved at the moment.");
		return 0;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Applying header reservation.");

	/* Apply header space reservation */
	toi_extent_state_goto_start();

	for (i = 0; i < header_pages_reserved; i++)
		if (go_next_page(1, 0))
			return -ENOSPC;

	/* The end of header pages will be the start of pageset 2 */
	toi_extent_state_save(2);

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Finished applying header reservation.");
	return 0;
}
871
/*
 * Ask every enabled BIO allocator module to register the storage it
 * controls. Stops at the first failure and returns that result.
 */
static int toi_bio_register_storage(void)
{
	int result = 0;
	struct toi_module_ops *this_module;

	list_for_each_entry(this_module, &toi_modules, module_list) {
		if (!this_module->enabled || this_module->type != BIO_ALLOCATOR_MODULE)
			continue;
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "Registering storage from %s.", this_module->name);
		result = this_module->bio_allocator_ops->register_storage();
		if (result)
			break;
	}

	return result;
}
889
/**
 * toi_bio_allocate_storage - try to secure storage for the image
 * @request: Number of pages of storage wanted.
 *
 * Register storage if none is known yet, work out how much is still
 * needed on top of what we already have (extent metadata plus the
 * header reservation), then ask the allocators for it - spreading the
 * request evenly across chains of equal priority before moving to
 * lower priorities. Finishes by bmapping the pages and re-applying the
 * header reservation. Returns apply_header_reservation()'s result, or
 * 0 when no storage could be registered at all.
 */
int toi_bio_allocate_storage(unsigned long request)
{
	struct toi_bdev_info *chain = prio_chain_head;
	unsigned long to_get = request;
	unsigned long extra_pages, needed;
	int no_free = 0;

	if (!chain) {
		int result = toi_bio_register_storage();
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "toi_bio_allocate_storage: "
			    "Registering storage.");
		if (result)
			return 0;
		chain = prio_chain_head;
		if (!chain) {
			printk("TuxOnIce: No storage was registered.\n");
			return 0;
		}
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "toi_bio_allocate_storage: "
		    "Request is %lu pages.", request);
	/* Extent metadata overhead: a long plus an int per requested page. */
	extra_pages = DIV_ROUND_UP(request * (sizeof(unsigned long)
			+ sizeof(int)), PAGE_SIZE);
	needed = request + extra_pages + header_pages_reserved;
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Adding %lu extra pages and %lu "
		    "for header => %lu.", extra_pages, header_pages_reserved, needed);
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Already allocated %lu pages.", raw_pages_allocd);

	to_get = needed > raw_pages_allocd ? needed - raw_pages_allocd : 0;
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Need to get %lu pages.", to_get);

	if (!to_get)
		return apply_header_reservation();

	while (to_get && chain) {
		int num_group = devices_of_same_priority(chain);
		/* Exclude chains that returned nothing last round. */
		int divisor = num_group - no_free;
		int i;
		unsigned long portion = DIV_ROUND_UP(to_get, divisor);
		unsigned long got = 0;
		unsigned long got_this_round = 0;
		struct toi_bdev_info *top = chain;

		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    " Start of loop. To get is %lu. Divisor is %d.", to_get, divisor);
		no_free = 0;

		/*
		 * We're aiming to spread the allocated storage as evenly
		 * as possible, but we also want to get all the storage we
		 * can off this priority.
		 */
		for (i = 0; i < num_group; i++) {
			struct toi_bio_allocator_ops *ops = chain->allocator->bio_allocator_ops;
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    " Asking for %lu pages from chain %p.", portion, chain);
			got = ops->allocate_storage(chain, portion);
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    " Got %lu pages from allocator %p.", got, chain);
			if (!got)
				no_free++;
			got_this_round += got;
			chain = chain->next;
		}
		toi_message(TOI_BIO, TOI_VERBOSE, 0, " Loop finished. Got a "
			    "total of %lu pages from %d allocators.",
			    got_this_round, divisor - no_free);

		raw_pages_allocd += got_this_round;
		to_get = needed > raw_pages_allocd ? needed - raw_pages_allocd : 0;

		/*
		 * If we got anything from chains of this priority and we
		 * still have storage to allocate, go over this priority
		 * again.
		 */
		if (got_this_round && to_get)
			chain = top;
		else
			no_free = 0;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Finished allocating. Calling "
		    "get_main_pool_phys_params");
	/* Now let swap allocator bmap the pages */
	get_main_pool_phys_params();

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Done. Reserving header.");
	return apply_header_reservation();
}
981
982 void toi_bio_chains_post_atomic(struct toi_boot_kernel_data *bkd)
983 {
984 int i = 0;
985 struct toi_bdev_info *cur_chain = prio_chain_head;
986
987 while (cur_chain) {
988 cur_chain->pages_used = bkd->pages_used[i];
989 cur_chain = cur_chain->next;
990 i++;
991 }
992 }
993
994 int toi_bio_chains_debug_info(char *buffer, int size)
995 {
996 /* Show what we actually used */
997 struct toi_bdev_info *cur_chain = prio_chain_head;
998 int len = 0;
999
1000 while (cur_chain) {
1001 len += scnprintf(buffer + len, size - len, " Used %lu pages "
1002 "from %s.\n", cur_chain->pages_used, cur_chain->name);
1003 cur_chain = cur_chain->next;
1004 }
1005
1006 return len;
1007 }