/*
 * kernel/power/tuxonice_bio_devinfo.c
 *
 * Copyright (C) 2009-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * Distributed under GPLv2.
 */
10 #include <linux/mm_types.h>
11 #include "tuxonice_bio.h"
12 #include "tuxonice_bio_internal.h"
13 #include "tuxonice_alloc.h"
14 #include "tuxonice_ui.h"
16 #include "tuxonice_io.h"
/* Head of the singly-linked list of storage chains, kept sorted by
 * descending priority (see toi_insert_chain_in_prio_list below). */
18 static struct toi_bdev_info
*prio_chain_head
;
/* Count of chains currently registered in the list above. */
19 static int num_chains
;
21 /* Pointer to current entry being loaded/saved. */
22 struct toi_extent_iterate_state toi_writer_posn
;
/* Bytes of per-chain metadata written to the image header: everything in
 * struct toi_bdev_info from the uuid member to the end of the struct. */
24 #define metadata_size (sizeof(struct toi_bdev_info) - \
25 offsetof(struct toi_bdev_info, uuid))
28 * After section 0 (header) comes 2 => next_section[0] = 2
/* Maps the current image section to the section that follows it;
 * used by toi_end_of_stream to find the barrier to compare against. */
30 static int next_section
[3] = { 2, 3, 1 };
33 * dump_block_chains - print the contents of the bdev info array.
/* Debug helper: printk every chain's extent ranges, then the iterator
 * state saved in each of the four slots.
 * NOTE(review): the loop headers and local declarations (i, j, chain
 * walks) are missing from this extract — confirm against full source. */
35 void dump_block_chains(void)
39 struct toi_bdev_info
*cur_chain
= prio_chain_head
;
42 struct hibernate_extent
*this = cur_chain
->blocks
.first
;
/* One debug line per chain, extents appended as continuations. */
44 printk(KERN_DEBUG
"Chain %d (prio %d):", i
, cur_chain
->prio
);
47 printk(KERN_CONT
" [%lu-%lu]%s", this->start
,
48 this->end
, this->next
? "," : "");
53 cur_chain
= cur_chain
->next
;
/* Now dump the saved iterator state for each of the 4 slots. */
57 printk(KERN_DEBUG
"Saved states:\n");
58 for (i
= 0; i
< 4; i
++) {
59 printk(KERN_DEBUG
"Slot %d: Chain %d.\n", i
, toi_writer_posn
.saved_chain_number
[i
]);
61 cur_chain
= prio_chain_head
;
64 printk(KERN_DEBUG
" Chain %d: Extent %d. Offset %lu.\n",
65 j
, cur_chain
->saved_state
[i
].extent_num
,
66 cur_chain
->saved_state
[i
].offset
);
67 cur_chain
= cur_chain
->next
;
70 printk(KERN_CONT
"\n");
/* Advance the current chain's iterator by one block: step within the
 * current extent, hop to the next extent at an extent boundary, or mark
 * the chain exhausted (current_extent = NULL) at the end of the list. */
77 static void toi_extent_chain_next(void)
79 struct toi_bdev_info
*this = toi_writer_posn
.current_chain
;
/* Nothing to do if this chain is already exhausted. */
81 if (!this->blocks
.current_extent
)
/* At the last block of the current extent? */
84 if (this->blocks
.current_offset
== this->blocks
.current_extent
->end
) {
85 if (this->blocks
.current_extent
->next
) {
86 this->blocks
.current_extent
= this->blocks
.current_extent
->next
;
87 this->blocks
.current_offset
= this->blocks
.current_extent
->start
;
/* No further extent: mark the chain as used up. */
89 this->blocks
.current_extent
= NULL
;
90 this->blocks
.current_offset
= 0;
/* Still inside the extent: just advance the offset. */
93 this->blocks
.current_offset
++;
/* Round-robin to the next chain with the same priority as the current
 * one that still has blocks available; wraps via prio_chain_head and
 * may land back on the starting chain.
 * NOTE(review): the loop body advancing 'this' is partly missing here. */
100 static struct toi_bdev_info
*__find_next_chain_same_prio(void)
102 struct toi_bdev_info
*start_chain
= toi_writer_posn
.current_chain
;
103 struct toi_bdev_info
*this = start_chain
;
104 int orig_prio
= this->prio
;
/* Ran off the end of the list: restart from the head. */
110 this = prio_chain_head
;
112 /* Back on original chain? Use it again. */
113 if (this == start_chain
)
/* Keep looking until a usable chain of the original priority is found. */
116 } while (!this->blocks
.current_extent
|| this->prio
!= orig_prio
);
121 static void find_next_chain(void)
123 struct toi_bdev_info
*this;
125 this = __find_next_chain_same_prio();
128 * If we didn't get another chain of the same priority that we
129 * can use, look for the next priority.
131 while (this && !this->blocks
.current_extent
)
134 toi_writer_posn
.current_chain
= this;
138 * toi_extent_state_next - go to the next extent
139 * @blocks: The number of values to progress.
140 * @stripe_mode: Whether to spread usage across all chains.
142 * Given a state, progress to the next valid entry. We may begin in an
143 * invalid state, as we do when invoked after extent_state_goto_start below.
145 * When using compression and expected_compression > 0, we let the image size
146 * be larger than storage, so we can validly run out of data to return.
/* Returns 0 on success, -ENOSPC once no chain has storage left. */
148 static unsigned long toi_extent_state_next(int blocks
, int current_stream
)
152 if (!toi_writer_posn
.current_chain
)
155 /* Assume chains always have lengths that are multiples of @blocks */
156 for (i
= 0; i
< blocks
; i
++)
157 toi_extent_chain_next();
159 /* The header stream is not striped */
160 if (current_stream
|| !toi_writer_posn
.current_chain
->blocks
.current_extent
)
163 return toi_writer_posn
.current_chain
? 0 : -ENOSPC
;
/* Insert @this into the chain list, keeping it sorted by descending
 * prio; ties go after existing chains of the same priority.
 * NOTE(review): the lines storing *prev_ptr = this and updating
 * num_chains are not visible in this extract. */
166 static void toi_insert_chain_in_prio_list(struct toi_bdev_info
*this)
168 struct toi_bdev_info
**prev_ptr
;
169 struct toi_bdev_info
*cur
;
171 /* Loop through the existing chain, finding where to insert it */
172 prev_ptr
= &prio_chain_head
;
173 cur
= prio_chain_head
;
175 while (cur
&& cur
->prio
>= this->prio
) {
176 prev_ptr
= &cur
->next
;
/* Splice the new chain in front of the first lower-priority entry. */
180 this->next
= *prev_ptr
;
183 this = prio_chain_head
;
190 * toi_extent_state_goto_start - reinitialize an extent chain iterator
191 * @state: Iterator to reinitialize
/* Reset every chain's iterator to its first extent/offset, then point
 * the writer position at the head (highest priority) chain. */
193 void toi_extent_state_goto_start(void)
195 struct toi_bdev_info
*this = prio_chain_head
;
198 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
199 "Setting current extent to %p.", this->blocks
.first
);
200 this->blocks
.current_extent
= this->blocks
.first
;
201 if (this->blocks
.current_extent
) {
202 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
203 "Setting current offset to %lu.",
204 this->blocks
.current_extent
->start
);
205 this->blocks
.current_offset
= this->blocks
.current_extent
->start
;
211 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Setting current chain to %p.", prio_chain_head
);
212 toi_writer_posn
.current_chain
= prio_chain_head
;
213 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Leaving extent state goto start.");
217 * toi_extent_state_save - save state of the iterator
218 * @state: Current state of the chain
219 * @saved_state: Iterator to populate
221 * Given a state and a struct hibernate_extent_state_store, save the current
222 * position in a format that can be used with relocated chains (at
/* For each chain, record into saved_state[slot] the current offset and
 * the 1-based index of the current extent (0 = chain exhausted); also
 * record which chain the writer was on in saved_chain_number[slot]. */
225 void toi_extent_state_save(int slot
)
227 struct toi_bdev_info
*cur_chain
= prio_chain_head
;
228 struct hibernate_extent
*extent
;
229 struct hibernate_extent_saved_state
*chain_state
;
232 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "toi_extent_state_save, slot %d.", slot
);
/* No current chain: flag the slot with -1 and (presumably) bail. */
234 if (!toi_writer_posn
.current_chain
) {
235 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "No current chain => " "chain_num = -1.");
236 toi_writer_posn
.saved_chain_number
[slot
] = -1;
242 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Saving chain %d (%p) "
243 "state, slot %d.", i
, cur_chain
, slot
);
245 chain_state
= &cur_chain
->saved_state
[slot
];
247 chain_state
->offset
= cur_chain
->blocks
.current_offset
;
249 if (toi_writer_posn
.current_chain
== cur_chain
) {
250 toi_writer_posn
.saved_chain_number
[slot
] = i
;
251 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "This is the chain "
252 "we were on => chain_num is %d.", i
);
/* Exhausted chain: extent_num 0 is the sentinel. */
255 if (!cur_chain
->blocks
.current_extent
) {
256 chain_state
->extent_num
= 0;
257 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "No current extent "
258 "for this chain => extent_num %d is 0.", i
);
259 cur_chain
= cur_chain
->next
;
/* Count extents from the start to find the current one's index. */
263 extent
= cur_chain
->blocks
.first
;
264 chain_state
->extent_num
= 1;
266 while (extent
!= cur_chain
->blocks
.current_extent
) {
267 chain_state
->extent_num
++;
268 extent
= extent
->next
;
271 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "extent num %d is %d.", i
,
272 chain_state
->extent_num
);
274 cur_chain
= cur_chain
->next
;
276 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Completed saving extent state slot %d.", slot
);
280 * toi_extent_state_restore - restore the position saved by extent_state_save
281 * @state: State to populate
282 * @saved_state: Iterator saved to restore
/* Inverse of toi_extent_state_save: walk the saved extent_num/offset of
 * each chain back into live iterator state, re-resolve saved chain
 * numbers into saved_chain_ptr[], and restore the current chain. */
284 void toi_extent_state_restore(int slot
)
287 struct toi_bdev_info
*cur_chain
= prio_chain_head
;
288 struct hibernate_extent_saved_state
*chain_state
;
290 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "toi_extent_state_restore - slot %d.", slot
);
/* -1 was saved when there was no current chain. */
292 if (toi_writer_posn
.saved_chain_number
[slot
] == -1) {
293 toi_writer_posn
.current_chain
= NULL
;
301 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Restoring chain %d (%p) "
302 "state, slot %d.", i
, cur_chain
, slot
);
304 chain_state
= &cur_chain
->saved_state
[slot
];
306 posn
= chain_state
->extent_num
;
/* Start at the first extent, then advance posn-1 times (below). */
308 cur_chain
->blocks
.current_extent
= cur_chain
->blocks
.first
;
309 cur_chain
->blocks
.current_offset
= chain_state
->offset
;
311 if (i
== toi_writer_posn
.saved_chain_number
[slot
]) {
312 toi_writer_posn
.current_chain
= cur_chain
;
313 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Found current chain.");
/* Cache chain pointers for every slot that recorded this chain. */
316 for (j
= 0; j
< 4; j
++)
317 if (i
== toi_writer_posn
.saved_chain_number
[j
]) {
318 toi_writer_posn
.saved_chain_ptr
[j
] = cur_chain
;
319 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
320 "Found saved chain ptr %d (%p) (offset"
321 " %d).", j
, cur_chain
,
322 cur_chain
->saved_state
[j
].offset
);
/* Step forward through the extent list to the saved extent. */
327 cur_chain
->blocks
.current_extent
=
328 cur_chain
->blocks
.current_extent
->next
;
/* extent_num of 0 meant the chain was exhausted when saved. */
330 cur_chain
->blocks
.current_extent
= NULL
;
332 cur_chain
= cur_chain
->next
;
334 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Done.");
335 if (test_action_state(TOI_LOGALL
))
342 * Returns amount of space in the image header required
343 * for the chain data. This ignores the links between
344 * pages, which we factor in when allocating the space.
346 int toi_bio_devinfo_storage_needed(void)
/* Sum: the chain count, then per chain its metadata, its extent count
 * (an int) and two unsigned longs per extent, plus the four saved
 * chain numbers. Chain-walk loop lines are not visible here. */
348 int result
= sizeof(num_chains
);
349 struct toi_bdev_info
*chain
= prio_chain_head
;
352 result
+= metadata_size
;
355 result
+= sizeof(int);
358 result
+= (2 * sizeof(unsigned long) * chain
->blocks
.num_extents
);
/* The saved chain number for each of the four slots. */
363 result
+= 4 * sizeof(int);
/* Compute how many pages of @chain were actually consumed, using the
 * iterator state saved in slot 3: full extents before the saved one,
 * plus the partial use of the extent we stopped in. extent_num == 0
 * means the whole chain was used, so blocks.size is returned. */
367 static unsigned long chain_pages_used(struct toi_bdev_info
*chain
)
369 struct hibernate_extent
*this = chain
->blocks
.first
;
370 struct hibernate_extent_saved_state
*state
= &chain
->saved_state
[3];
371 unsigned long size
= 0;
374 if (!state
->extent_num
) {
378 return chain
->blocks
.size
;
/* Accumulate the sizes of fully-used extents. */
381 while (extent_idx
< state
->extent_num
) {
382 size
+= (this->end
- this->start
+ 1);
387 /* We didn't use the one we're sitting on, so don't count it */
388 return size
+ state
->offset
- this->start
;
392 * toi_serialise_extent_chain - write a chain in the image
393 * @chain: Chain to write.
/* Write one chain to the image header: its metadata (from uuid on),
 * its extent count, then each extent's start/end pair. Returns the
 * allocator's rw_header_chunk result; error-path lines are elided. */
395 static int toi_serialise_extent_chain(struct toi_bdev_info
*chain
)
397 struct hibernate_extent
*this;
401 chain
->pages_used
= chain_pages_used(chain
);
403 if (test_action_state(TOI_LOGALL
))
405 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Serialising chain (dev_t %lx).", chain
->dev_t
);
406 /* Device info - dev_t, prio, bmap_shift, blocks per page, positions */
407 ret
= toiActiveAllocator
->rw_header_chunk(WRITE
, &toi_blockwriter_ops
,
408 (char *)&chain
->uuid
, metadata_size
);
/* Then the number of extents in this chain. */
413 ret
= toiActiveAllocator
->rw_header_chunk(WRITE
, &toi_blockwriter_ops
,
414 (char *)&chain
->blocks
.num_extents
, sizeof(int));
418 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "%d extents.", chain
->blocks
.num_extents
);
420 this = chain
->blocks
.first
;
422 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Extent %d.", i
);
/* Each extent is written as its first two members (start and end). */
423 ret
= toiActiveAllocator
->rw_header_chunk(WRITE
,
424 &toi_blockwriter_ops
,
425 (char *)this, 2 * sizeof(this->start
));
/* Write all chain data to the image header: the chain count, each
 * chain in priority order, then the saved chain numbers for each
 * stream's starting position. */
435 int toi_serialise_extent_chains(void)
437 struct toi_bdev_info
*this = prio_chain_head
;
440 /* Write the number of chains */
441 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Write number of chains (%d)", num_chains
);
442 result
= toiActiveAllocator
->rw_header_chunk(WRITE
,
443 &toi_blockwriter_ops
, (char *)&num_chains
,
448 /* Then the chains themselves */
450 result
= toi_serialise_extent_chain(this);
457 * Finally, the chain we should be on at the start of each
460 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Saved chain numbers.");
461 result
= toiActiveAllocator
->rw_header_chunk(WRITE
,
462 &toi_blockwriter_ops
,
463 (char *)&toi_writer_posn
.saved_chain_number
[0],
/* Public entry point for allocators to register a storage chain;
 * simply inserts it into the priority-sorted list. */
469 int toi_register_storage_chain(struct toi_bdev_info
*new)
471 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Inserting chain %p into list.", new);
472 toi_insert_chain_in_prio_list(new);
/* Tear down one chain: release its block extents, let the allocator
 * free its storage (not in the boot kernel), close the bdev if this
 * code opened it, then free the struct itself. Resets
 * prio_chain_head if it pointed at this chain. */
476 static void free_bdev_info(struct toi_bdev_info
*chain
)
478 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Free chain %p.", chain
);
480 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, " - Block extents.");
481 toi_put_extent_chain(&chain
->blocks
);
484 * The allocator may need to do more than just free the chains
485 * (swap_free, for example). Don't call from boot kernel.
487 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, " - Allocator extents.");
488 if (chain
->allocator
)
489 chain
->allocator
->bio_allocator_ops
->free_storage(chain
);
492 * Dropping out of reading atomic copy? Need to undo
493 * toi_open_by_devnum.
/* Only close bdevs this module opened itself — never the shared
 * resume/header devices. */
495 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, " - Bdev.");
496 if (chain
->bdev
&& !IS_ERR(chain
->bdev
) &&
497 chain
->bdev
!= resume_block_device
&&
498 chain
->bdev
!= header_block_device
&& test_toi_state(TOI_TRYING_TO_RESUME
))
499 toi_close_bdev(chain
->bdev
);
502 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, " - Struct.");
503 toi_kfree(39, chain
, sizeof(*chain
));
505 if (prio_chain_head
== chain
)
506 prio_chain_head
= NULL
;
/* Free every registered chain (walking via a saved 'next' pointer so
 * the walk survives each free), then zero the writer position and the
 * list head. */
511 void free_all_bdev_info(void)
513 struct toi_bdev_info
*this = prio_chain_head
;
516 struct toi_bdev_info
*next
= this->next
;
517 free_bdev_info(this);
521 memset((char *)&toi_writer_posn
, 0, sizeof(toi_writer_posn
));
522 prio_chain_head
= NULL
;
/* Point the writer at the highest-priority chain to begin iteration.
 * NOTE(review): additional setup lines appear to be elided here. */
525 static void set_up_start_position(void)
527 toi_writer_posn
.current_chain
= prio_chain_head
;
532 * toi_load_extent_chain - read back a chain saved in the image
533 * @chain: Chain to load
535 * The linked list of extents is reconstructed from the disk. chain will point
536 * to the first entry.
/* Allocate a fresh chain, read its metadata and extents back from the
 * header, resolve/open its block device, and insert it into the
 * priority list as soon as the first extent is known (it may be needed
 * to read subsequent header pages). @num_loaded counts extents loaded
 * across calls so the start position can be set after two extents. */
538 int toi_load_extent_chain(int index
, int *num_loaded
)
540 struct toi_bdev_info
*chain
= toi_kzalloc(39,
541 sizeof(struct toi_bdev_info
), GFP_ATOMIC
);
542 struct hibernate_extent
*this, *last
= NULL
;
545 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Loading extent chain %d.", index
);
546 /* Get dev_t, prio, bmap_shift, blocks per page, positions */
547 ret
= toiActiveAllocator
->rw_header_chunk_noreadahead(READ
, NULL
,
548 (char *)&chain
->uuid
, metadata_size
);
551 printk(KERN_ERR
"Failed to read the size of extent chain.\n");
552 toi_kfree(39, chain
, sizeof(*chain
));
556 toi_bkd
.pages_used
[index
] = chain
->pages_used
;
/* Next, the number of extents in this chain. */
558 ret
= toiActiveAllocator
->rw_header_chunk_noreadahead(READ
, NULL
,
559 (char *)&chain
->blocks
.num_extents
,
562 printk(KERN_ERR
"Failed to read the size of extent chain.\n");
563 toi_kfree(39, chain
, sizeof(*chain
));
567 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "%d extents.", chain
->blocks
.num_extents
);
/* Read each extent's start/end pair and rebuild the linked list. */
569 for (i
= 0; i
< chain
->blocks
.num_extents
; i
++) {
570 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Extent %d.", i
+ 1);
572 this = toi_kzalloc(2, sizeof(struct hibernate_extent
), TOI_ATOMIC_GFP
);
574 printk(KERN_INFO
"Failed to allocate a new extent.\n");
575 free_bdev_info(chain
);
579 /* Get the next page */
580 ret
= toiActiveAllocator
->rw_header_chunk_noreadahead(READ
,
582 2 * sizeof(this->start
));
584 printk(KERN_INFO
"Failed to read an extent.\n");
585 toi_kfree(2, this, sizeof(struct hibernate_extent
));
586 free_bdev_info(chain
);
593 char b1
[32], b2
[32], b3
[32];
597 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
598 "Chain dev_t is %s. Resume dev t is %s. Header"
600 format_dev_t(b1
, chain
->dev_t
),
601 format_dev_t(b2
, resume_dev_t
),
602 format_dev_t(b3
, toi_sig_data
->header_dev_t
));
/* Reuse the already-open resume/header bdevs where possible;
 * otherwise open the device by uuid/dev_t. */
604 if (chain
->dev_t
== resume_dev_t
)
605 chain
->bdev
= resume_block_device
;
606 else if (chain
->dev_t
== toi_sig_data
->header_dev_t
)
607 chain
->bdev
= header_block_device
;
609 chain
->bdev
= toi_open_bdev(chain
->uuid
, chain
->dev_t
, 1);
610 if (IS_ERR(chain
->bdev
)) {
611 free_bdev_info(chain
);
616 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Chain bmap shift "
617 "is %d and blocks per page is %d.",
618 chain
->bmap_shift
, chain
->blocks_per_page
);
620 chain
->blocks
.first
= this;
623 * Couldn't do this earlier, but can't do
624 * goto_start now - we may have already used blocks
625 * in the first chain.
627 chain
->blocks
.current_extent
= this;
628 chain
->blocks
.current_offset
= this->start
;
631 * Can't wait until we've read the whole chain
632 * before we insert it in the list. We might need
633 * this chain to read the next page in the header
635 toi_insert_chain_in_prio_list(chain
);
639 * We have to wait until 2 extents are loaded before setting up
640 * properly because if the first extent has only one page, we
641 * will need to put the position on the second extent. Sounds
642 * obvious, but it wasn't!
645 if ((*num_loaded
) == 2)
646 set_up_start_position();
651 * Shouldn't get empty chains, but it's not impossible. Link them in so
652 * they get freed properly later.
654 if (!chain
->blocks
.num_extents
)
655 toi_insert_chain_in_prio_list(chain
);
657 if (!chain
->blocks
.current_extent
) {
658 chain
->blocks
.current_extent
= chain
->blocks
.first
;
659 if (chain
->blocks
.current_extent
)
660 chain
->blocks
.current_offset
= chain
->blocks
.current_extent
->start
;
/* Read the chain count from the header, load each chain via
 * toi_load_extent_chain, then read back the saved chain numbers.
 * Falls back to set_up_start_position if only one extent was seen. */
665 int toi_load_extent_chains(void)
670 int extents_loaded
= 0;
672 result
= toiActiveAllocator
->rw_header_chunk_noreadahead(READ
, NULL
,
673 (char *)&to_load
, sizeof(int));
676 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "%d chains to read.", to_load
);
678 for (i
= 0; i
< to_load
; i
++) {
679 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, " >> Loading chain %d/%d.", i
, to_load
);
680 result
= toi_load_extent_chain(i
, &extents_loaded
);
685 /* If we never got to a second extent, we still need to do this. */
686 if (extents_loaded
== 1)
687 set_up_start_position();
689 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Save chain numbers.");
690 result
= toiActiveAllocator
->rw_header_chunk_noreadahead(READ
,
691 &toi_blockwriter_ops
,
692 (char *)&toi_writer_posn
.
693 saved_chain_number
[0],
/* Decide whether the writer has hit the end of the current stream:
 * true when the current chain/offset equals the saved position of the
 * next section's barrier. Hitting it on the header stream (stream 0)
 * is treated as a broken-header condition. */
699 static int toi_end_of_stream(int writing
, int section_barrier
)
701 struct toi_bdev_info
*cur_chain
= toi_writer_posn
.current_chain
;
702 int compare_to
= next_section
[current_stream
];
703 struct toi_bdev_info
*compare_chain
= toi_writer_posn
.saved_chain_ptr
[compare_to
];
704 int compare_offset
= compare_chain
? compare_chain
->saved_state
[compare_to
].offset
: 0;
/* Without a barrier check there is no stream end to detect. */
706 if (!section_barrier
)
712 if (cur_chain
== compare_chain
&& cur_chain
->blocks
.current_offset
== compare_offset
) {
714 if (!current_stream
) {
715 debug_broken_header();
720 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
721 "Reached the end of stream %d "
722 "(not an error).", current_stream
);
731 * go_next_page - skip blocks to the start of the next page
732 * @writing: Whether we're reading or writing the image.
734 * Go forward one page.
/* Advances by blocks_per_page blocks (1 if no current chain); a
 * failure from toi_extent_state_next means we ran out of extents,
 * which is fatal only when writing past a section barrier. */
736 int go_next_page(int writing
, int section_barrier
)
738 struct toi_bdev_info
*cur_chain
= toi_writer_posn
.current_chain
;
739 int max
= cur_chain
? cur_chain
->blocks_per_page
: 1;
741 /* Nope. Go foward a page - or maybe two. Don't stripe the header,
742 * so that bad fragmentation doesn't put the extent data containing
743 * the location of the second page out of the first header page.
745 if (toi_extent_state_next(max
, current_stream
)) {
746 /* Don't complain if readahead falls off the end */
747 if (writing
&& section_barrier
) {
748 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Extent state eof. "
749 "Expected compression ratio too optimistic?");
750 if (test_action_state(TOI_LOGALL
))
753 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Ran out of extents to "
754 "read/write. (Not necessarily a fatal error.");
/* Count how many registered chains share @this's priority.
 * NOTE(review): the walk/counter lines are elided in this extract. */
761 int devices_of_same_priority(struct toi_bdev_info
*this)
763 struct toi_bdev_info
*check
= prio_chain_head
;
767 if (check
->prio
== this->prio
)
776 * toi_bio_rw_page - do i/o on the next disk page in the image
777 * @writing: Whether reading or writing.
778 * @page: Page to do i/o on.
779 * @is_readahead: Whether we're doing readahead
780 * @free_group: The group used in allocating the page
782 * Submit a page for reading or writing, possibly readahead.
783 * Pass the group used in allocating the page as well, as it should
784 * be freed on completion of the bio if we're writing the page.
786 int toi_bio_rw_page(int writing
, struct page
*page
, int is_readahead
, int free_group
)
788 int result
= toi_end_of_stream(writing
, 1);
789 struct toi_bdev_info
*dev_info
= toi_writer_posn
.current_chain
;
/* End of stream while writing = out of storage: abort. */
793 abort_hibernate(TOI_INSUFFICIENT_STORAGE
,
794 "Insufficient storage for your image.");
796 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Seeking to "
797 "read/write another page when stream has " "ended.");
801 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
803 writing
? "Write" : "Read", dev_info
->dev_t
, dev_info
->blocks
.current_offset
);
/* Block offset is shifted by bmap_shift to get a device sector. */
805 result
= toi_do_io(writing
, dev_info
->bdev
,
806 dev_info
->blocks
.current_offset
<< dev_info
->bmap_shift
,
807 page
, is_readahead
, 0, free_group
);
809 /* Ignore the result here - will check end of stream if come in again */
810 go_next_page(writing
, 1);
813 printk(KERN_ERR
"toi_do_io returned %d.\n", result
);
817 dev_t
get_header_dev_t(void)
819 return prio_chain_head
->dev_t
;
822 struct block_device
*get_header_bdev(void)
824 return prio_chain_head
->bdev
;
827 unsigned long get_headerblock(void)
829 return prio_chain_head
->blocks
.first
->start
<< prio_chain_head
->bmap_shift
;
/* Ask each chain's allocator to bmap its storage (resolve logical
 * blocks to physical ones). Walk/return lines are elided here. */
832 int get_main_pool_phys_params(void)
834 struct toi_bdev_info
*this = prio_chain_head
;
838 result
= this->allocator
->bio_allocator_ops
->bmap(this);
/* Reserve the first header_pages_reserved pages of storage for the
 * image header: rewind the iterator, step over that many pages, and
 * save the resulting position in slot 2 (start of pageset 2). */
847 static int apply_header_reservation(void)
851 if (!header_pages_reserved
) {
852 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "No header pages reserved at the moment.");
856 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Applying header reservation.");
858 /* Apply header space reservation */
859 toi_extent_state_goto_start();
861 for (i
= 0; i
< header_pages_reserved
; i
++)
862 if (go_next_page(1, 0))
865 /* The end of header pages will be the start of pageset 2 */
866 toi_extent_state_save(2);
868 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Finished applying header reservation.");
/* Walk the module list and ask every enabled BIO allocator module to
 * register its storage chains. */
872 static int toi_bio_register_storage(void)
875 struct toi_module_ops
*this_module
;
877 list_for_each_entry(this_module
, &toi_modules
, module_list
) {
/* Skip disabled modules and anything that isn't a BIO allocator. */
878 if (!this_module
->enabled
|| this_module
->type
!= BIO_ALLOCATOR_MODULE
)
880 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
881 "Registering storage from %s.", this_module
->name
);
882 result
= this_module
->bio_allocator_ops
->register_storage();
/* Allocate @request pages of storage plus overhead (extent metadata
 * and reserved header pages), spreading the allocation as evenly as
 * possible across chains of equal priority before dropping to lower
 * priorities; finally bmap the pages and apply the header
 * reservation. Several loop/brace lines are elided in this extract. */
890 int toi_bio_allocate_storage(unsigned long request
)
892 struct toi_bdev_info
*chain
= prio_chain_head
;
893 unsigned long to_get
= request
;
894 unsigned long extra_pages
, needed
;
898 int result
= toi_bio_register_storage();
899 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "toi_bio_allocate_storage: "
900 "Registering storage.");
903 chain
= prio_chain_head
;
905 printk("TuxOnIce: No storage was registered.\n");
910 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "toi_bio_allocate_storage: "
911 "Request is %lu pages.", request
);
/* Overhead: one unsigned long + one int of extent data per page. */
912 extra_pages
= DIV_ROUND_UP(request
* (sizeof(unsigned long)
913 + sizeof(int)), PAGE_SIZE
);
914 needed
= request
+ extra_pages
+ header_pages_reserved
;
915 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Adding %lu extra pages and %lu "
916 "for header => %lu.", extra_pages
, header_pages_reserved
, needed
);
917 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Already allocated %lu pages.", raw_pages_allocd
);
/* Only request the shortfall beyond what is already allocated. */
919 to_get
= needed
> raw_pages_allocd
? needed
- raw_pages_allocd
: 0;
920 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Need to get %lu pages.", to_get
);
923 return apply_header_reservation();
925 while (to_get
&& chain
) {
926 int num_group
= devices_of_same_priority(chain
);
927 int divisor
= num_group
- no_free
;
929 unsigned long portion
= DIV_ROUND_UP(to_get
, divisor
);
930 unsigned long got
= 0;
931 unsigned long got_this_round
= 0;
932 struct toi_bdev_info
*top
= chain
;
934 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
935 " Start of loop. To get is %lu. Divisor is %d.", to_get
, divisor
);
939 * We're aiming to spread the allocated storage as evenly
940 * as possible, but we also want to get all the storage we
941 * can off this priority.
943 for (i
= 0; i
< num_group
; i
++) {
944 struct toi_bio_allocator_ops
*ops
= chain
->allocator
->bio_allocator_ops
;
945 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
946 " Asking for %lu pages from chain %p.", portion
, chain
);
947 got
= ops
->allocate_storage(chain
, portion
);
948 toi_message(TOI_BIO
, TOI_VERBOSE
, 0,
949 " Got %lu pages from allocator %p.", got
, chain
);
952 got_this_round
+= got
;
955 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, " Loop finished. Got a "
956 "total of %lu pages from %d allocators.",
957 got_this_round
, divisor
- no_free
);
959 raw_pages_allocd
+= got_this_round
;
960 to_get
= needed
> raw_pages_allocd
? needed
- raw_pages_allocd
: 0;
963 * If we got anything from chains of this priority and we
964 * still have storage to allocate, go over this priority
967 if (got_this_round
&& to_get
)
973 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Finished allocating. Calling "
974 "get_main_pool_phys_params");
975 /* Now let swap allocator bmap the pages */
976 get_main_pool_phys_params();
978 toi_message(TOI_BIO
, TOI_VERBOSE
, 0, "Done. Reserving header.");
979 return apply_header_reservation();
/* After the atomic copy, copy each chain's pages_used count back from
 * the boot kernel data, walking the chain list in order. */
982 void toi_bio_chains_post_atomic(struct toi_boot_kernel_data
*bkd
)
985 struct toi_bdev_info
*cur_chain
= prio_chain_head
;
988 cur_chain
->pages_used
= bkd
->pages_used
[i
];
989 cur_chain
= cur_chain
->next
;
994 int toi_bio_chains_debug_info(char *buffer
, int size
)
996 /* Show what we actually used */
997 struct toi_bdev_info
*cur_chain
= prio_chain_head
;
1001 len
+= scnprintf(buffer
+ len
, size
- len
, " Used %lu pages "
1002 "from %s.\n", cur_chain
->pages_used
, cur_chain
->name
);
1003 cur_chain
= cur_chain
->next
;