/*
 * kernel/power/tuxonice_io.c
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001,2002 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002-2003 Florent Chabaud <fchabaud@free.fr>
 * Copyright (C) 2002-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * This file is released under the GPLv2.
 *
 * It contains high level IO routines for hibernating.
 */
#include <linux/suspend.h>
#include <linux/version.h>
#include <linux/utsname.h>
#include <linux/mount.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/cpu.h>
#include <linux/fs_struct.h>
#include <linux/bio.h>
#include <linux/fs_uuid.h>
#include <asm/tlbflush.h>

#include "tuxonice_modules.h"
#include "tuxonice_pageflags.h"
#include "tuxonice_io.h"
#include "tuxonice_ui.h"
#include "tuxonice_storage.h"
#include "tuxonice_prepare_image.h"
#include "tuxonice_extent.h"
#include "tuxonice_sysfs.h"
#include "tuxonice_builtin.h"
#include "tuxonice_checksum.h"
#include "tuxonice_alloc.h"
char alt_resume_param[256];

/* Version read from image header at resume */
static int toi_image_header_version;
#define read_if_version(VERS, VAR, DESC, ERR_ACT) do {			\
	if (likely(toi_image_header_version >= VERS))			\
		if (toiActiveAllocator->rw_header_chunk(READ, NULL,	\
				(char *) &VAR, sizeof(VAR))) {		\
			abort_hibernate(TOI_FAILED_IO, "Failed to read DESC."); \
			ERR_ACT;					\
		}							\
} while (0)
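/*
 * Usage sketch: fields that were added in later image header versions are
 * read through this macro so that older images simply keep the compiled-in
 * default. For example, __read_pageset1() below does
 * read_if_version(1, toi_max_workers, "TuxOnIce max workers",
 *		    goto out_remove_image);
 * and fs_info_load_and_check_one() reads the dev_t field only when the
 * header version is 3 or later.
 */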
/* Variables shared between threads and updated under the mutex */
static int io_write, io_finish_at, io_base, io_barmax, io_pageset, io_result;
static int io_index, io_nextupdate, io_pc, io_pc_step;
static DEFINE_MUTEX(io_mutex);
static DEFINE_PER_CPU(struct page *, last_sought);
static DEFINE_PER_CPU(struct page *, last_high_page);
static DEFINE_PER_CPU(char *, checksum_locn);
static DEFINE_PER_CPU(struct pbe *, last_low_page);
static atomic_t io_count;
atomic_t toi_io_workers;
EXPORT_SYMBOL_GPL(toi_io_workers);

static int using_flusher;

DECLARE_WAIT_QUEUE_HEAD(toi_io_queue_flusher);
EXPORT_SYMBOL_GPL(toi_io_queue_flusher);

int toi_bio_queue_flusher_should_finish;
EXPORT_SYMBOL_GPL(toi_bio_queue_flusher_should_finish);
static char *image_version_error = "The image header version is newer than "
	"this kernel supports.";

struct toi_module_ops *first_filter;

static atomic_t toi_num_other_threads;
static DECLARE_WAIT_QUEUE_HEAD(toi_worker_wait_queue);
enum toi_worker_commands {
	TOI_IO_WORKER_STOP,
	TOI_IO_WORKER_RUN,
	TOI_IO_WORKER_EXIT
};

static enum toi_worker_commands toi_worker_command;
/**
 * toi_attempt_to_parse_resume_device - determine if we can hibernate
 *
 * Can we hibernate, using the current resume= parameter?
 **/
int toi_attempt_to_parse_resume_device(int quiet)
{
	struct list_head *Allocator;
	struct toi_module_ops *thisAllocator;
	int result, returning = 0;
	if (toi_activate_storage(0))
		return 0;

	toiActiveAllocator = NULL;
	clear_toi_state(TOI_RESUME_DEVICE_OK);
	clear_toi_state(TOI_CAN_RESUME);
	clear_result_state(TOI_ABORTED);

	if (!toiNumAllocators) {
		if (!quiet)
			printk(KERN_INFO "TuxOnIce: No storage allocators have "
				"been registered. Hibernating will be "
				"disabled.\n");
		goto cleanup;
	}

	list_for_each(Allocator, &toiAllocators) {
		thisAllocator = list_entry(Allocator, struct toi_module_ops,
				type_list);

		/*
		 * Not sure why you'd want to disable an allocator, but
		 * we should honour the flag if we're providing it.
		 */
		if (!thisAllocator->enabled)
			continue;

		result = thisAllocator->parse_sig_location(resume_file,
				(toiNumAllocators == 1), quiet);

		if (result) {
			/* For this allocator, but not a valid
			 * configuration. Error already printed. */
			goto cleanup;
		}

		/* For this allocator and valid. */
		toiActiveAllocator = thisAllocator;

		set_toi_state(TOI_RESUME_DEVICE_OK);
		set_toi_state(TOI_CAN_RESUME);
		returning = 1;
		goto cleanup;
	}

	if (!quiet)
		printk(KERN_INFO "TuxOnIce: No matching enabled allocator "
			"found. Resuming disabled.\n");

cleanup:
	toi_deactivate_storage(0);
	return returning;
}
EXPORT_SYMBOL_GPL(toi_attempt_to_parse_resume_device);
void attempt_to_parse_resume_device2(void)
{
	toi_attempt_to_parse_resume_device(0);
}
EXPORT_SYMBOL_GPL(attempt_to_parse_resume_device2);
void save_restore_alt_param(int replace, int quiet)
{
	static char resume_param_save[255];
	static unsigned long toi_state_save;

	if (replace) {
		toi_state_save = toi_state;
		strncpy(resume_param_save, resume_file,
				sizeof(resume_param_save) - 1);
		strcpy(resume_file, alt_resume_param);
	} else {
		strcpy(resume_file, resume_param_save);
		toi_state = toi_state_save;
	}
	toi_attempt_to_parse_resume_device(quiet);
}
void attempt_to_parse_alt_resume_param(void)
{
	int ok = 0;

	/* Temporarily set resume_param to the poweroff value */
	if (!strlen(alt_resume_param))
		return;

	printk(KERN_INFO "=== Trying Poweroff Resume2 ===\n");
	save_restore_alt_param(SAVE, NOQUIET);
	if (test_toi_state(TOI_CAN_RESUME))
		ok = 1;

	printk(KERN_INFO "=== Done ===\n");
	save_restore_alt_param(RESTORE, QUIET);

	/* If not ok, clear the string */
	if (!ok) {
		printk(KERN_INFO "Can't resume from that location; clearing "
				"alt_resume_param.\n");
		alt_resume_param[0] = '\0';
	}
}
/**
 * noresume_reset_modules - reset data structures in case of non resuming
 *
 * When we read the start of an image, modules (and especially the
 * active allocator) might need to reset data structures if we
 * decide to remove the image rather than resuming from it.
 **/
static void noresume_reset_modules(void)
{
	struct toi_module_ops *this_filter;

	list_for_each_entry(this_filter, &toi_filters, type_list)
		if (this_filter->noresume_reset)
			this_filter->noresume_reset();

	if (toiActiveAllocator && toiActiveAllocator->noresume_reset)
		toiActiveAllocator->noresume_reset();
}
/**
 * fill_toi_header - fill the hibernate header structure
 * @struct toi_header: Header data structure to be filled.
 **/
static int fill_toi_header(struct toi_header *sh)
{
	int i, error;

	error = init_header((struct swsusp_info *) sh);
	if (error)
		return error;

	sh->pagedir = pagedir1;
	sh->pageset_2_size = pagedir2.size;
	sh->param0 = toi_result;
	sh->param1 = toi_bkd.toi_action;
	sh->param2 = toi_bkd.toi_debug_state;
	sh->param3 = toi_bkd.toi_default_console_level;
	sh->root_fs = current->fs->root.mnt->mnt_sb->s_dev;
	for (i = 0; i < 4; i++)
		sh->io_time[i / 2][i % 2] = toi_bkd.toi_io_time[i / 2][i % 2];
	sh->bkd = boot_kernel_data_buffer;
	return 0;
}
/**
 * rw_init_modules - initialize modules
 * @rw: Whether we are reading or writing an image.
 * @which: Section of the image being processed.
 *
 * Iterate over modules, preparing the ones that will be used to read or write
 * data.
 **/
static int rw_init_modules(int rw, int which)
{
	struct toi_module_ops *this_module;

	/* Initialise page transformers */
	list_for_each_entry(this_module, &toi_filters, type_list) {
		if (!this_module->enabled)
			continue;
		if (this_module->rw_init && this_module->rw_init(rw, which)) {
			abort_hibernate(TOI_FAILED_MODULE_INIT,
					"Failed to initialize the %s filter.",
					this_module->name);
			return 1;
		}
	}

	/* Initialise allocator */
	if (toiActiveAllocator->rw_init(rw, which)) {
		abort_hibernate(TOI_FAILED_MODULE_INIT,
				"Failed to initialise the allocator.");
		return 1;
	}

	/* Initialise other modules */
	list_for_each_entry(this_module, &toi_modules, module_list) {
		if (!this_module->enabled ||
		    this_module->type == FILTER_MODULE ||
		    this_module->type == WRITER_MODULE)
			continue;
		if (this_module->rw_init && this_module->rw_init(rw, which)) {
			set_abort_result(TOI_FAILED_MODULE_INIT);
			printk(KERN_INFO "Setting aborted flag due to module "
					"init failure.\n");
			return 1;
		}
	}

	return 0;
}
/**
 * rw_cleanup_modules - cleanup modules
 * @rw: Whether we are reading or writing an image.
 *
 * Cleanup components after reading or writing a set of pages.
 * Only the allocator may fail.
 **/
static int rw_cleanup_modules(int rw)
{
	struct toi_module_ops *this_module;
	int result = 0;

	/* Cleanup other modules */
	list_for_each_entry(this_module, &toi_modules, module_list) {
		if (!this_module->enabled ||
		    this_module->type == FILTER_MODULE ||
		    this_module->type == WRITER_MODULE)
			continue;
		if (this_module->rw_cleanup)
			result |= this_module->rw_cleanup(rw);
	}

	/* Flush data and cleanup */
	list_for_each_entry(this_module, &toi_filters, type_list) {
		if (!this_module->enabled)
			continue;
		if (this_module->rw_cleanup)
			result |= this_module->rw_cleanup(rw);
	}

	result |= toiActiveAllocator->rw_cleanup(rw);

	return result;
}
static struct page *copy_page_from_orig_page(struct page *orig_page,
		int is_high)
{
	struct page *high_page = NULL,
		    **my_last_high_page = &__get_cpu_var(last_high_page),
		    **my_last_sought = &__get_cpu_var(last_sought);
	struct pbe *this, **my_last_low_page = &__get_cpu_var(last_low_page);
	void *compare;
	int index, min = 0, max = PBES_PER_PAGE - 1;

	if (is_high) {
		if (*my_last_sought && *my_last_high_page &&
		    *my_last_sought < orig_page)
			high_page = *my_last_high_page;
		else
			high_page = (struct page *) restore_highmem_pblist;
		this = (struct pbe *) kmap(high_page);
		compare = (void *) orig_page;
	} else {
		if (*my_last_sought && *my_last_low_page &&
		    *my_last_sought < orig_page)
			this = *my_last_low_page;
		else
			this = restore_pblist;
		compare = page_address(orig_page);
	}

	*my_last_sought = orig_page;

	/* Locate page containing pbe */
	while (this[PBES_PER_PAGE - 1].next &&
	       this[PBES_PER_PAGE - 1].orig_address < compare) {
		if (is_high) {
			struct page *next_high_page = (struct page *)
				this[PBES_PER_PAGE - 1].next;

			kunmap(high_page);
			this = kmap(next_high_page);
			high_page = next_high_page;
		} else
			this = this[PBES_PER_PAGE - 1].next;
	}

	/* Do a binary search within the page */
	index = PBES_PER_PAGE / 2;
	while (min <= max) {
		if (!this[index].orig_address ||
		    this[index].orig_address > compare)
			max = index - 1;
		else if (this[index].orig_address == compare) {
			if (is_high) {
				struct page *page = this[index].address;

				*my_last_high_page = high_page;
				kunmap(high_page);
				return page;
			}
			*my_last_low_page = this;
			return virt_to_page(this[index].address);
		} else
			min = index + 1;
		index = ((max + min) / 2);
	}

	if (is_high)
		kunmap(high_page);

	abort_hibernate(TOI_FAILED_IO, "Failed to get destination page for"
			" orig page %p. This[min].orig_address=%p.\n", orig_page,
			this[index].orig_address);
	return NULL;
}
/**
 * write_next_page - write the next page in a pageset
 * @data_pfn: The pfn where the next data to write is located.
 * @my_io_index: The index of the page in the pageset.
 * @write_pfn: The pfn number to write in the image (where the data belongs).
 *
 * Get the pfn of the next page to write, map the page if necessary and do the
 * write.
 **/
static int write_next_page(unsigned long *data_pfn, int *my_io_index,
		unsigned long *write_pfn)
{
	struct page *page;
	char **my_checksum_locn = &__get_cpu_var(checksum_locn);
	int result = 0, was_present;

	*data_pfn = memory_bm_next_pfn(io_map);

	/* Another thread could have beaten us to it. */
	if (*data_pfn == BM_END_OF_MAP) {
		if (atomic_read(&io_count))
			printk(KERN_INFO "Ran out of pfns but io_count is "
					"still %d.\n", atomic_read(&io_count));
		mutex_unlock(&io_mutex);
		return -ENODATA;
	}

	*my_io_index = io_finish_at - atomic_sub_return(1, &io_count);

	memory_bm_clear_bit(io_map, *data_pfn);
	page = pfn_to_page(*data_pfn);

	was_present = kernel_page_present(page);
	if (!was_present)
		kernel_map_pages(page, 1, 1);

	if (io_pageset == 1)
		*write_pfn = memory_bm_next_pfn(pageset1_map);
	else {
		*write_pfn = *data_pfn;
		*my_checksum_locn = tuxonice_get_next_checksum();
	}

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Write %d:%ld.", *my_io_index,
			*write_pfn);

	mutex_unlock(&io_mutex);

	if (io_pageset == 2 && tuxonice_calc_checksum(page, *my_checksum_locn))
		return 1;

	result = first_filter->write_page(*write_pfn, TOI_PAGE, page,
			PAGE_SIZE);

	if (!was_present)
		kernel_map_pages(page, 1, 0);

	return result;
}
/**
 * read_next_page - read the next page in a pageset
 * @my_io_index: The index of the page in the pageset.
 * @write_pfn: The pfn in which the data belongs.
 *
 * Read a page of the image into our buffer. It can happen (here and in the
 * write routine) that threads don't get run until after other CPUs have done
 * all the work. This was the cause of the long standing issue with
 * occasionally getting -ENODATA errors at the end of reading the image. We
 * therefore need to check there's actually a page to read before trying to
 * read one.
 **/
static int read_next_page(int *my_io_index, unsigned long *write_pfn,
		struct page *buffer)
{
	unsigned int buf_size = PAGE_SIZE;
	unsigned long left = atomic_read(&io_count);

	if (!left)
		return -ENODATA;

	/* Start off assuming the page we read isn't resaved */
	*my_io_index = io_finish_at - atomic_sub_return(1, &io_count);

	mutex_unlock(&io_mutex);

	/*
	 * Are we aborting? If so, don't submit any more I/O as
	 * resetting the resume_attempted flag (from ui.c) will
	 * clear the bdev flags, making this thread oops.
	 */
	if (unlikely(test_toi_state(TOI_STOP_RESUME))) {
		atomic_dec(&toi_io_workers);
		if (!atomic_read(&toi_io_workers)) {
			/*
			 * So we can be sure we'll have memory for
			 * marking that we haven't resumed.
			 */
			rw_cleanup_modules(READ);
			set_toi_state(TOI_IO_STOPPED);
		}
		while (1)
			schedule();
	}

	/*
	 * See toi_bio_read_page in tuxonice_bio.c:
	 * read the next page in the image.
	 */
	return first_filter->read_page(write_pfn, TOI_PAGE, buffer, &buf_size);
}
static void use_read_page(unsigned long write_pfn, struct page *buffer)
{
	struct page *final_page = pfn_to_page(write_pfn),
		    *copy_page = final_page;
	char *virt, *buffer_virt;
	int was_present, cpu = smp_processor_id();
	unsigned long idx = 0;

	if (io_pageset == 1 && (!pageset1_copy_map ||
	    !memory_bm_test_bit_index(pageset1_copy_map, write_pfn, cpu))) {
		int is_high = PageHighMem(final_page);

		copy_page = copy_page_from_orig_page(is_high ?
				(void *) write_pfn : final_page, is_high);
	}

	if (!memory_bm_test_bit_index(io_map, write_pfn, cpu)) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Discard %ld.", write_pfn);
		mutex_lock(&io_mutex);
		idx = atomic_add_return(1, &io_count);
		mutex_unlock(&io_mutex);
	} else {
		virt = kmap(copy_page);
		buffer_virt = kmap(buffer);
		was_present = kernel_page_present(copy_page);
		if (!was_present)
			kernel_map_pages(copy_page, 1, 1);
		memcpy(virt, buffer_virt, PAGE_SIZE);
		if (!was_present)
			kernel_map_pages(copy_page, 1, 0);
		kunmap(copy_page);
		kunmap(buffer);
		memory_bm_clear_bit_index(io_map, write_pfn, cpu);
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Read %d:%ld", idx,
				write_pfn);
	}
}
static unsigned long status_update(int writing, unsigned long done,
		unsigned long ticks)
{
	int cs_index = writing ? 0 : 1;
	unsigned long ticks_so_far = toi_bkd.toi_io_time[cs_index][1] + ticks;
	unsigned long msec = jiffies_to_msecs(abs(ticks_so_far));
	unsigned long pgs_per_s, estimate = 0, pages_left;

	if (msec) {
		pages_left = io_barmax - done;
		pgs_per_s = 1000 * done / msec;
		if (pgs_per_s)
			estimate = DIV_ROUND_UP(pages_left, pgs_per_s);
	}

	if (estimate && ticks > HZ / 2)
		return toi_update_status(done, io_barmax,
				" %d/%d MB (%lu sec left)",
				MB(done + 1), MB(io_barmax), estimate);

	return toi_update_status(done, io_barmax, " %d/%d MB",
			MB(done + 1), MB(io_barmax));
}
/**
 * worker_rw_loop - main loop to read/write pages
 *
 * The main I/O loop for reading or writing pages. The io_map bitmap is used
 * to track the pages to read/write.
 * If we are reading, the pages are loaded to their final (mapped) pfn.
 * Data is non zero iff this is a thread started via start_other_threads.
 * In that case, we stay in here until told to quit.
 **/
static int worker_rw_loop(void *data)
{
	unsigned long data_pfn, write_pfn, next_jiffies = jiffies + HZ / 4,
		      jif_index = 1, start_time = jiffies, thread_num;
	int result = 0, my_io_index = 0, last_worker;
	struct page *buffer = toi_alloc_page(28, TOI_ATOMIC_GFP);
	cpumask_var_t orig_mask;

	if (!alloc_cpumask_var(&orig_mask, GFP_KERNEL)) {
		printk(KERN_EMERG "Failed to allocate cpumask for TuxOnIce "
				"I/O thread %ld.\n", (unsigned long) data);
		return -ENOMEM;
	}

	cpumask_copy(orig_mask, tsk_cpus_allowed(current));

	current->flags |= PF_NOFREEZE;

top:
	mutex_lock(&io_mutex);
	thread_num = atomic_read(&toi_io_workers);

	cpumask_copy(tsk_cpus_allowed(current), orig_mask);

	atomic_inc(&toi_io_workers);
	while (atomic_read(&io_count) >= atomic_read(&toi_io_workers) &&
	       !(io_write && test_result_state(TOI_ABORTED)) &&
	       toi_worker_command == TOI_IO_WORKER_RUN) {
		if (!thread_num && jiffies > next_jiffies) {
			next_jiffies += HZ / 4;
			if (toiActiveAllocator->update_throughput_throttle)
				toiActiveAllocator->update_throughput_throttle(
						jif_index);
			jif_index++;
		}

		/*
		 * What page to use? If reading, don't know yet which page's
		 * data will be read, so always use the buffer. If writing,
		 * use the copy (Pageset1) or original page (Pageset2), but
		 * always write the pfn of the original page.
		 */
		if (io_write)
			result = write_next_page(&data_pfn, &my_io_index,
					&write_pfn);
		else
			result = read_next_page(&my_io_index, &write_pfn,
					buffer);

		if (result) {
			mutex_lock(&io_mutex);

			if (result == -ENODATA) {
				toi_message(TOI_IO, TOI_VERBOSE, 0,
						"Thread %d has no more work.",
						smp_processor_id());
				break;
			}

			io_result = result;

			if (io_write) {
				printk(KERN_INFO "Write chunk returned %d.\n",
						result);
				abort_hibernate(TOI_FAILED_IO,
						"Failed to write a chunk of "
						"the image.");
				break;
			}

			if (io_pageset == 1) {
				printk(KERN_ERR "\nBreaking out of I/O loop "
						"because of result code %d.\n",
						result);
				break;
			}
			panic("Read chunk returned (%d)", result);
		}

		if (!io_write) {
			/*
			 * Discard reads of resaved pages while reading ps2
			 * and unwanted pages while rereading ps2 when aborting.
			 */
			if (!PageResave(pfn_to_page(write_pfn)))
				use_read_page(write_pfn, buffer);
			else {
				mutex_lock(&io_mutex);
				toi_message(TOI_IO, TOI_VERBOSE, 0,
						"Resaved %ld.", write_pfn);
				atomic_inc(&io_count);
				mutex_unlock(&io_mutex);
			}
		}

		if (my_io_index + io_base > io_nextupdate)
			io_nextupdate = status_update(io_write,
					my_io_index + io_base,
					jiffies - start_time);

		if (my_io_index > io_pc) {
			printk(KERN_CONT "...%d%%", 20 * io_pc_step);
			io_pc_step++;
			io_pc = io_finish_at * io_pc_step / 5;
		}

		toi_cond_pause(0, NULL);

		/*
		 * Subtle: If there's less I/O still to be done than threads
		 * running, quit. This stops us doing I/O beyond the end of
		 * the image when reading.
		 *
		 * Possible race condition. Two threads could do the test at
		 * the same time; one should exit and one should continue.
		 * Therefore we take the mutex before comparing and exiting.
		 */
		mutex_lock(&io_mutex);
	}
	last_worker = atomic_dec_and_test(&toi_io_workers);
	toi_message(TOI_IO, TOI_VERBOSE, 0, "%d workers left.",
			atomic_read(&toi_io_workers));
	mutex_unlock(&io_mutex);

	if ((unsigned long) data && toi_worker_command != TOI_IO_WORKER_EXIT) {
		/* Were we the last thread and we're using a flusher thread? */
		if (last_worker && using_flusher)
			toiActiveAllocator->finish_all_io();

		/* First, if we're doing I/O, wait for it to finish */
		wait_event(toi_worker_wait_queue,
				toi_worker_command != TOI_IO_WORKER_RUN);
		/* Then wait to be told what to do next */
		wait_event(toi_worker_wait_queue,
				toi_worker_command != TOI_IO_WORKER_STOP);
		if (toi_worker_command == TOI_IO_WORKER_RUN)
			goto top;
	}

	if ((unsigned long) data)
		atomic_dec(&toi_num_other_threads);

	toi_message(TOI_IO, TOI_LOW, 0, "Thread %d exiting.", thread_num);
	toi__free_page(28, buffer);
	free_cpumask_var(orig_mask);

	return result;
}
int toi_start_other_threads(void)
{
	int cpu;
	struct task_struct *p;
	int to_start = (toi_max_workers ? toi_max_workers :
			num_online_cpus()) - 1;
	unsigned long num_started = 0;

	if (test_action_state(TOI_NO_MULTITHREADED_IO))
		return 0;

	toi_worker_command = TOI_IO_WORKER_STOP;

	for_each_online_cpu(cpu) {
		if (num_started == to_start)
			break;

		if (cpu == smp_processor_id())
			continue;

		p = kthread_create_on_node(worker_rw_loop,
				(void *) num_started + 1,
				cpu_to_node(cpu), "ktoi_io/%d", cpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "ktoi_io for %i failed\n", cpu);
			continue;
		}
		kthread_bind(p, cpu);
		p->flags |= PF_MEMALLOC;
		wake_up_process(p);
		num_started++;
		atomic_inc(&toi_num_other_threads);
	}

	hib_warn("Started %ld threads.", num_started);
	toi_message(TOI_IO, TOI_LOW, 0, "Started %d threads.", num_started);
	return num_started;
}
void toi_stop_other_threads(void)
{
	toi_message(TOI_IO, TOI_LOW, 0, "Stopping other threads.");
	toi_worker_command = TOI_IO_WORKER_EXIT;
	wake_up(&toi_worker_wait_queue);
}
/**
 * do_rw_loop - main highlevel function for reading or writing pages
 *
 * Create the io_map bitmap and call worker_rw_loop to perform I/O operations.
 **/
static int do_rw_loop(int write, int finish_at,
		struct memory_bitmap *pageflags,
		int base, int barmax, int pageset)
{
	int index = 0, cpu, result = 0, workers_started;
	unsigned long pfn;

	first_filter = toi_get_next_filter(NULL);

	io_write = write;
	io_finish_at = finish_at;
	io_base = base;
	io_barmax = barmax;
	io_pageset = pageset;
	io_index = 0;
	io_pc = io_finish_at / 5;
	io_pc_step = 1;
	io_result = 0;
	io_nextupdate = base + 1;
	toi_bio_queue_flusher_should_finish = 0;
	for_each_online_cpu(cpu) {
		per_cpu(last_sought, cpu) = NULL;
		per_cpu(last_low_page, cpu) = NULL;
		per_cpu(last_high_page, cpu) = NULL;
	}

	/* Ensure all bits clear */
	memory_bm_clear(io_map);

	/* Set the bits for the pages to write */
	memory_bm_position_reset(pageflags);

	pfn = memory_bm_next_pfn(pageflags);

	while (pfn != BM_END_OF_MAP && index < finish_at) {
		memory_bm_set_bit(io_map, pfn);
		pfn = memory_bm_next_pfn(pageflags);
		index++;
	}

	BUG_ON(index < finish_at);

	atomic_set(&io_count, finish_at);

	memory_bm_position_reset(pageset1_map);

	mutex_lock(&io_mutex);

	clear_toi_state(TOI_IO_STOPPED);

	using_flusher = (atomic_read(&toi_num_other_threads) &&
			 toiActiveAllocator->io_flusher &&
			 !test_action_state(TOI_NO_FLUSHER_THREAD));

	workers_started = atomic_read(&toi_num_other_threads);

	memory_bm_set_iterators(io_map,
			atomic_read(&toi_num_other_threads) + 1);
	memory_bm_position_reset(io_map);

	memory_bm_set_iterators(pageset1_copy_map,
			atomic_read(&toi_num_other_threads) + 1);
	memory_bm_position_reset(pageset1_copy_map);

	toi_worker_command = TOI_IO_WORKER_RUN;
	wake_up(&toi_worker_wait_queue);

	mutex_unlock(&io_mutex);
	if (using_flusher)
		result = toiActiveAllocator->io_flusher(write);
	else
		worker_rw_loop(NULL);

	while (atomic_read(&toi_io_workers))
		schedule();

	printk(KERN_CONT "\n");

	toi_worker_command = TOI_IO_WORKER_STOP;
	wake_up(&toi_worker_wait_queue);

	if (unlikely(test_toi_state(TOI_STOP_RESUME))) {
		if (!atomic_read(&toi_io_workers)) {
			rw_cleanup_modules(READ);
			set_toi_state(TOI_IO_STOPPED);
		}
		while (1)
			schedule();
	}
	set_toi_state(TOI_IO_STOPPED);

	if (!io_result && !result && !test_result_state(TOI_ABORTED)) {
		unsigned long next;

		toi_update_status(io_base + io_finish_at, io_barmax,
				" %d/%d MB ",
				MB(io_base + io_finish_at), MB(io_barmax));

		memory_bm_position_reset(io_map);
		next = memory_bm_next_pfn(io_map);
		if (next != BM_END_OF_MAP) {
			printk(KERN_INFO "Finished I/O loop but still work to "
					"do?\nFinish at = %d. io_count = %d.\n",
					finish_at, atomic_read(&io_count));
			printk(KERN_INFO "I/O bitmap still records work to do."
					" %ld.\n", next);
			hib_err("Finish at = %d. io_count = %d. next: %lu\n",
					finish_at, atomic_read(&io_count),
					next);
#ifdef CONFIG_TOI_FIXUP
#endif
		}
	}

	return io_result ? io_result : result;
}
/**
 * write_pageset - write a pageset to disk.
 * @pagedir: Which pagedir to write.
 *
 * Returns:
 *	Zero on success or -1 on failure.
 **/
int write_pageset(struct pagedir *pagedir)
{
	int finish_at, base = 0;
	int barmax = pagedir1.size + pagedir2.size;
	long error = 0;
	struct memory_bitmap *pageflags;
	unsigned long start_time, end_time;
	/*
	 * Even if there is nothing to read or write, the allocator
	 * may need the init/cleanup for its housekeeping. (eg:
	 * Pageset1 may start where pageset2 ends when writing).
	 */
	finish_at = pagedir->size;

	if (pagedir->id == 1) {
		hib_log("start writing kernel & process data...\n");
		toi_prepare_status(DONT_CLEAR_BAR,
				"Writing kernel & process data...");
		base = pagedir2.size;
		if (test_action_state(TOI_TEST_FILTER_SPEED) ||
		    test_action_state(TOI_TEST_BIO))
			pageflags = pageset1_map;
		else
			pageflags = pageset1_copy_map;
	} else {
		hib_log("start writing caches...\n");
		toi_prepare_status(DONT_CLEAR_BAR, "Writing caches...");
		pageflags = pageset2_map;
	}
	start_time = jiffies;

	if (rw_init_modules(1, pagedir->id)) {
		abort_hibernate(TOI_FAILED_MODULE_INIT,
				"Failed to initialise modules for writing.");
		error = 1;
	}

	if (!error)
		error = do_rw_loop(1, finish_at, pageflags, base, barmax,
				pagedir->id);

	if (rw_cleanup_modules(WRITE) && !error) {
		abort_hibernate(TOI_FAILED_MODULE_CLEANUP,
				"Failed to cleanup after writing.");
		error = 1;
	}

	end_time = jiffies;

	if ((end_time - start_time) && (!test_result_state(TOI_ABORTED))) {
		toi_bkd.toi_io_time[0][0] += finish_at,
		toi_bkd.toi_io_time[0][1] += (end_time - start_time);
	}

	hib_log("@line:%d return value(%ld)\n", __LINE__, error);
	return error;
}
/**
 * read_pageset - highlevel function to read a pageset from disk
 * @pagedir: pageset to read
 * @overwrittenpagesonly: Whether to read the whole pageset or
 *	only the pages that would have been overwritten.
 *
 * Returns:
 *	Zero on success or -1 on failure.
 **/
static int read_pageset(struct pagedir *pagedir, int overwrittenpagesonly)
{
	int result = 0, base = 0;
	int finish_at = pagedir->size;
	int barmax = pagedir1.size + pagedir2.size;
	struct memory_bitmap *pageflags;
	unsigned long start_time, end_time;

	if (pagedir->id == 1) {
		toi_prepare_status(DONT_CLEAR_BAR,
				"Reading kernel & process data...");
		pageflags = pageset1_map;
	} else {
		toi_prepare_status(DONT_CLEAR_BAR, "Reading caches...");
		if (overwrittenpagesonly) {
			barmax = min(pagedir1.size, pagedir2.size);
			finish_at = min(pagedir1.size, pagedir2.size);
		}
		base = pagedir1.size;
		pageflags = pageset2_map;
	}

	start_time = jiffies;

	if (rw_init_modules(0, pagedir->id)) {
		toiActiveAllocator->remove_image();
		result = 1;
	} else
		result = do_rw_loop(0, finish_at, pageflags, base, barmax,
				pagedir->id);

	if (rw_cleanup_modules(READ) && !result) {
		abort_hibernate(TOI_FAILED_MODULE_CLEANUP,
				"Failed to cleanup after reading.");
		result = 1;
	}

	end_time = jiffies;

	if ((end_time - start_time) && (!test_result_state(TOI_ABORTED))) {
		toi_bkd.toi_io_time[1][0] += finish_at,
		toi_bkd.toi_io_time[1][1] += (end_time - start_time);
	}

	return result;
}
/**
 * write_module_configs - store the modules configuration
 *
 * The configuration for each module is stored in the image header.
 *
 * Returns:
 *	Zero on success, Error value otherwise.
 **/
static int write_module_configs(void)
{
	struct toi_module_ops *this_module;
	char *buffer = (char *) toi_get_zeroed_page(22, TOI_ATOMIC_GFP);
	int len, index = 1;
	struct toi_module_header toi_module_header;

	if (!buffer) {
		printk(KERN_INFO "Failed to allocate a buffer for saving "
				"module configuration info.\n");
		return -ENOMEM;
	}

	/*
	 * We have to know which data goes with which module, so we at
	 * least write a length of zero for a module. Note that we are
	 * also assuming every module's config data takes <= PAGE_SIZE.
	 */

	/* For each module (in registration order) */
	list_for_each_entry(this_module, &toi_modules, module_list) {
		if (!this_module->enabled || !this_module->storage_needed ||
		    (this_module->type == WRITER_MODULE &&
		     toiActiveAllocator != this_module))
			continue;

		/* Get the data from the module */
		len = 0;
		if (this_module->save_config_info)
			len = this_module->save_config_info(buffer);

		/* Save the details of the module */
		toi_module_header.enabled = this_module->enabled;
		toi_module_header.type = this_module->type;
		toi_module_header.index = index++;
		strncpy(toi_module_header.name, this_module->name,
				sizeof(toi_module_header.name) - 1);
		toiActiveAllocator->rw_header_chunk(WRITE, this_module,
				(char *) &toi_module_header,
				sizeof(toi_module_header));

		/* Save the size of the data and any data returned */
		toiActiveAllocator->rw_header_chunk(WRITE, this_module,
				(char *) &len, sizeof(int));
		if (len)
			toiActiveAllocator->rw_header_chunk(WRITE, this_module,
					buffer, len);
	}

	/* Write a blank header to terminate the list */
	toi_module_header.name[0] = '\0';
	toiActiveAllocator->rw_header_chunk(WRITE, NULL,
			(char *) &toi_module_header,
			sizeof(toi_module_header));

	toi_free_page(22, (unsigned long) buffer);
	return 0;
}
/**
 * read_one_module_config - read and configure one module
 *
 * Read the configuration for one module, and configure the module
 * to match if it is loaded.
 *
 * Returns:
 *	Zero on success, Error value otherwise.
 **/
static int read_one_module_config(struct toi_module_header *header)
{
	struct toi_module_ops *this_module;
	int result, len;
	char *buffer;

	/* Find the module */
	this_module = toi_find_module_given_name(header->name);

	if (!this_module) {
		if (header->enabled) {
			toi_early_boot_message(1, TOI_CONTINUE_REQ,
				"It looks like we need module %s for reading "
				"the image but it hasn't been registered.\n",
				header->name);
			if (!(test_toi_state(TOI_CONTINUE_REQ)))
				return -EINVAL;
		} else
			printk(KERN_INFO "Module %s configuration data found, "
				"but the module hasn't registered. Looks like "
				"it was disabled, so we're ignoring its data.",
				header->name);
	}

	/* Get the length of the data (if any) */
	result = toiActiveAllocator->rw_header_chunk(READ, NULL, (char *) &len,
			sizeof(int));
	if (result) {
		printk(KERN_ERR "Failed to read the length of the module %s's"
				" configuration data.\n", header->name);
		return -EINVAL;
	}

	/* Read any data and pass to the module (if we found one) */
	if (!len)
		return 0;

	buffer = (char *) toi_get_zeroed_page(23, TOI_ATOMIC_GFP);
	if (!buffer) {
		printk(KERN_ERR "Failed to allocate a buffer for reloading "
				"module configuration info.\n");
		return -ENOMEM;
	}

	toiActiveAllocator->rw_header_chunk(READ, NULL, buffer, len);

	if (!this_module)
		goto out;

	if (!this_module->save_config_info)
		printk(KERN_ERR "Huh? Module %s appears to have a "
				"save_config_info, but not a load_config_info "
				"function!\n", this_module->name);
	else
		this_module->load_config_info(buffer, len);

	/*
	 * Now move this module to the tail of its lists. This will put it in
	 * order. Any new modules will end up at the top of the lists. They
	 * should have been set to disabled when loaded (people will
	 * normally not edit an initrd to load a new module and then hibernate
	 * without using it!).
	 */
	toi_move_module_tail(this_module);
	this_module->enabled = header->enabled;

out:
	toi_free_page(23, (unsigned long) buffer);
	return 0;
}
/**
 * read_module_configs - reload module configurations from the image header.
 *
 * Returns:
 *	Zero on success or an error code.
 **/
static int read_module_configs(void)
{
	int result = 0;
	struct toi_module_header toi_module_header;
	struct toi_module_ops *this_module;

	/* All modules are initially disabled. That way, if we have a module
	 * loaded now that wasn't loaded when we hibernated, it won't be used
	 * in trying to read the data.
	 */
	list_for_each_entry(this_module, &toi_modules, module_list)
		this_module->enabled = 0;

	/* Get the first module header */
	result = toiActiveAllocator->rw_header_chunk(READ, NULL,
			(char *) &toi_module_header,
			sizeof(toi_module_header));
	if (result) {
		printk(KERN_ERR "Failed to read the next module header.\n");
		return -EIO;
	}

	/* For each module (in registration order) */
	while (toi_module_header.name[0]) {
		result = read_one_module_config(&toi_module_header);
		if (result)
			return -EINVAL;

		/* Get the next module header */
		result = toiActiveAllocator->rw_header_chunk(READ, NULL,
				(char *) &toi_module_header,
				sizeof(toi_module_header));
		if (result) {
			printk(KERN_ERR "Failed to read the next module "
					"header.\n");
			return -EIO;
		}
	}

	return 0;
}
static inline int save_fs_info(struct fs_info *fs, struct block_device *bdev)
{
#ifdef CONFIG_TOI_ENHANCE
	char buf[BDEVNAME_SIZE];

	bdevname(bdev, buf);
	if (!toi_ignore_late_initcall() && strstr(buf, "dm-"))
		return 0;
#endif

	return (!fs || IS_ERR(fs) || !fs->last_mount_size) ? 0 : 1;
}
int fs_info_space_needed(void)
{
	const struct super_block *sb;
	int result = sizeof(int);

	list_for_each_entry(sb, &super_blocks, s_list) {
		struct fs_info *fs;

		if (!sb->s_bdev)
			continue;

		fs = fs_info_from_block_dev(sb->s_bdev);
		if (save_fs_info(fs, sb->s_bdev))
			result += 16 + sizeof(dev_t) + sizeof(int) +
				fs->last_mount_size;
		free_fs_info(fs);
	}

	return result;
}
static int fs_info_num_to_save(void)
{
	const struct super_block *sb;
	int to_save = 0;

	list_for_each_entry(sb, &super_blocks, s_list) {
		struct fs_info *fs;

		if (!sb->s_bdev)
			continue;

		fs = fs_info_from_block_dev(sb->s_bdev);
		if (save_fs_info(fs, sb->s_bdev))
			to_save++;
		free_fs_info(fs);
	}

	return to_save;
}
static int fs_info_save(void)
{
	const struct super_block *sb;
	int to_save = fs_info_num_to_save();

	if (toiActiveAllocator->rw_header_chunk(WRITE, NULL,
			(char *) &to_save, sizeof(int))) {
		abort_hibernate(TOI_FAILED_IO, "Failed to write num fs_info "
				"to save.");
		return -EIO;
	}

	list_for_each_entry(sb, &super_blocks, s_list) {
		struct fs_info *fs;

		if (!sb->s_bdev)
			continue;

		fs = fs_info_from_block_dev(sb->s_bdev);
		if (save_fs_info(fs, sb->s_bdev)) {
			if (toiActiveAllocator->rw_header_chunk(WRITE, NULL,
					&fs->uuid[0], 16)) {
				abort_hibernate(TOI_FAILED_IO,
						"Failed to write uuid.");
				return -EIO;
			}
			if (toiActiveAllocator->rw_header_chunk(WRITE, NULL,
					(char *) &fs->dev_t, sizeof(dev_t))) {
				abort_hibernate(TOI_FAILED_IO,
						"Failed to write dev_t.");
				return -EIO;
			}
			if (toiActiveAllocator->rw_header_chunk(WRITE, NULL,
					(char *) &fs->last_mount_size,
					sizeof(int))) {
				abort_hibernate(TOI_FAILED_IO,
						"Failed to write last mount "
						"length.");
				return -EIO;
			}
			if (toiActiveAllocator->rw_header_chunk(WRITE, NULL,
					fs->last_mount,
					fs->last_mount_size)) {
				abort_hibernate(TOI_FAILED_IO,
						"Failed to write last mount "
						"timestamp.");
				return -EIO;
			}
		}
		free_fs_info(fs);
	}

	return 0;
}
static int fs_info_load_and_check_one(void)
{
	char uuid[16], *last_mount;
	int result = 0, ln;
	dev_t dev_t;
	struct block_device *dev;
	struct fs_info *fs_info, seek;

	if (toiActiveAllocator->rw_header_chunk(READ, NULL, uuid, 16)) {
		abort_hibernate(TOI_FAILED_IO, "Failed to read uuid.");
		return -EIO;
	}

	read_if_version(3, dev_t, "uuid dev_t field", return -EIO);

	if (toiActiveAllocator->rw_header_chunk(READ, NULL, (char *) &ln,
			sizeof(int))) {
		abort_hibernate(TOI_FAILED_IO,
				"Failed to read last mount size.");
		return -EIO;
	}

	last_mount = kzalloc(ln, GFP_KERNEL);
	if (!last_mount)
		return -ENOMEM;

	if (toiActiveAllocator->rw_header_chunk(READ, NULL, last_mount, ln)) {
		abort_hibernate(TOI_FAILED_IO,
				"Failed to read last mount timestamp.");
		result = -EIO;
		goto out_lmt;
	}

	strncpy((char *) &seek.uuid, uuid, 16);
	seek.dev_t = dev_t;
	seek.last_mount_size = ln;
	seek.last_mount = last_mount;
	dev_t = blk_lookup_fs_info(&seek);
	if (!dev_t)
		goto out_lmt;

	dev = toi_open_by_devnum(dev_t);

	fs_info = fs_info_from_block_dev(dev);
	if (fs_info && !IS_ERR(fs_info)) {
		if (ln != fs_info->last_mount_size) {
			printk(KERN_EMERG "Found matching uuid but last mount "
					"time lengths differ?! "
					"(%d vs %d).\n", ln,
					fs_info->last_mount_size);
			result = 1;
		} else {
			char buf[BDEVNAME_SIZE];

			result = !!memcmp(fs_info->last_mount, last_mount, ln);
			if (result)
				printk(KERN_EMERG "Last mount time for %s has "
						"changed!\n",
						bdevname(dev, buf));
		}
	}
	toi_close_bdev(dev);
	free_fs_info(fs_info);

out_lmt:
	kfree(last_mount);
	return result;
}
static int fs_info_load_and_check(void)
{
	int to_do, result = 0;

	if (toiActiveAllocator->rw_header_chunk(READ, NULL, (char *) &to_do,
			sizeof(int))) {
		abort_hibernate(TOI_FAILED_IO, "Failed to read num fs_info "
				"to load.");
		return -EIO;
	}

	while (to_do--)
		result |= fs_info_load_and_check_one();

	return result;
}
/**
 * write_image_header - write the image header after writing the image proper
 *
 * Returns:
 *	Zero on success, error value otherwise.
 **/
int write_image_header(void)
{
	int ret;
	int total = pagedir1.size + pagedir2.size + 2;
	char *header_buffer = NULL;

	/* Now prepare to write the header */
	ret = toiActiveAllocator->write_header_init();
	if (ret) {
		abort_hibernate(TOI_FAILED_MODULE_INIT,
				"Active allocator's write_header_init "
				"function failed.");
		goto write_image_header_abort;
	}

	header_buffer = (char *) toi_get_zeroed_page(24, TOI_ATOMIC_GFP);
	if (!header_buffer) {
		abort_hibernate(TOI_OUT_OF_MEMORY,
				"Out of memory when trying to get page for "
				"header!");
		goto write_image_header_abort;
	}

	/* Write hibernate header */
	if (fill_toi_header((struct toi_header *) header_buffer)) {
		abort_hibernate(TOI_OUT_OF_MEMORY,
				"Failure to fill header information!");
		goto write_image_header_abort;
	}

	if (toiActiveAllocator->rw_header_chunk(WRITE, NULL,
			header_buffer, sizeof(struct toi_header))) {
		abort_hibernate(TOI_OUT_OF_MEMORY,
				"Failure to write header info.");
		goto write_image_header_abort;
	}

	if (toiActiveAllocator->rw_header_chunk(WRITE, NULL,
			(char *) &toi_max_workers,
			sizeof(toi_max_workers))) {
		abort_hibernate(TOI_OUT_OF_MEMORY,
				"Failure to write number of workers to use.");
		goto write_image_header_abort;
	}

	/* Write filesystem info */
	if (fs_info_save())
		goto write_image_header_abort;
	/* Write module configurations */
	ret = write_module_configs();
	if (ret) {
		abort_hibernate(TOI_FAILED_IO,
				"Failed to write module configs.");
		goto write_image_header_abort;
	}

	if (memory_bm_write(pageset1_map,
			toiActiveAllocator->rw_header_chunk)) {
		abort_hibernate(TOI_FAILED_IO, "Failed to write bitmaps.");
		goto write_image_header_abort;
	}

	/* Flush data and let allocator cleanup */
	if (toiActiveAllocator->write_header_cleanup()) {
		abort_hibernate(TOI_FAILED_IO,
				"Failed to cleanup writing header.");
		goto write_image_header_abort_no_cleanup;
	}

	if (test_result_state(TOI_ABORTED))
		goto write_image_header_abort_no_cleanup;

	toi_update_status(total, total, NULL);

	toi_free_page(24, (unsigned long) header_buffer);
	return 0;

write_image_header_abort:
	toiActiveAllocator->write_header_cleanup();
write_image_header_abort_no_cleanup:
	if (header_buffer)
		toi_free_page(24, (unsigned long) header_buffer);
	return -1;
}
/**
 * sanity_check - check the header
 * @sh: the header which was saved at hibernate time.
 *
 * Perform a few checks, seeking to ensure that the kernel being
 * booted matches the one hibernated. They need to match so we can
 * be _sure_ things will work. It is not absolutely impossible for
 * resuming from a different kernel to work, just not assured.
 **/
static char *sanity_check(struct toi_header *sh)
{
	char *reason = check_image_kernel((struct swsusp_info *) sh);

	if (reason)
		return reason;

	if (!test_action_state(TOI_IGNORE_ROOTFS)) {
		const struct super_block *sb;

		list_for_each_entry(sb, &super_blocks, s_list) {
			if ((!(sb->s_flags & MS_RDONLY)) &&
			    (sb->s_type->fs_flags & FS_REQUIRES_DEV))
				return "Device backed fs has been mounted "
					"rw prior to resume or initrd/ramfs "
					"is mounted rw.";
		}
	}

	return NULL;
}
static DECLARE_WAIT_QUEUE_HEAD(freeze_wait);

#define FREEZE_IN_PROGRESS (~0)

static int freeze_result;

static void do_freeze(struct work_struct *dummy)
{
	freeze_result = freeze_processes();
	wake_up(&freeze_wait);
	trap_non_toi_io = 1;
}

static DECLARE_WORK(freeze_work, do_freeze);
/**
 * __read_pageset1 - test for the existence of an image and attempt to load it
 *
 * Returns:
 *	Zero if image found and pageset1 successfully loaded.
 *	Error if no image found or loaded.
 **/
static int __read_pageset1(void)
{
	int i, result = 0;
	char *header_buffer = (char *) toi_get_zeroed_page(25, TOI_ATOMIC_GFP),
	     *sanity_error = NULL;
	struct toi_header *toi_header;

	if (!header_buffer) {
		printk(KERN_INFO "Unable to allocate a page for reading the "
				"signature.\n");
		return -ENOMEM;
	}

	/* Check for an image */
	result = toiActiveAllocator->image_exists(1);
	if (result == 3) {
		toi_early_boot_message(1, 0, "The signature from an older "
				"version of TuxOnIce has been detected.");
		goto out_remove_image;
	}

	if (!result) {
		noresume_reset_modules();
		printk(KERN_INFO "TuxOnIce: No image found.\n");
		return -ENODATA;
	}
	/*
	 * Prepare the active allocator for reading the image header. The
	 * active allocator might read its own configuration.
	 *
	 * NB: This call may never return because there might be a signature
	 * for a different image such that we warn the user and they choose
	 * to reboot. (If the device ids look erroneous (2.4 vs 2.6) or the
	 * location of the image might be unavailable if it was stored on a
	 * network connection).
	 */
	result = toiActiveAllocator->read_header_init();
	if (result) {
		printk(KERN_INFO "TuxOnIce: Failed to initialise reading the "
				"image header.\n");
		goto out_remove_image;
	}

	/* Check for noresume command line option */
	if (test_toi_state(TOI_NORESUME_SPECIFIED)) {
		printk(KERN_INFO "TuxOnIce: Noresume on command line. Removed "
				"image.\n");
		goto out_remove_image;
	}

	/* Check whether we've resumed before */
	if (test_toi_state(TOI_RESUMED_BEFORE)) {
		toi_early_boot_message(1, 0, NULL);
		if (!(test_toi_state(TOI_CONTINUE_REQ))) {
			printk(KERN_INFO "TuxOnIce: Tried to resume before: "
					"Invalidated image.\n");
			goto out_remove_image;
		}
	}

	clear_toi_state(TOI_CONTINUE_REQ);

	toi_image_header_version = toiActiveAllocator->get_header_version();

	if (unlikely(toi_image_header_version > TOI_HEADER_VERSION)) {
		toi_early_boot_message(1, 0, image_version_error);
		if (!(test_toi_state(TOI_CONTINUE_REQ))) {
			printk(KERN_INFO "TuxOnIce: Header version too new: "
					"Invalidated image.\n");
			goto out_remove_image;
		}
	}

	/* Read hibernate header */
	result = toiActiveAllocator->rw_header_chunk(READ, NULL,
			header_buffer, sizeof(struct toi_header));
	if (result) {
		printk(KERN_ERR "TuxOnIce: Failed to read the image "
				"signature.\n");
		goto out_remove_image;
	}

	toi_header = (struct toi_header *) header_buffer;

	/*
	 * NB: This call may also result in a reboot rather than returning.
	 */
	sanity_error = sanity_check(toi_header);
	if (sanity_error) {
		toi_early_boot_message(1, TOI_CONTINUE_REQ, sanity_error);
		printk(KERN_INFO "TuxOnIce: Sanity check failed.\n");
		goto out_remove_image;
	}
	/*
	 * We have an image and it looks like it will load okay.
	 *
	 * Get metadata from header. Don't override commandline parameters.
	 *
	 * We don't need to save the image size limit because it's not used
	 * during resume and will be restored with the image anyway.
	 */
	memcpy((char *) &pagedir1, (char *) &toi_header->pagedir,
			sizeof(pagedir1));
	toi_result = toi_header->param0;
	if (!toi_bkd.toi_debug_state) {
		toi_bkd.toi_action =
			(toi_header->param1 & ~toi_bootflags_mask) |
			(toi_bkd.toi_action & toi_bootflags_mask);
		toi_bkd.toi_debug_state = toi_header->param2;
		toi_bkd.toi_default_console_level = toi_header->param3;
	}
	clear_toi_state(TOI_IGNORE_LOGLEVEL);
	pagedir2.size = toi_header->pageset_2_size;
	for (i = 0; i < 4; i++)
		toi_bkd.toi_io_time[i / 2][i % 2] =
			toi_header->io_time[i / 2][i % 2];

	set_toi_state(TOI_BOOT_KERNEL);
	boot_kernel_data_buffer = toi_header->bkd;

	read_if_version(1, toi_max_workers, "TuxOnIce max workers",
			goto out_remove_image);

	/* Read filesystem info */
	if (fs_info_load_and_check()) {
		printk(KERN_EMERG "TuxOnIce: File system mount time checks "
			"failed. Refusing to corrupt your filesystems!\n");
		goto out_remove_image;
	}

	/* Read module configurations */
	result = read_module_configs();
	if (result) {
		printk(KERN_INFO "TuxOnIce: Failed to read TuxOnIce module "
				"configurations.\n");
		clear_action_state(TOI_KEEP_IMAGE);
		goto out_remove_image;
	}
	toi_prepare_console();

	set_toi_state(TOI_NOW_RESUMING);

	if (!test_action_state(TOI_LATE_CPU_HOTPLUG)) {
		toi_prepare_status(DONT_CLEAR_BAR, "Disable nonboot cpus.");
		if (disable_nonboot_cpus()) {
			set_abort_result(TOI_CPU_HOTPLUG_FAILED);
			goto out_reset_console;
		}
	}

	result = pm_notifier_call_chain(PM_RESTORE_PREPARE);
	if (result)
		goto out_notifier_call_chain;

	if (usermodehelper_disable())
		goto out_enable_nonboot_cpus;

	current->flags |= PF_NOFREEZE;
	freeze_result = FREEZE_IN_PROGRESS;

	schedule_work_on(cpumask_first(cpu_online_mask), &freeze_work);

	toi_cond_pause(1, "About to read original pageset1 locations.");

	/*
	 * See _toi_rw_header_chunk in tuxonice_bio.c:
	 * Initialize pageset1_map by reading the map from the image.
	 */
	if (memory_bm_read(pageset1_map, toiActiveAllocator->rw_header_chunk))
		goto out_thaw;

	/*
	 * See toi_rw_cleanup in tuxonice_bio.c:
	 * Clean up after reading the header.
	 */
	result = toiActiveAllocator->read_header_cleanup();
	if (result) {
		printk(KERN_ERR "TuxOnIce: Failed to cleanup after reading the "
				"image header.\n");
		goto out_thaw;
	}

	toi_cond_pause(1, "About to read pagedir.");

	/*
	 * Get the addresses of pages into which we will load the kernel to
	 * be copied back and check if they conflict with the ones we are
	 * using.
	 */
	if (toi_get_pageset1_load_addresses()) {
		printk(KERN_INFO "TuxOnIce: Failed to get load addresses for "
				"pageset1.\n");
		goto out_thaw;
	}

	/* Read the original kernel back */
	toi_cond_pause(1, "About to read pageset 1.");

	/* Given the pagemap, read back the data from disk */
	if (read_pageset(&pagedir1, 0)) {
		toi_prepare_status(DONT_CLEAR_BAR, "Failed to read pageset 1.");
		goto out_thaw;
	}

	toi_cond_pause(1, "About to restore original kernel.");

	if (!test_action_state(TOI_KEEP_IMAGE) &&
	    toiActiveAllocator->mark_resume_attempted)
		toiActiveAllocator->mark_resume_attempted(1);

	wait_event(freeze_wait, freeze_result != FREEZE_IN_PROGRESS);
out:
	current->flags &= ~PF_NOFREEZE;
	toi_free_page(25, (unsigned long) header_buffer);
	return result;

out_thaw:
	wait_event(freeze_wait, freeze_result != FREEZE_IN_PROGRESS);
	trap_non_toi_io = 0;
	usermodehelper_enable();
out_enable_nonboot_cpus:
	enable_nonboot_cpus();
out_notifier_call_chain:
	pm_notifier_call_chain(PM_POST_RESTORE);
out_reset_console:
	toi_cleanup_console();
out_remove_image:
	result = -EINVAL;
	if (!test_action_state(TOI_KEEP_IMAGE))
		toiActiveAllocator->remove_image();
	toiActiveAllocator->read_header_cleanup();
	noresume_reset_modules();
	goto out;
}
/**
 * read_pageset1 - highlevel function to read the saved pages
 *
 * Attempt to read the header and pageset1 of a hibernate image.
 * Handle the outcome, complaining where appropriate.
 **/
int read_pageset1(void)
{
	int error;

	error = __read_pageset1();

	if (error && error != -ENODATA && error != -EINVAL &&
	    !test_result_state(TOI_ABORTED))
		abort_hibernate(TOI_IMAGE_ERROR,
				"TuxOnIce: Error %d resuming\n", error);

	return error;
}
/**
 * get_have_image_data - check the image header
 **/
static char *get_have_image_data(void)
{
	char *output_buffer = (char *) toi_get_zeroed_page(26, TOI_ATOMIC_GFP);
	struct toi_header *toi_header;

	if (!output_buffer) {
		printk(KERN_INFO "Output buffer null.\n");
		return NULL;
	}

	/* Check for an image */
	if (!toiActiveAllocator->image_exists(1) ||
	    toiActiveAllocator->read_header_init() ||
	    toiActiveAllocator->rw_header_chunk(READ, NULL,
			output_buffer, sizeof(struct toi_header))) {
		sprintf(output_buffer, "0\n");
		/*
		 * From an initrd/ramfs, catting have_image and
		 * getting a result of 0 is sufficient.
		 */
		clear_toi_state(TOI_BOOT_TIME);
		goto out;
	}

	toi_header = (struct toi_header *) output_buffer;

	sprintf(output_buffer, "1\n%s\n%s\n", toi_header->uts.machine,
			toi_header->uts.version);

	/* Check whether we've resumed before */
	if (test_toi_state(TOI_RESUMED_BEFORE))
		strcat(output_buffer, "Resumed before.\n");

out:
	noresume_reset_modules();
	return output_buffer;
}
/**
 * read_pageset2 - read second part of the image
 * @overwrittenpagesonly: Read only pages which would have been
 *	overwritten by pageset1?
 *
 * Read in part or all of pageset2 of an image, depending upon
 * whether we are hibernating and have only overwritten a portion
 * with pageset1 pages, or are resuming and need to read them
 * all.
 *
 * Returns:
 *	Zero if no error, otherwise the error value.
 **/
int read_pageset2(int overwrittenpagesonly)
{
	int result;

	result = read_pageset(&pagedir2, overwrittenpagesonly);

	toi_cond_pause(1, "Pagedir 2 read.");

	return result;
}
/**
 * image_exists_read - has an image been found?
 * @page: Output buffer
 *
 * Store 0 or 1 in page, depending on whether an image is found.
 * Incoming buffer is PAGE_SIZE and result is guaranteed
 * to be far less than that, so we don't worry about
 * overflow.
 **/
int image_exists_read(const char *page, int count)
{
	int len = 0;
	char *result;

	if (toi_activate_storage(0))
		return count;

	if (!test_toi_state(TOI_RESUME_DEVICE_OK))
		toi_attempt_to_parse_resume_device(0);

	if (!toiActiveAllocator) {
		len = sprintf((char *) page, "-1\n");
	} else {
		result = get_have_image_data();
		if (result) {
			len = sprintf((char *) page, "%s", result);
			toi_free_page(26, (unsigned long) result);
		}
	}

	toi_deactivate_storage(0);

	return len;
}
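/*
 * Usage note (assumption): this pair of read/write handlers is hooked up as
 * a TuxOnIce sysfs entry (see tuxonice_sysfs.c), so userspace can test for
 * an image with something like "cat /sys/power/tuxonice/image_exists" and
 * invalidate it by writing to the same file; the exact path depends on how
 * the sysfs tree is registered.
 */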
/**
 * image_exists_write - invalidate an image if one exists
 **/
int image_exists_write(const char *buffer, int count)
{
	if (toi_activate_storage(0))
		return count;

	if (toiActiveAllocator && toiActiveAllocator->image_exists(1))
		toiActiveAllocator->remove_image();

	toi_deactivate_storage(0);

	clear_result_state(TOI_KEPT_IMAGE);

	return count;
}