/*
 * kernel/power/tuxonice_atomic_copy.c
 *
 * Copyright 2004-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * Distributed under GPLv2.
 *
 * Routines for doing the atomic save/restore.
 */
11 #include <linux/suspend.h>
12 #include <linux/highmem.h>
13 #include <linux/cpu.h>
14 #include <linux/freezer.h>
15 #include <linux/console.h>
16 #include <linux/syscore_ops.h>
17 #include <linux/ftrace.h>
18 #include <asm/suspend.h>
20 #include "tuxonice_storage.h"
21 #include "tuxonice_power_off.h"
22 #include "tuxonice_ui.h"
23 #include "tuxonice_io.h"
24 #include "tuxonice_prepare_image.h"
25 #include "tuxonice_pageflags.h"
26 #include "tuxonice_checksum.h"
27 #include "tuxonice_builtin.h"
28 #include "tuxonice_atomic_copy.h"
29 #include "tuxonice_alloc.h"
30 #include "tuxonice_modules.h"
/* How many pages pageset1 grew by between image preparation and the atomic
 * copy (see __toi_post_context_save); 0 if it shrank or stayed the same. */
unsigned long extra_pd1_pages_used;
35 * free_pbe_list - free page backup entries used by the atomic copy code.
36 * @list: List to free.
37 * @highmem: Whether the list is in highmem.
39 * Normally, this function isn't used. If, however, we need to abort before
40 * doing the atomic copy, we use this to free the pbes previously allocated.
42 static void free_pbe_list(struct pbe
**list
, int highmem
)
46 struct pbe
*free_pbe
, *next_page
= NULL
;
50 page
= (struct page
*)*list
;
51 free_pbe
= (struct pbe
*)kmap(page
);
53 page
= virt_to_page(*list
);
57 for (i
= 0; i
< PBES_PER_PAGE
; i
++) {
61 toi__free_page(29, free_pbe
->address
);
63 toi_free_page(29, (unsigned long)free_pbe
->address
);
64 free_pbe
= free_pbe
->next
;
76 toi__free_page(29, page
);
77 *list
= (struct pbe
*)next_page
;
82 * copyback_post - post atomic-restore actions
84 * After doing the atomic restore, we have a few more things to do:
85 * 1) We want to retain some values across the restore, so we now copy
86 * these from the nosave variables to the normal ones.
87 * 2) Set the status flags.
89 * 4) Tell userui so it can redraw & restore settings.
90 * 5) Reread the page cache.
92 void copyback_post(void)
94 struct toi_boot_kernel_data
*bkd
= (struct toi_boot_kernel_data
*)boot_kernel_data_buffer
;
96 if (toi_activate_storage(1))
97 panic("Failed to reactivate our storage.");
99 toi_post_atomic_restore_modules(bkd
);
101 toi_cond_pause(1, "About to reload secondary pagedir.");
103 if (read_pageset2(0))
104 panic("Unable to successfully reread the page cache.");
107 * If the user wants to sleep again after resuming from full-off,
108 * it's most likely to be in order to suspend to ram, so we'll
109 * do this check after loading pageset2, to give them the fastest
110 * wakeup when they are ready to use the computer again.
116 * toi_copy_pageset1 - do the atomic copy of pageset1
118 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
119 * because we can't be sure what side effects it has. On my old Duron, with
120 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
121 * count at resume time 4 instead of 3.
123 * We don't want to call kmap_atomic unconditionally because it has the side
124 * effect of incrementing the preempt count, which will leave it one too high
125 * post resume (the page containing the preempt count will be copied after
126 * its incremented. This is essentially the same problem.
128 void toi_copy_pageset1(void)
131 unsigned long source_index
, dest_index
;
133 memory_bm_position_reset(pageset1_map
);
134 memory_bm_position_reset(pageset1_copy_map
);
136 source_index
= memory_bm_next_pfn(pageset1_map
);
137 dest_index
= memory_bm_next_pfn(pageset1_copy_map
);
139 for (i
= 0; i
< pagedir1
.size
; i
++) {
140 unsigned long *origvirt
, *copyvirt
;
141 struct page
*origpage
, *copypage
;
142 int loop
= (PAGE_SIZE
/ sizeof(unsigned long)) - 1, was_present1
, was_present2
;
144 #ifdef CONFIG_TOI_ENHANCE
145 if (!pfn_valid(source_index
) || !pfn_valid(dest_index
)) {
146 pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n", __func__
, i
,
147 dest_index
, source_index
);
148 set_abort_result(TOI_ARCH_PREPARE_FAILED
);
153 origpage
= pfn_to_page(source_index
);
154 copypage
= pfn_to_page(dest_index
);
156 origvirt
= PageHighMem(origpage
) ? kmap_atomic(origpage
) : page_address(origpage
);
158 copyvirt
= PageHighMem(copypage
) ? kmap_atomic(copypage
) : page_address(copypage
);
160 was_present1
= kernel_page_present(origpage
);
162 kernel_map_pages(origpage
, 1, 1);
164 was_present2
= kernel_page_present(copypage
);
166 kernel_map_pages(copypage
, 1, 1);
169 *(copyvirt
+ loop
) = *(origvirt
+ loop
);
174 kernel_map_pages(origpage
, 1, 0);
177 kernel_map_pages(copypage
, 1, 0);
179 if (PageHighMem(origpage
))
180 kunmap_atomic(origvirt
);
182 if (PageHighMem(copypage
))
183 kunmap_atomic(copyvirt
);
185 source_index
= memory_bm_next_pfn(pageset1_map
);
186 dest_index
= memory_bm_next_pfn(pageset1_copy_map
);
191 * __toi_post_context_save - steps after saving the cpu context
193 * Steps taken after saving the CPU state to make the actual
196 * Called from swsusp_save in snapshot.c via toi_post_context_save.
198 int __toi_post_context_save(void)
200 unsigned long old_ps1_size
= pagedir1
.size
;
204 free_checksum_pages();
206 toi_recalculate_image_contents(1);
208 extra_pd1_pages_used
= pagedir1
.size
> old_ps1_size
? pagedir1
.size
- old_ps1_size
: 0;
210 if (extra_pd1_pages_used
> extra_pd1_pages_allowance
) {
211 printk(KERN_INFO
"Pageset1 has grown by %lu pages. "
212 "extra_pages_allowance is currently only %lu.\n",
213 pagedir1
.size
- old_ps1_size
, extra_pd1_pages_allowance
);
216 * Highlevel code will see this, clear the state and
217 * retry if we haven't already done so twice.
219 if (any_to_free(1)) {
220 set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL
);
223 if (try_allocate_extra_memory()) {
224 printk(KERN_INFO
"Failed to allocate the extra memory"
225 " needed. Restarting the process.");
226 set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL
);
229 printk(KERN_INFO
"However it looks like there's enough"
230 " free ram and storage to handle this, so " " continuing anyway.");
232 * What if try_allocate_extra_memory above calls
233 * toi_allocate_extra_pagedir_memory and it allocs a new
234 * slab page via toi_kzalloc which should be in ps1? So...
236 toi_recalculate_image_contents(1);
239 if (!test_action_state(TOI_TEST_FILTER_SPEED
) && !test_action_state(TOI_TEST_BIO
))
246 * toi_hibernate - high level code for doing the atomic copy
248 * High-level code which prepares to do the atomic copy. Loosely based
249 * on the swsusp version, but with the following twists:
250 * - We set toi_running so the swsusp code uses our code paths.
251 * - We give better feedback regarding what goes wrong if there is a
253 * - We use an extra function to call the assembly, just in case this code
254 * is in a module (return address).
256 int toi_hibernate(void)
260 toi_running
= 1; /* For the swsusp code we use :< */
262 error
= toi_lowlevel_builtin();
265 struct toi_boot_kernel_data
*bkd
=
266 (struct toi_boot_kernel_data
*)boot_kernel_data_buffer
;
269 * The boot kernel's data may be larger (newer version) or
270 * smaller (older version) than ours. Copy the minimum
271 * of the two sizes, so that we don't overwrite valid values
272 * from pre-atomic copy.
275 memcpy(&toi_bkd
, (char *)boot_kernel_data_buffer
,
276 min_t(int, sizeof(struct toi_boot_kernel_data
), bkd
->size
));
284 * toi_atomic_restore - prepare to do the atomic restore
286 * Get ready to do the atomic restore. This part gets us into the same
287 * state we are in prior to do calling do_toi_lowlevel while
288 * hibernating: hot-unplugging secondary cpus and freeze processes,
289 * before starting the thread that will do the restore.
291 int toi_atomic_restore(void)
297 toi_prepare_status(DONT_CLEAR_BAR
, "Atomic restore.");
299 memcpy(&toi_bkd
.toi_nosave_commandline
, saved_command_line
, strlen(saved_command_line
));
301 toi_pre_atomic_restore_modules(&toi_bkd
);
303 if (add_boot_kernel_data_pbe())
306 toi_prepare_status(DONT_CLEAR_BAR
, "Doing atomic copy/restore.");
308 if (toi_go_atomic(PMSG_QUIESCE
, 0))
311 /* We'll ignore saved state, but this gets preempt count (etc) right */
312 save_processor_state();
314 error
= swsusp_arch_resume();
316 * Code below is only ever reached in case of failure. Otherwise
317 * execution continues at place where swsusp_arch_suspend was called.
319 * We don't know whether it's safe to continue (this shouldn't happen),
320 * so lets err on the side of caution.
325 free_pbe_list(&restore_pblist
, 0);
326 #ifdef CONFIG_HIGHMEM
327 pr_warn("[%s] 0x%p 0x%p 0x%p\n", __func__
,
328 restore_highmem_pblist
->address
, restore_highmem_pblist
->orig_address
, restore_highmem_pblist
->next
);
329 if (restore_highmem_pblist
->next
!= NULL
)
330 free_pbe_list(&restore_highmem_pblist
, 1);
337 * toi_go_atomic - do the actual atomic copy/restore
338 * @state: The state to use for dpm_suspend_start & power_down calls.
339 * @suspend_time: Whether we're suspending or resuming.
341 int toi_go_atomic(pm_message_t state
, int suspend_time
)
344 if (platform_begin(1)) {
345 set_abort_result(TOI_PLATFORM_PREP_FAILED
);
346 toi_end_atomic(ATOMIC_STEP_PLATFORM_END
, suspend_time
, 3);
347 hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n", __LINE__
,
348 suspend_time
, state
.event
);
352 if (dpm_prepare(PMSG_FREEZE
)) {
353 set_abort_result(TOI_DPM_PREPARE_FAILED
);
354 dpm_complete(PMSG_RECOVER
);
355 toi_end_atomic(ATOMIC_STEP_PLATFORM_END
, suspend_time
, 3);
356 hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n", __LINE__
,
357 suspend_time
, state
.event
);
364 pm_restrict_gfp_mask();
367 #if 0 /* FIXME: jonathan.jmchen: trick code here to let dpm_suspend succeeded, NEED to find out the root cause!! */
368 if (events_check_enabled
) {
369 hib_log("play trick here set events_check_enabled(%d) = false!!\n",
370 events_check_enabled
);
371 events_check_enabled
= false;
374 if (dpm_suspend(state
)) {
375 set_abort_result(TOI_DPM_SUSPEND_FAILED
);
376 toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME
, suspend_time
, 3);
377 hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n",
378 __LINE__
, suspend_time
, state
.event
, toi_result
);
382 if (dpm_suspend_start(state
)) {
383 set_abort_result(TOI_DPM_SUSPEND_FAILED
);
384 toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME
, suspend_time
, 3);
385 hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n",
386 __LINE__
, suspend_time
, state
.event
, toi_result
);
391 /* At this point, dpm_suspend_start() has been called, but *not*
392 * dpm_suspend_noirq(). We *must* dpm_suspend_noirq() now.
393 * Otherwise, drivers for some devices (e.g. interrupt controllers)
394 * become desynchronized with the actual state of the hardware
395 * at resume time, and evil weirdness ensues.
398 if (dpm_suspend_end(state
)) {
399 set_abort_result(TOI_DEVICE_REFUSED
);
400 toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME
, suspend_time
, 1);
401 hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__
,
402 suspend_time
, state
.event
, toi_result
);
407 if (platform_pre_snapshot(1))
408 set_abort_result(TOI_PRE_SNAPSHOT_FAILED
);
410 if (platform_pre_restore(1))
411 set_abort_result(TOI_PRE_RESTORE_FAILED
);
414 if (test_result_state(TOI_ABORTED
)) {
415 toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH
, suspend_time
, 1);
416 hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__
,
417 suspend_time
, state
.event
, toi_result
);
421 if (test_action_state(TOI_LATE_CPU_HOTPLUG
)) {
422 if (disable_nonboot_cpus()) {
423 set_abort_result(TOI_CPU_HOTPLUG_FAILED
);
424 toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG
, suspend_time
, 1);
425 hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n",
426 __LINE__
, suspend_time
, state
.event
, toi_result
);
433 if (syscore_suspend()) {
434 set_abort_result(TOI_SYSCORE_REFUSED
);
435 toi_end_atomic(ATOMIC_STEP_IRQS
, suspend_time
, 1);
436 hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__
,
437 suspend_time
, state
.event
, toi_result
);
441 if (suspend_time
&& pm_wakeup_pending()) {
442 set_abort_result(TOI_WAKEUP_EVENT
);
443 toi_end_atomic(ATOMIC_STEP_SYSCORE_RESUME
, suspend_time
, 1);
444 hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__
,
445 suspend_time
, state
.event
, toi_result
);
448 hib_log("SUCCEEDED @line:%d suspend(%d) pm_state(%d)\n", __LINE__
, suspend_time
,
454 * toi_end_atomic - post atomic copy/restore routines
455 * @stage: What step to start at.
456 * @suspend_time: Whether we're suspending or resuming.
457 * @error: Whether we're recovering from an error.
459 void toi_end_atomic(int stage
, int suspend_time
, int error
)
461 pm_message_t msg
= suspend_time
? (error
? PMSG_RECOVER
: PMSG_THAW
) : PMSG_RESTORE
;
464 case ATOMIC_ALL_STEPS
:
466 events_check_enabled
= false;
469 case ATOMIC_STEP_SYSCORE_RESUME
:
471 case ATOMIC_STEP_IRQS
:
473 case ATOMIC_STEP_CPU_HOTPLUG
:
474 if (test_action_state(TOI_LATE_CPU_HOTPLUG
))
475 enable_nonboot_cpus();
476 case ATOMIC_STEP_PLATFORM_FINISH
:
477 if (!suspend_time
&& error
& 2)
478 platform_restore_cleanup(1);
481 dpm_resume_start(msg
);
482 case ATOMIC_STEP_DEVICE_RESUME
:
483 if (suspend_time
&& (error
& 2))
486 if (error
|| !toi_in_suspend())
487 pm_restore_gfp_mask();
490 case ATOMIC_STEP_DPM_COMPLETE
:
492 case ATOMIC_STEP_PLATFORM_END
:
495 toi_prepare_status(DONT_CLEAR_BAR
, "Post atomic.");