1 #include <linux/suspend.h>
2 #include <linux/suspend_ioctls.h>
3 #include <linux/utsname.h>
4 #include <linux/freezer.h>
7 struct new_utsname uts
;
9 unsigned long num_physpages
;
11 unsigned long image_pages
;
14 } __attribute__((aligned(PAGE_SIZE
)));
16 #ifdef CONFIG_HIBERNATION
17 /* kernel/power/snapshot.c */
18 extern void __init
hibernate_reserved_size_init(void);
19 extern void __init
hibernate_image_size_init(void);
21 #ifdef CONFIG_ARCH_HIBERNATION_HEADER
22 /* Maximum size of architecture specific data in a hibernation header */
23 #define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)
25 extern int arch_hibernation_header_save(void *addr
, unsigned int max_size
);
26 extern int arch_hibernation_header_restore(void *addr
);
28 static inline int init_header_complete(struct swsusp_info
*info
)
30 return arch_hibernation_header_save(info
, MAX_ARCH_HEADER_SIZE
);
33 static inline char *check_image_kernel(struct swsusp_info
*info
)
35 return arch_hibernation_header_restore(info
) ?
36 "architecture specific data" : NULL
;
39 extern char *check_image_kernel(struct swsusp_info
*info
);
40 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
41 extern int init_header(struct swsusp_info
*info
);
43 extern char resume_file
[256];
45 * Keep some memory free so that I/O operations can succeed without paging
46 * [Might this be more than 4 MB?]
48 #define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)
51 * Keep 1 MB of memory free so that device drivers can allocate some pages in
52 * their .suspend() routines without breaking the suspend to disk.
54 #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
56 /* kernel/power/hibernate.c */
57 extern bool freezer_test_done
;
59 extern int hibernation_snapshot(int platform_mode
);
60 extern int hibernation_restore(int platform_mode
);
61 extern int hibernation_platform_enter(void);
62 extern void platform_recover(int platform_mode
);
64 #else /* !CONFIG_HIBERNATION */
/*
 * Stubs used when CONFIG_HIBERNATION is not set: no hibernation image
 * or reserved memory sizing is needed, so initialization is a no-op.
 */
static inline void hibernate_reserved_size_init(void)
{
}

static inline void hibernate_image_size_init(void)
{
}
68 #endif /* !CONFIG_HIBERNATION */
70 extern int pfn_is_nosave(unsigned long);
72 #define power_attr(_name) \
73 static struct kobj_attribute _name##_attr = { \
75 .name = __stringify(_name), \
78 .show = _name##_show, \
79 .store = _name##_store, \
82 extern struct pbe
*restore_pblist
;
84 /* Preferred image size in bytes (default 500 MB) */
85 extern unsigned long image_size
;
86 /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
87 extern unsigned long reserved_size
;
88 extern int in_suspend
;
89 extern dev_t swsusp_resume_device
;
90 extern sector_t swsusp_resume_block
;
92 extern asmlinkage
int swsusp_arch_suspend(void);
93 extern asmlinkage
int swsusp_arch_resume(void);
95 extern int create_basic_memory_bitmaps(void);
96 extern void free_basic_memory_bitmaps(void);
97 extern int hibernate_preallocate_memory(void);
100 * Auxiliary structure used for reading the snapshot image data and
101 * metadata from and writing them to the list of page backup entries
102 * (PBEs) which is the main data structure of swsusp.
104 * Using struct snapshot_handle we can transfer the image, including its
105 * metadata, as a continuous sequence of bytes with the help of
106 * snapshot_read_next() and snapshot_write_next().
108 * The code that writes the image to a storage or transfers it to
109 * the user land is required to use snapshot_read_next() for this
110 * purpose and it should not make any assumptions regarding the internal
111 * structure of the image. Similarly, the code that reads the image from
112 * a storage or transfers it from the user land is required to use
113 * snapshot_write_next().
115 * This may allow us to change the internal structure of the image
116 * in the future with considerably less effort.
119 struct snapshot_handle
{
120 unsigned int cur
; /* number of the block of PAGE_SIZE bytes the
121 * next operation will refer to (ie. current)
123 void *buffer
; /* address of the block to read from
126 int sync_read
; /* Set to one to notify the caller of
127 * snapshot_write_next() that it may
128 * need to call wait_on_bio_chain()
/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 * (i.e. the current PAGE_SIZE block held in handle.buffer).
 */
#define data_of(handle) ((handle).buffer)
138 extern unsigned int snapshot_additional_pages(struct zone
*zone
);
139 extern unsigned long snapshot_get_image_size(void);
140 extern int snapshot_read_next(struct snapshot_handle
*handle
);
141 extern int snapshot_write_next(struct snapshot_handle
*handle
);
142 extern void snapshot_write_finalize(struct snapshot_handle
*handle
);
143 extern int snapshot_image_loaded(struct snapshot_handle
*handle
);
145 /* If unset, the snapshot device cannot be open. */
146 extern atomic_t snapshot_device_available
;
148 extern sector_t
alloc_swapdev_block(int swap
);
149 extern void free_all_swap_pages(int swap
);
150 extern int swsusp_swap_in_use(void);
153 * Flags that can be passed from the hibernating kernel to the "boot" kernel in
156 #define SF_PLATFORM_MODE 1
157 #define SF_NOCOMPRESS_MODE 2
158 #define SF_CRC32_MODE 4
160 /* kernel/power/hibernate.c */
161 extern int swsusp_check(void);
162 extern void swsusp_free(void);
163 extern int swsusp_read(unsigned int *flags_p
);
164 extern int swsusp_write(unsigned int flags
);
165 extern void swsusp_close(fmode_t
);
166 #ifdef CONFIG_SUSPEND
167 extern int swsusp_unmark(void);
170 /* kernel/power/block_io.c */
171 extern struct block_device
*hib_resume_bdev
;
173 extern int hib_bio_read_page(pgoff_t page_off
, void *addr
,
174 struct bio
**bio_chain
);
175 extern int hib_bio_write_page(pgoff_t page_off
, void *addr
,
176 struct bio
**bio_chain
);
177 extern int hib_wait_on_bio_chain(struct bio
**bio_chain
);
180 /* kernel/power/swsusp.c */
181 extern void swsusp_show_speed(struct timeval
*, struct timeval
*,
182 unsigned int, char *);
184 #ifdef CONFIG_SUSPEND
185 struct pm_sleep_state
{
187 suspend_state_t state
;
190 /* kernel/power/suspend.c */
191 extern struct pm_sleep_state pm_states
[];
193 extern int suspend_devices_and_enter(suspend_state_t state
);
194 #else /* !CONFIG_SUSPEND */
195 static inline int suspend_devices_and_enter(suspend_state_t state
)
199 #endif /* !CONFIG_SUSPEND */
201 #ifdef CONFIG_PM_TEST_SUSPEND
202 /* kernel/power/suspend_test.c */
203 extern void suspend_test_start(void);
204 extern void suspend_test_finish(const char *label
);
205 #else /* !CONFIG_PM_TEST_SUSPEND */
/*
 * Stubs used when CONFIG_PM_TEST_SUSPEND is not set: suspend timing
 * instrumentation is compiled out.
 */
static inline void suspend_test_start(void)
{
}

static inline void suspend_test_finish(const char *label)
{
}
208 #endif /* !CONFIG_PM_TEST_SUSPEND */
210 #ifdef CONFIG_PM_SLEEP
211 /* kernel/power/main.c */
212 extern int pm_notifier_call_chain(unsigned long val
);
215 #ifdef CONFIG_HIGHMEM
216 int restore_highmem(void);
/*
 * Stubs used when CONFIG_HIGHMEM is not set: there are no highmem
 * pages to count or restore, so report zero/success.
 */
static inline unsigned int count_highmem_pages(void)
{
	return 0;
}

static inline int restore_highmem(void)
{
	return 0;
}
223 * Suspend test levels
237 #define TEST_FIRST TEST_NONE
238 #define TEST_MAX (__TEST_AFTER_LAST - 1)
240 extern int pm_test_level
;
242 #ifdef CONFIG_SUSPEND_FREEZER
243 static inline int suspend_freeze_processes(void)
247 error
= freeze_processes();
249 * freeze_processes() automatically thaws every task if freezing
250 * fails. So we need not do anything extra upon error.
255 error
= freeze_kernel_threads();
257 * freeze_kernel_threads() thaws only kernel threads upon freezing
258 * failure. So we have to thaw the userspace tasks ourselves.
266 static inline void suspend_thaw_processes(void)
271 static inline int suspend_freeze_processes(void)
276 static inline void suspend_thaw_processes(void)
281 extern struct page
*saveable_page(struct zone
*z
, unsigned long p
);
282 #ifdef CONFIG_HIGHMEM
283 extern struct page
*saveable_highmem_page(struct zone
*z
, unsigned long p
);
286 inline struct page
*saveable_highmem_page(struct zone
*z
, unsigned long p
)
292 #define PBES_PER_PAGE (PAGE_SIZE / sizeof(struct pbe))
293 extern struct list_head nosave_regions
;
296 * This structure represents a range of page frames the contents of which
297 * should not be saved during the suspend.
300 struct nosave_region
{
301 struct list_head list
;
302 unsigned long start_pfn
;
303 unsigned long end_pfn
;
306 #define BM_END_OF_MAP (~0UL)
308 #define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
311 struct list_head hook
; /* hook into a list of bitmap blocks */
312 unsigned long start_pfn
; /* pfn represented by the first bit */
313 unsigned long end_pfn
; /* pfn represented by the last bit plus 1 */
314 unsigned long *data
; /* bitmap representing pages */
317 /* struct bm_position is used for browsing memory bitmaps */
320 struct bm_block
*block
;
324 struct memory_bitmap
{
325 struct list_head blocks
; /* list of bitmap blocks */
326 struct linked_page
*p_list
; /* list of pages used to store zone
327 * bitmap objects and bitmap block
330 struct bm_position
*states
; /* most recently used bit position */
331 int num_states
; /* when iterating over a bitmap and
332 * number of states we support.
336 extern int memory_bm_create(struct memory_bitmap
*bm
, gfp_t gfp_mask
,
338 extern int memory_bm_create_index(struct memory_bitmap
*bm
, gfp_t gfp_mask
,
339 int safe_needed
, int index
);
340 extern void memory_bm_free(struct memory_bitmap
*bm
, int clear_nosave_free
);
341 extern void memory_bm_set_bit(struct memory_bitmap
*bm
, unsigned long pfn
);
342 extern void memory_bm_clear_bit(struct memory_bitmap
*bm
, unsigned long pfn
);
343 extern void memory_bm_clear_bit_index(struct memory_bitmap
*bm
, unsigned long pfn
, int index
);
344 extern int memory_bm_test_bit(struct memory_bitmap
*bm
, unsigned long pfn
);
345 extern int memory_bm_test_bit_index(struct memory_bitmap
*bm
, unsigned long pfn
, int index
);
346 extern unsigned long memory_bm_next_pfn(struct memory_bitmap
*bm
);
347 extern unsigned long memory_bm_next_pfn_index(struct memory_bitmap
*bm
,
349 extern void memory_bm_position_reset(struct memory_bitmap
*bm
);
350 extern void memory_bm_clear(struct memory_bitmap
*bm
);
351 extern void memory_bm_copy(struct memory_bitmap
*source
,
352 struct memory_bitmap
*dest
);
353 extern void memory_bm_dup(struct memory_bitmap
*source
,
354 struct memory_bitmap
*dest
);
355 extern int memory_bm_set_iterators(struct memory_bitmap
*bm
, int number
);
358 struct toi_module_ops
;
359 extern int memory_bm_read(struct memory_bitmap
*bm
, int (*rw_chunk
)
360 (int rw
, struct toi_module_ops
*owner
, char *buffer
, int buffer_size
));
361 extern int memory_bm_write(struct memory_bitmap
*bm
, int (*rw_chunk
)
362 (int rw
, struct toi_module_ops
*owner
, char *buffer
, int buffer_size
));
365 #ifdef CONFIG_PM_AUTOSLEEP
367 /* kernel/power/autosleep.c */
368 extern int pm_autosleep_init(void);
369 extern int pm_autosleep_lock(void);
370 extern void pm_autosleep_unlock(void);
371 extern suspend_state_t
pm_autosleep_state(void);
372 extern int pm_autosleep_set_state(suspend_state_t state
);
374 #else /* !CONFIG_PM_AUTOSLEEP */
/*
 * Stubs used when CONFIG_PM_AUTOSLEEP is not set: report success so
 * callers need no conditional compilation of their own.
 */
static inline int pm_autosleep_init(void)
{
	return 0;
}

static inline int pm_autosleep_lock(void)
{
	return 0;
}

static inline void pm_autosleep_unlock(void)
{
}
/* Without CONFIG_PM_AUTOSLEEP, autosleep is never engaged, so the
 * reported state is always PM_SUSPEND_ON (i.e. fully running).
 */
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }
381 #endif /* !CONFIG_PM_AUTOSLEEP */
383 #ifdef CONFIG_EARLYSUSPEND
384 /* kernel/power/earlysuspend.c */
385 void request_suspend_state(suspend_state_t state
);
386 suspend_state_t
get_suspend_state(void);
388 extern void suspend_syssync_enqueue(void) ;
389 extern void suspend_check_sys_sync_done(void);
392 #ifdef CONFIG_PM_WAKELOCKS
394 /* kernel/power/wakelock.c */
395 extern ssize_t
pm_show_wakelocks(char *buf
, bool show_active
);
396 extern int pm_wake_lock(const char *buf
);
397 extern int pm_wake_unlock(const char *buf
);
399 #endif /* !CONFIG_PM_WAKELOCKS */