fix compilation after merge
[GitHub/mt8127/android_kernel_alcatel_ttab.git] kernel/power/power.h
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

struct swsusp_info {
	struct new_utsname uts;
	u32 version_code;
	unsigned long num_physpages;
	int cpus;
	unsigned long image_pages;
	unsigned long pages;
	unsigned long size;
} __attribute__((aligned(PAGE_SIZE)));

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#else
extern char *check_image_kernel(struct swsusp_info *info);
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
extern int init_header(struct swsusp_info *info);

extern char resume_file[256];
/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

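/*
 * Worked example (illustrative, assuming the common 4 KiB page size, i.e.
 * PAGE_SHIFT == 12): PAGES_FOR_IO = (4096 * 1024) >> 12 = 1024 pages (4 MB)
 * and SPARE_PAGES = (1024 * 1024) >> 12 = 256 pages (1 MB). The actual
 * counts depend on the architecture's PAGE_SHIFT.
 */
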
/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
extern void platform_recover(int platform_mode);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr = {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show = _name##_show,			\
	.store = _name##_store,			\
}

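/*
 * Example use (illustrative; "foo" is a hypothetical attribute name):
 * power_attr(foo) defines a struct kobj_attribute named foo_attr whose
 * ->show and ->store callbacks are foo_show() and foo_store(), which the
 * caller must provide:
 *
 *	static ssize_t foo_show(struct kobject *kobj,
 *				struct kobj_attribute *attr, char *buf);
 *	static ssize_t foo_store(struct kobject *kobj,
 *				 struct kobj_attribute *attr,
 *				 const char *buf, size_t n);
 *	power_attr(foo);
 */
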
extern struct pbe *restore_pblist;

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

/**
 * Auxiliary structure used for reading the snapshot image data and
 * metadata from and writing them to the list of page backup entries
 * (PBEs) which is the main data structure of swsusp.
 *
 * Using struct snapshot_handle we can transfer the image, including its
 * metadata, as a continuous sequence of bytes with the help of
 * snapshot_read_next() and snapshot_write_next().
 *
 * The code that writes the image to storage or transfers it to user land
 * is required to use snapshot_read_next() for this purpose and it should
 * not make any assumptions regarding the internal structure of the image.
 * Similarly, the code that reads the image from storage or transfers it
 * from user land is required to use snapshot_write_next().
 *
 * This may allow us to change the internal structure of the image
 * in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (ie. current)
				 */
	void *buffer;		/* address of the block to read from
				 * or write to
				 */
	int sync_read;		/* Set to one to notify the caller of
				 * snapshot_write_next() that it may
				 * need to call wait_on_bio_chain()
				 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

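/*
 * Illustrative calling convention for the code that saves the image (a
 * sketch modeled on the save loop in kernel/power/swap.c; error handling is
 * elided and write_page_somewhere() is a hypothetical sink):
 *
 *	struct snapshot_handle snapshot;
 *	int ret;
 *
 *	memset(&snapshot, 0, sizeof(snapshot));
 *	ret = snapshot_read_next(&snapshot);
 *	while (ret > 0) {
 *		write_page_somewhere(data_of(snapshot));
 *		ret = snapshot_read_next(&snapshot);
 *	}
 *
 * A final value of 0 means the whole image has been transferred; a negative
 * value is an error code.
 */
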
/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4

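/*
 * For example, the resume path (cf. software_resume() in
 * kernel/power/hibernate.c) reads the flags back and honours them roughly
 * like this (sketch, error handling elided):
 *
 *	unsigned int flags;
 *	int error;
 *
 *	error = swsusp_read(&flags);
 *	if (!error)
 *		error = hibernation_restore(flags & SF_PLATFORM_MODE);
 */
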
/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);

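/*
 * Illustrative use of the bio-chain helpers (cf. kernel/power/swap.c): pages
 * queued on the same chain complete asynchronously and are waited for in one
 * go. "offset" and "data" stand for the caller's swap offset and page buffer:
 *
 *	struct bio *bio = NULL;
 *	int error;
 *
 *	error = hib_bio_read_page(offset, data, &bio);
 *	if (!error)
 *		error = hib_wait_on_bio_chain(&bio);
 */
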
struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
				unsigned int, char *);

#ifdef CONFIG_SUSPEND
struct pm_sleep_state {
	const char *label;
	suspend_state_t state;
};

/* kernel/power/suspend.c */
extern struct pm_sleep_state pm_states[];

extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */

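/*
 * Sketch of the intended call sequence (cf. enter_state() in
 * kernel/power/suspend.c): the target state is validated first, then the
 * devices are suspended and the system enters the sleep state:
 *
 *	if (!valid_state(state))
 *		return -ENODEV;
 *	...
 *	error = suspend_devices_and_enter(state);
 */
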
#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

extern int pm_test_level;

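/*
 * The active test level is normally selected from user space by writing one
 * of the level names (e.g. "devices" or "core") to /sys/power/pm_test; see
 * Documentation/power/basic-pm-debugging.txt for what each level skips.
 */
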
#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

extern struct page *saveable_page(struct zone *z, unsigned long p);
#ifdef CONFIG_HIGHMEM
extern struct page *saveable_highmem_page(struct zone *z, unsigned long p);
#else
static
inline struct page *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif

#define PBES_PER_PAGE (PAGE_SIZE / sizeof(struct pbe))
extern struct list_head nosave_regions;

/**
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

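/*
 * A registered region is typically consumed like this (sketch modeled on
 * mark_nosave_pages() in kernel/power/snapshot.c): every valid pfn in each
 * region is marked in a "forbidden pages" bitmap so it is never copied into
 * the image:
 *
 *	struct nosave_region *region;
 *	unsigned long pfn;
 *
 *	list_for_each_entry(region, &nosave_regions, list)
 *		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
 *			if (pfn_valid(pfn))
 *				memory_bm_set_bit(bm, pfn);
 */
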
#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)

struct bm_block {
	struct list_head hook;		/* hook into a list of bitmap blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
	unsigned long *data;		/* bitmap representing pages */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct bm_block *block;
	int bit;
};

struct memory_bitmap {
	struct list_head blocks;	/* list of bitmap blocks */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position *states;	/* most recently used bit positions,
					 * one per iterator
					 */
	int num_states;			/* number of iterator states (bit
					 * positions) supported when browsing
					 * the bitmap
					 */
};

extern int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
		int safe_needed);
extern int memory_bm_create_index(struct memory_bitmap *bm, gfp_t gfp_mask,
		int safe_needed, int index);
extern void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
extern void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn);
extern void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn);
extern void memory_bm_clear_bit_index(struct memory_bitmap *bm, unsigned long pfn, int index);
extern int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn);
extern int memory_bm_test_bit_index(struct memory_bitmap *bm, unsigned long pfn, int index);
extern unsigned long memory_bm_next_pfn(struct memory_bitmap *bm);
extern unsigned long memory_bm_next_pfn_index(struct memory_bitmap *bm,
		int index);
extern void memory_bm_position_reset(struct memory_bitmap *bm);
extern void memory_bm_clear(struct memory_bitmap *bm);
extern void memory_bm_copy(struct memory_bitmap *source,
		struct memory_bitmap *dest);
extern void memory_bm_dup(struct memory_bitmap *source,
		struct memory_bitmap *dest);
extern int memory_bm_set_iterators(struct memory_bitmap *bm, int number);

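/*
 * Typical iteration pattern (sketch modeled on copy_data_pages() in
 * kernel/power/snapshot.c): reset the bitmap position, then walk every set
 * bit until BM_END_OF_MAP is returned; handle_pfn() is a hypothetical
 * consumer:
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		handle_pfn(pfn);
 */
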
#ifdef CONFIG_TOI
struct toi_module_ops;
extern int memory_bm_read(struct memory_bitmap *bm, int (*rw_chunk)
		(int rw, struct toi_module_ops *owner, char *buffer, int buffer_size));
extern int memory_bm_write(struct memory_bitmap *bm, int (*rw_chunk)
		(int rw, struct toi_module_ops *owner, char *buffer, int buffer_size));
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_EARLYSUSPEND
/* kernel/power/earlysuspend.c */
void request_suspend_state(suspend_state_t state);
suspend_state_t get_suspend_state(void);
//[MTK]
extern void suspend_syssync_enqueue(void);
extern void suspend_check_sys_sync_done(void);
#endif

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* CONFIG_PM_WAKELOCKS */
401#endif /* !CONFIG_PM_WAKELOCKS */