/*
 * kernel/power/power.h — internal declarations shared by the suspend and
 * hibernation (swsusp) code.  From tag v3.10.56 of the MT8127 Alcatel
 * tablet kernel tree.
 */
1 #include <linux/suspend.h>
2 #include <linux/suspend_ioctls.h>
3 #include <linux/utsname.h>
4 #include <linux/freezer.h>
5
/*
 * Header data stored with a hibernation image; check_image_kernel()
 * compares it against the running kernel on resume.  Aligned to a full
 * page so it can be handled as an ordinary image page.
 */
struct swsusp_info {
	struct new_utsname	uts;		/* identity of the kernel that made the image */
	u32			version_code;	/* kernel version code of that kernel */
	unsigned long		num_physpages;	/* physical pages present when the image was made */
	int			cpus;		/* number of CPUs */
	unsigned long		image_pages;	/* number of data pages in the image */
	unsigned long		pages;		/* total pages, including metadata */
	unsigned long		size;		/* image size, used for preallocation */
} __attribute__((aligned(PAGE_SIZE)));
15
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

/* Save/restore the arch-specific part of the image header. */
extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);
/*
 * Fill the image header with architecture specific data instead of the
 * generic swsusp_info contents.  Returns the arch save hook's result.
 */
static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}
32
33 static inline char *check_image_kernel(struct swsusp_info *info)
34 {
35 return arch_hibernation_header_restore(info) ?
36 "architecture specific data" : NULL;
37 }
#else
extern char *check_image_kernel(struct swsusp_info *info);
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
extern int init_header(struct swsusp_info *info);

/* Resume device name; presumably set from the "resume=" option — see hibernate.c */
extern char resume_file[256];
/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

/* kernel/power/hibernate.c */
extern bool freezer_test_done;	/* NOTE(review): presumably set after a freezer-only test run — verify in hibernate.c */

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
extern void platform_recover(int platform_mode);
63
#else /* !CONFIG_HIBERNATION */

/* Without hibernation support the size-init hooks are no-ops. */
static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */
69
/* Nonzero if the page frame must not be included in a hibernation image. */
extern int pfn_is_nosave(unsigned long);

/*
 * Declare a static kobj_attribute named <name>_attr with mode 0644,
 * wired to the <name>_show()/<name>_store() callbacks.
 */
#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}
81
/* List of page backup entries (PBEs) used while restoring the image. */
extern struct pbe *restore_pblist;

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;	/* device to resume from */
extern sector_t swsusp_resume_block;	/* offset of the image on that device */

/* Low-level suspend/resume entry points, implemented per architecture. */
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);
98
99 /**
100 * Auxiliary structure used for reading the snapshot image data and
101 * metadata from and writing them to the list of page backup entries
102 * (PBEs) which is the main data structure of swsusp.
103 *
104 * Using struct snapshot_handle we can transfer the image, including its
105 * metadata, as a continuous sequence of bytes with the help of
106 * snapshot_read_next() and snapshot_write_next().
107 *
108 * The code that writes the image to a storage or transfers it to
109 * the user land is required to use snapshot_read_next() for this
110 * purpose and it should not make any assumptions regarding the internal
111 * structure of the image. Similarly, the code that reads the image from
112 * a storage or transfers it from the user land is required to use
113 * snapshot_write_next().
114 *
115 * This may allow us to change the internal structure of the image
116 * in the future with considerably less effort.
117 */
118
struct snapshot_handle {
	unsigned int cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (ie. current)
				 */
	void *buffer;		/* address of the block to read from
				 * or write to
				 */
	int sync_read;		/* Set to one to notify the caller of
				 * snapshot_write_next() that it may
				 * need to call wait_on_bio_chain()
				 */
};
131
/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

/* Extra pages needed per zone for the snapshot metadata. */
extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

/* If unset, the snapshot device cannot be open. */
extern atomic_t snapshot_device_available;

/* Swap page allocation helpers used when writing the image to swap. */
extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);
151
/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4

/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;	/* block device holding the image */

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
				unsigned int, char *);
183
#ifdef CONFIG_SUSPEND
/* Pairs a sleep-state label (presumably as shown in sysfs) with its state. */
struct pm_sleep_state {
	const char *label;
	suspend_state_t state;
};

/* kernel/power/suspend.c */
extern struct pm_sleep_state pm_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
/* Suspend not configured: entering any state is unsupported. */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */
200
#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
/* Stubs so callers need no #ifdefs when suspend testing is disabled. */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
/* No highmem configured: nothing to count or restore. */
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif
221
/*
 * Suspend test levels (TEST_FIRST..TEST_MAX); names suggest each level
 * aborts the suspend sequence at a different stage.
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

/* Currently selected test level. */
extern int pm_test_level;
241
242 #ifdef CONFIG_SUSPEND_FREEZER
/*
 * Freeze user space tasks and then kernel threads for suspend.
 * Returns 0 on success or the first error; on failure everything is
 * left thawed (see the per-call comments for who thaws what).
 */
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}
265
/* Undo suspend_freeze_processes(); thaw_processes() resumes all tasks. */
static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
270 #else
/* Freezer disabled: freezing trivially succeeds and thawing is a no-op. */
static inline int suspend_freeze_processes(void) { return 0; }

static inline void suspend_thaw_processes(void) {}
279 #endif
280
/* kernel/power/snapshot.c — return the page if it should be saved, else NULL. */
extern struct page *saveable_page(struct zone *z, unsigned long p);
#ifdef CONFIG_HIGHMEM
extern struct page *saveable_highmem_page(struct zone *z, unsigned long p);
#else
285 static
286 inline struct page *saveable_highmem_page(struct zone *z, unsigned long p)
287 {
288 return NULL;
289 }
290 #endif
291
/* How many page backup entries fit in one page. */
#define PBES_PER_PAGE	(PAGE_SIZE / sizeof(struct pbe))
extern struct list_head nosave_regions;

/**
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;		/* link in nosave_regions */
	unsigned long start_pfn;	/* first pfn of the range */
	unsigned long end_pfn;		/* last pfn of the range plus 1 */
};
305
/* Sentinel pfn value; presumably marks the end of a bitmap walk. */
#define BM_END_OF_MAP	(~0UL)

/* Number of page frames covered by one bitmap block (one page of bits). */
#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)

struct bm_block {
	struct list_head hook;		/* hook into a list of bitmap blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
	unsigned long *data;		/* bitmap representing pages */
};
316
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct bm_block *block;		/* current bitmap block */
	int bit;			/* current bit within that block */
};

struct memory_bitmap {
	struct list_head blocks;	/* list of bitmap blocks */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position *states;	/* iterator positions (most recently
					 * used bit per iterator)
					 */
	int num_states;			/* number of iterator states allocated;
					 * see memory_bm_set_iterators()
					 */
};
335
/*
 * Memory bitmap API (kernel/power/snapshot.c).  The *_index variants
 * operate on one of several iterator states — see memory_bm_set_iterators().
 */
extern int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
		int safe_needed);
extern int memory_bm_create_index(struct memory_bitmap *bm, gfp_t gfp_mask,
		int safe_needed, int index);
extern void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
extern void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn);
extern void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn);
extern void memory_bm_clear_bit_index(struct memory_bitmap *bm, unsigned long pfn, int index);
extern int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn);
extern int memory_bm_test_bit_index(struct memory_bitmap *bm, unsigned long pfn, int index);
extern unsigned long memory_bm_next_pfn(struct memory_bitmap *bm);
extern unsigned long memory_bm_next_pfn_index(struct memory_bitmap *bm,
		int index);
extern void memory_bm_position_reset(struct memory_bitmap *bm);
extern void memory_bm_clear(struct memory_bitmap *bm);
extern void memory_bm_copy(struct memory_bitmap *source,
		struct memory_bitmap *dest);
extern void memory_bm_dup(struct memory_bitmap *source,
		struct memory_bitmap *dest);
extern int memory_bm_set_iterators(struct memory_bitmap *bm, int number);

#ifdef CONFIG_TOI
/* TuxOnIce: stream a whole bitmap through the given chunk read/write hook. */
struct toi_module_ops;
extern int memory_bm_read(struct memory_bitmap *bm, int (*rw_chunk)
	(int rw, struct toi_module_ops *owner, char *buffer, int buffer_size));
extern int memory_bm_write(struct memory_bitmap *bm, int (*rw_chunk)
	(int rw, struct toi_module_ops *owner, char *buffer, int buffer_size));
#endif
364
#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

/* Autosleep disabled: locking always succeeds and the state is "on". */
static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_EARLYSUSPEND
/* kernel/power/earlysuspend.c */
void request_suspend_state(suspend_state_t state);
suspend_state_t get_suspend_state(void);
/* [MTK] vendor hooks; names suggest deferred sys_sync handling — verify in earlysuspend.c */
extern void suspend_syssync_enqueue(void) ;
extern void suspend_check_sys_sync_done(void);
#endif
391
#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* !CONFIG_PM_WAKELOCKS */