/*
 * kernel/power/tuxonice_pagedir.c
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001,2002 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002-2003 Florent Chabaud <fchabaud@free.fr>
 * Copyright (C) 2006-2010 Nigel Cunningham (nigel at tuxonice net)
 *
 * This file is released under the GPLv2.
 *
 * Routines for handling pagesets.
 * Note that pbes aren't actually stored as such. They're stored as
 * bitmaps and extents.
 */
16 #include <linux/suspend.h>
17 #include <linux/highmem.h>
18 #include <linux/bootmem.h>
19 #include <linux/hardirq.h>
20 #include <linux/sched.h>
21 #include <linux/cpu.h>
22 #include <asm/tlbflush.h>
24 #include "tuxonice_pageflags.h"
25 #include "tuxonice_ui.h"
26 #include "tuxonice_pagedir.h"
27 #include "tuxonice_prepare_image.h"
29 #include "tuxonice_builtin.h"
30 #include "tuxonice_alloc.h"
/*
 * State shared between toi_get_pageset1_load_addresses() and
 * add_boot_kernel_data_pbe(): the lowmem pbe currently being filled,
 * and the slot where the pointer to the next low pbe must be stored
 * (initialised to &restore_pblist before the list is built).
 */
static struct pbe *this_low_pbe;
static struct pbe **last_low_pbe_ptr;
36 void toi_reset_alt_image_pageset2_pfn(void)
38 memory_bm_position_reset(pageset2_map
);
/*
 * Head of a singly-linked list of pages that were allocated but turned
 * out to lie in pageset1 ("conflicting" pages). Each entry stores the
 * pointer to the next page at the start of its kmapped contents; see
 * free_conflicting_pages().
 */
static struct page *first_conflicting_page;
44 * free_conflicting_pages
47 static void free_conflicting_pages(void)
49 while (first_conflicting_page
) {
50 struct page
*next
= *((struct page
**)kmap(first_conflicting_page
));
51 kunmap(first_conflicting_page
);
52 toi__free_page(29, first_conflicting_page
);
53 first_conflicting_page
= next
;
57 /* __toi_get_nonconflicting_page
59 * Description: Gets order zero pages that won't be overwritten
60 * while copying the original pages.
63 struct page
*___toi_get_nonconflicting_page(int can_be_highmem
)
66 gfp_t flags
= TOI_ATOMIC_GFP
;
68 flags
|= __GFP_HIGHMEM
;
71 if (test_toi_state(TOI_LOADING_ALT_IMAGE
) && pageset2_map
&& (ptoi_pfn
!= BM_END_OF_MAP
)) {
73 ptoi_pfn
= memory_bm_next_pfn(pageset2_map
);
74 if (ptoi_pfn
!= BM_END_OF_MAP
) {
75 page
= pfn_to_page(ptoi_pfn
);
76 if (!PagePageset1(page
) && (can_be_highmem
|| !PageHighMem(page
)))
79 } while (ptoi_pfn
!= BM_END_OF_MAP
);
83 page
= toi_alloc_page(29, flags
);
85 printk(KERN_INFO
"Failed to get nonconflicting " "page.\n");
88 if (PagePageset1(page
)) {
89 struct page
**next
= (struct page
**)kmap(page
);
90 *next
= first_conflicting_page
;
91 first_conflicting_page
= page
;
94 } while (PagePageset1(page
));
/*
 * __toi_get_nonconflicting_page - lowmem-only wrapper
 *
 * Get a nonconflicting lowmem page and return its virtual address,
 * or 0 on failure. (Function braces were lost in extraction;
 * restored here.)
 */
unsigned long __toi_get_nonconflicting_page(void)
{
	struct page *page = ___toi_get_nonconflicting_page(0);

	return page ? (unsigned long) page_address(page) : 0;
}
105 static struct pbe
*get_next_pbe(struct page
**page_ptr
, struct pbe
*this_pbe
, int highmem
)
107 if (((((unsigned long)this_pbe
) & (PAGE_SIZE
- 1))
108 + 2 * sizeof(struct pbe
)) > PAGE_SIZE
) {
109 struct page
*new_page
= ___toi_get_nonconflicting_page(highmem
);
111 return ERR_PTR(-ENOMEM
);
112 this_pbe
= (struct pbe
*)kmap(new_page
);
113 memset(this_pbe
, 0, PAGE_SIZE
);
114 *page_ptr
= new_page
;
/*
 * get_pageset1_load_addresses - generate pbes for conflicting pages
 *
 * We check here that pagedir & pages it points to won't collide
 * with pages where we're going to restore from the loaded pages
 * later.
 *
 * Returns:
 *	Zero on success, one if couldn't find enough pages (shouldn't
 *	happen).
 *
 * NOTE(review): this chunk lost many lines in extraction (opening
 * brace, "do {" openers, error paths, "continue"/"else" branches and
 * the final return). The surviving tokens are preserved verbatim
 * below; each gap is flagged rather than guessed at. Reconstruct from
 * the pristine TuxOnIce source before compiling.
 */
int toi_get_pageset1_load_addresses(void)
	/* NOTE(review): opening brace lost in extraction. */
	int pfn, highallocd = 0, lowallocd = 0;
	/* Lowmem/highmem page counts still needed for the copies. */
	int low_needed = pagedir1.size - get_highmem_size(pagedir1);
	int high_needed = get_highmem_size(pagedir1);
	int low_pages_for_highmem = 0;
	gfp_t flags = GFP_ATOMIC | __GFP_NOWARN | __GFP_HIGHMEM;
	struct page *page, *high_pbe_page = NULL, *last_high_pbe_page = NULL,
		*low_pbe_page, *last_low_pbe_page = NULL;
	struct pbe **last_high_pbe_ptr = &restore_highmem_pblist,
		*this_high_pbe = NULL;
	unsigned long orig_low_pfn, orig_high_pfn;
	int high_pbes_done = 0, low_pbes_done = 0;
	int low_direct = 0, high_direct = 0, result = 0, i;
	int high_page = 1, high_offset = 0, low_page = 1, low_offset = 0;

	/*
	 * Use separate iterator indices so the walks over pageset1_map
	 * (high pbes on index 1, low pbes on index 2) and
	 * pageset1_copy_map don't disturb each other.
	 */
	memory_bm_set_iterators(pageset1_map, 3);
	memory_bm_position_reset(pageset1_map);

	memory_bm_set_iterators(pageset1_copy_map, 2);
	memory_bm_position_reset(pageset1_copy_map);

	last_low_pbe_ptr = &restore_pblist;

	/* First, allocate pages for the start of our pbe lists. */
	high_pbe_page = ___toi_get_nonconflicting_page(1);
	if (!high_pbe_page) {
		/* NOTE(review): error-path lines lost in extraction. */
	this_high_pbe = (struct pbe *) kmap(high_pbe_page);
	memset(this_high_pbe, 0, PAGE_SIZE);

	low_pbe_page = ___toi_get_nonconflicting_page(0);
	/* NOTE(review): !low_pbe_page error path lost in extraction. */
	this_low_pbe = (struct pbe *) page_address(low_pbe_page);

	/*
	 * Next, allocate the number of pages we need.
	 */
	i = low_needed + high_needed;

	/* NOTE(review): allocation-loop opener ("do {") lost in extraction. */
		flags &= ~__GFP_HIGHMEM;
		page = toi_alloc_page(30, flags);
		SetPagePageset1Copy(page);
		is_high = PageHighMem(page);
		if (PagePageset1(page)) {
			/*
			 * NOTE(review): the direct-use vs allocated
			 * accounting (high_direct/low_direct,
			 * highallocd/lowallocd) was lost in extraction.
			 */

	high_needed -= high_direct;
	low_needed -= low_direct;

	/*
	 * Do we need to use some lowmem pages for the copies of highmem
	 * pages?
	 */
	if (high_needed > highallocd) {
		low_pages_for_highmem = high_needed - highallocd;
		high_needed -= low_pages_for_highmem;
		low_needed += low_pages_for_highmem;
	}

	/*
	 * Now generate our pbes (which will be used for the atomic restore),
	 * and free unneeded pages.
	 */
	memory_bm_position_reset(pageset1_copy_map);
	for (pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1);
			pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1)) {
		page = pfn_to_page(pfn);
		is_high = PageHighMem(page);

		if (PagePageset1(page))
			/* NOTE(review): loop-skip statement lost in extraction. */

		/* Nope. We're going to use this page. Add a pbe. */
		if (is_high || low_pages_for_highmem) {
			struct page *orig_page;

			low_pages_for_highmem--;
			/* NOTE(review): "do {" opener lost in extraction;
			 * find the next original highmem page not already
			 * holding a copy. */
				orig_high_pfn = memory_bm_next_pfn_index(pageset1_map, 1);
				BUG_ON(orig_high_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_high_pfn);
			} while (!PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_high_pbe->orig_address = (void *) orig_high_pfn;
			this_high_pbe->address = page;
			this_high_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0,
				"High pbe %d/%d: %p(%d)=>%p",
				high_page, high_offset, page,
				orig_high_pfn, orig_page);
			/* Link a freshly started pbe page into the list. */
			if (last_high_pbe_page != high_pbe_page) {
				*last_high_pbe_ptr = (struct pbe *) high_pbe_page;
				if (last_high_pbe_page) {
					kunmap(last_high_pbe_page);
				/* NOTE(review): lines lost in extraction. */
				last_high_pbe_page = high_pbe_page;
			/* NOTE(review): else-branch lines lost in extraction. */
				*last_high_pbe_ptr = this_high_pbe;
			last_high_pbe_ptr = &this_high_pbe->next;
			this_high_pbe = get_next_pbe(&high_pbe_page,
					this_high_pbe, 1);
			if (IS_ERR(this_high_pbe)) {
				printk(KERN_INFO "This high pbe is an error.\n");
			/* NOTE(review): lowmem branch opener ("} else {" and
			 * "do {") lost in extraction. */
			struct page *orig_page;

			orig_low_pfn = memory_bm_next_pfn_index(pageset1_map, 2);
			BUG_ON(orig_low_pfn == BM_END_OF_MAP);
			orig_page = pfn_to_page(orig_low_pfn);
			} while (PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_low_pbe->orig_address = page_address(orig_page);
			this_low_pbe->address = page_address(page);
			this_low_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0,
				"Low pbe %d/%d: %p(%d)=>%p",
				low_page, low_offset, this_low_pbe->orig_address,
				orig_low_pfn, this_low_pbe->address);
			*last_low_pbe_ptr = this_low_pbe;
			last_low_pbe_ptr = &this_low_pbe->next;
			this_low_pbe = get_next_pbe(&low_pbe_page,
					this_low_pbe, 0);
			if (low_pbe_page != last_low_pbe_page) {
				if (last_low_pbe_page) {
				/* NOTE(review): kunmap/bookkeeping lines lost. */
				last_low_pbe_page = low_pbe_page;
			if (IS_ERR(this_low_pbe)) {
				printk(KERN_INFO "this_low_pbe is an error.\n");

	/* Done building the lists; drop the mappings of the pbe pages. */
	kunmap(high_pbe_page);
	if (last_high_pbe_page != high_pbe_page) {
		if (last_high_pbe_page)
			kunmap(last_high_pbe_page);
		toi__free_page(29, high_pbe_page);

	free_conflicting_pages();

	/* Restore single-iterator mode on the bitmaps. */
	memory_bm_set_iterators(pageset1_map, 1);
	memory_bm_set_iterators(pageset1_copy_map, 1);
	/* NOTE(review): "return result;" and closing brace lost in extraction. */
/*
 * add_boot_kernel_data_pbe - append a pbe for the boot kernel data buffer
 *
 * Copy toi_bkd into a freshly allocated nonconflicting lowmem page and
 * link a pbe (via last_low_pbe_ptr) so the atomic restore also
 * overwrites boot_kernel_data_buffer.
 *
 * NOTE(review): this function is truncated at the end of this chunk —
 * the success return and closing brace lie beyond the visible lines,
 * and the failure-path return was lost in extraction.
 */
int add_boot_kernel_data_pbe(void)
	/* NOTE(review): opening brace lost in extraction. */
	this_low_pbe->address = (char *) __toi_get_nonconflicting_page();
	if (!this_low_pbe->address) {
		printk(KERN_INFO "Failed to get bkd atomic restore buffer.");
		/* NOTE(review): failure return lost in extraction. */

	toi_bkd.size = sizeof(toi_bkd);
	memcpy(this_low_pbe->address, &toi_bkd, sizeof(toi_bkd));

	*last_low_pbe_ptr = this_low_pbe;
	this_low_pbe->orig_address = (char *) boot_kernel_data_buffer;
	this_low_pbe->next = NULL;