/*
 * kernel/power/tuxonice_incremental.c
 *
 * Copyright (C) 2012 Nigel Cunningham (nigel at tuxonice net)
 *
 * This file is released under the GPLv2.
 *
 * This file contains routines related to storing incremental images - that
 * is, retaining an image after an initial cycle and then storing incremental
 * changes on subsequent hibernations.
 */

#include <linux/suspend.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

#include "tuxonice_builtin.h"
#include "tuxonice.h"
#include "tuxonice_modules.h"
#include "tuxonice_sysfs.h"
#include "tuxonice_io.h"
#include "tuxonice_ui.h"
#include "tuxonice_alloc.h"

static struct toi_module_ops toi_incremental_ops;
static struct toi_module_ops *next_driver;
static unsigned long toi_incremental_bytes_in, toi_incremental_bytes_out;

static char toi_incremental_slow_cmp_name[32] = "sha1";
static int toi_incremental_digestsize;

static DEFINE_MUTEX(stats_lock);

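/*
 * Per-CPU hashing state: buffer_start holds the mapped source page, desc
 * and sg drive the hash transform over that page, and digest receives the
 * resulting toi_incremental_digestsize bytes.
 */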
struct toi_cpu_context {
	u8 *buffer_start;
	struct hash_desc desc;
	struct scatterlist sg[1];
	unsigned char *digest;
};

#define OUT_BUF_SIZE (2 * PAGE_SIZE)

static DEFINE_PER_CPU(struct toi_cpu_context, contexts);

/*
 * toi_incremental_crypto_prepare
 *
 * Prepare to do some work by allocating buffers and transforms.
 */
static int toi_incremental_crypto_prepare(void)
{
	int cpu, digestsize = toi_incremental_digestsize;

	if (!*toi_incremental_slow_cmp_name) {
		printk(KERN_INFO "TuxOnIce: Incremental image support enabled but no "
				"hash algorithm set.\n");
		return 1;
	}

	for_each_online_cpu(cpu) {
		struct toi_cpu_context *this = &per_cpu(contexts, cpu);
		this->desc.tfm = crypto_alloc_hash(toi_incremental_slow_cmp_name, 0, 0);
		if (IS_ERR(this->desc.tfm)) {
			printk(KERN_INFO "TuxOnIce: Failed to initialise the "
					"%s hashing transform.\n",
					toi_incremental_slow_cmp_name);
			this->desc.tfm = NULL;
			return 1;
		}

		if (!digestsize) {
			digestsize = crypto_hash_digestsize(this->desc.tfm);
			toi_incremental_digestsize = digestsize;
		}

		this->digest = toi_kzalloc(16, digestsize, GFP_KERNEL);
		if (!this->digest)
			return -ENOMEM;

		this->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	}

	return 0;
}

static int toi_incremental_rw_cleanup(int writing)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct toi_cpu_context *this = &per_cpu(contexts, cpu);
		if (this->desc.tfm) {
			crypto_free_hash(this->desc.tfm);
			this->desc.tfm = NULL;
		}

		if (this->digest) {
			toi_kfree(16, this->digest, toi_incremental_digestsize);
			this->digest = NULL;
		}
	}

	return 0;
}

/*
 * toi_incremental_init
 */

static int toi_incremental_init(int hibernate_or_resume)
{
	if (!hibernate_or_resume)
		return 0;

	next_driver = toi_get_next_filter(&toi_incremental_ops);

	return next_driver ? 0 : -ECHILD;
}

/*
 * toi_incremental_rw_init()
 */

static int toi_incremental_rw_init(int rw, int stream_number)
{
	if (rw == WRITE && toi_incremental_crypto_prepare()) {
		printk(KERN_ERR "Failed to initialise hashing algorithm.\n");
		printk(KERN_INFO "Continuing without calculating "
				"an incremental image.\n");
		toi_incremental_ops.enabled = 0;
	}

	return 0;
}

/*
 * toi_incremental_write_page()
 *
 * Decide whether to write a page to the image. Calculate the hash of the
 * page (SHA1 by default, or whatever algorithm the user selects) and compare
 * it to the previous value (if any). If there was no previous value or the
 * values differ, write the page. Otherwise, skip the write.
 *
 * @TODO: Clear hashes for pages that are no longer in the image!
 *
 * @buffer_page:	Pointer to a buffer of size PAGE_SIZE, containing
 *			data to be written.
 *
 * Returns:		0 on success. Otherwise the error returned by later
 *			modules, -ECHILD if we have a broken pipeline, or the
 *			error from the hashing transform.
 */
static int toi_incremental_write_page(unsigned long index, int buf_type,
		void *buffer_page, unsigned int buf_size)
{
	int ret = 0, cpu = smp_processor_id();
	struct toi_cpu_context *ctx = &per_cpu(contexts, cpu);
	int to_write = true;

	if (ctx->desc.tfm) {
		/* u8 *old_hash; */

		ctx->buffer_start = TOI_MAP(buf_type, buffer_page);

		sg_init_one(&ctx->sg[0], ctx->buffer_start, buf_size);

		ret = crypto_hash_digest(&ctx->desc, &ctx->sg[0],
				ctx->sg[0].length, ctx->digest);
		/* old_hash = get_old_hash(index); */

		TOI_UNMAP(buf_type, buffer_page);

#if 0
		if (!ret && old_hash &&
		    !memcmp(ctx->digest, old_hash, toi_incremental_digestsize))
			to_write = false;
		else
			store_hash(ctx, index, ctx->digest);
#endif
	}

	mutex_lock(&stats_lock);

	toi_incremental_bytes_in += buf_size;
	if (ret || to_write)
		toi_incremental_bytes_out += buf_size;

	mutex_unlock(&stats_lock);

	if (ret || to_write) {
		int ret2 = next_driver->write_page(index, buf_type,
				buffer_page, buf_size);
		if (!ret)
			ret = ret2;
	}

	return ret;
}
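
/*
 * Illustrative sketch only, not part of TuxOnIce: one way the
 * get_old_hash()/store_hash() helpers referenced by the disabled block
 * above might be implemented, using a chained hash table keyed on the page
 * index. The table name, bucket count, digest buffer size and GFP flags
 * are assumptions; a real implementation would also need locking (since
 * write_page() can run on several CPUs at once) and would have to free
 * entries for pages that drop out of the image (see the @TODO above).
 */
#if 0
#include <linux/hashtable.h>	/* would move to the top of the file */
#include <linux/slab.h>

struct toi_page_hash {
	struct hlist_node node;
	unsigned long index;
	u8 digest[64];			/* big enough for sha512 */
};

static DEFINE_HASHTABLE(toi_page_hashes, 10);	/* 1024 buckets */

/* Return the previously stored digest for this page index, if any. */
static u8 *get_old_hash(unsigned long index)
{
	struct toi_page_hash *entry;

	hash_for_each_possible(toi_page_hashes, entry, node, index)
		if (entry->index == index)
			return entry->digest;

	return NULL;
}

/* Record the digest just calculated for this page index. */
static void store_hash(struct toi_cpu_context *ctx, unsigned long index,
		u8 *new_hash)
{
	struct toi_page_hash *entry;
	u8 *old = get_old_hash(index);

	if (old) {
		memcpy(old, new_hash, toi_incremental_digestsize);
		return;
	}

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;

	entry->index = index;
	memcpy(entry->digest, new_hash, toi_incremental_digestsize);
	hash_add(toi_page_hashes, &entry->node, index);
}
#endif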

/*
 * toi_incremental_read_page()
 * @buffer_page:	struct page *. Pointer to a buffer of size PAGE_SIZE.
 *
 * Nothing extra to do here.
 */
static int toi_incremental_read_page(unsigned long *index, int buf_type,
		void *buffer_page, unsigned int *buf_size)
{
	return next_driver->read_page(index, TOI_PAGE, buffer_page, buf_size);
}

/*
 * toi_incremental_print_debug_stats
 * @buffer:	Pointer to a buffer into which the debug info will be printed.
 * @size:	Size of the buffer.
 *
 * Print information to be recorded for debugging purposes into a buffer.
 * Returns: Number of characters written to the buffer.
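 *
 * Example output (illustrative values):
 *   - Hash algorithm is 'sha1'.
 *    Incremental image 2097152 of 8388608 bytes (25 percent).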
 */

static int toi_incremental_print_debug_stats(char *buffer, int size)
{
	unsigned long pages_in = toi_incremental_bytes_in >> PAGE_SHIFT,
		      pages_out = toi_incremental_bytes_out >> PAGE_SHIFT;
	int len;

	/* Output the size of the incremental image. */
	if (*toi_incremental_slow_cmp_name)
		len = scnprintf(buffer, size, "- Hash algorithm is '%s'.\n",
				toi_incremental_slow_cmp_name);
	else
		len = scnprintf(buffer, size, "- Hash algorithm is not set.\n");

	if (pages_in)
		len += scnprintf(buffer + len, size - len, " Incremental image "
				"%lu of %lu bytes (%lu percent).\n",
				toi_incremental_bytes_out,
				toi_incremental_bytes_in,
				pages_out * 100 / pages_in);
	return len;
}

/*
 * toi_incremental_memory_needed
 *
 * Tell the caller how much memory we need to operate during hibernate/resume.
 * Returns: Maximum number of bytes of memory required for operation.
 */
static int toi_incremental_memory_needed(void)
{
	return 2 * PAGE_SIZE;
}

static int toi_incremental_storage_needed(void)
{
	return 2 * sizeof(unsigned long) + sizeof(int) +
		strlen(toi_incremental_slow_cmp_name) + 1;
}

/*
 * toi_incremental_save_config_info
 * @buffer:	Pointer to a buffer of size PAGE_SIZE.
 *
 * Save information needed when reloading the image at resume time.
 * Returns: Number of bytes used for saving our data.
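 *
 * Layout (matching the code below): bytes_in and bytes_out as unsigned
 * longs, then the algorithm name length including its trailing NUL as an
 * int, then the algorithm name string itself.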
 */
static int toi_incremental_save_config_info(char *buffer)
{
	int len = strlen(toi_incremental_slow_cmp_name) + 1, offset = 0;

	*((unsigned long *)buffer) = toi_incremental_bytes_in;
	offset += sizeof(unsigned long);
	*((unsigned long *)(buffer + offset)) = toi_incremental_bytes_out;
	offset += sizeof(unsigned long);
	*((int *)(buffer + offset)) = len;
	offset += sizeof(int);
	strncpy(buffer + offset, toi_incremental_slow_cmp_name, len);
	return offset + len;
}

/*
 * toi_incremental_load_config_info
 * @buffer:	Pointer to the start of the data.
 * @size:	Number of bytes that were saved.
 *
 * Description: Reload information to be retained for debugging purposes.
 */
static void toi_incremental_load_config_info(char *buffer, int size)
{
	int len, offset = 0;

	toi_incremental_bytes_in = *((unsigned long *)buffer);
	offset += sizeof(unsigned long);
	toi_incremental_bytes_out = *((unsigned long *)(buffer + offset));
	offset += sizeof(unsigned long);
	len = *((int *)(buffer + offset));
	offset += sizeof(int);
	strncpy(toi_incremental_slow_cmp_name, buffer + offset, len);
}

static void toi_incremental_pre_atomic_restore(struct toi_boot_kernel_data *bkd)
{
	bkd->incremental_bytes_in = toi_incremental_bytes_in;
	bkd->incremental_bytes_out = toi_incremental_bytes_out;
}

static void toi_incremental_post_atomic_restore(struct toi_boot_kernel_data *bkd)
{
	toi_incremental_bytes_in = bkd->incremental_bytes_in;
	toi_incremental_bytes_out = bkd->incremental_bytes_out;
}

static void toi_incremental_algo_change(void)
{
	/* Reset so the digest size is re-queried from crypto_hash_digestsize(). */
	toi_incremental_digestsize = 0;
}

/*
 * Data for our sysfs entries.
 */
static struct toi_sysfs_data sysfs_params[] = {
	SYSFS_INT("enabled", SYSFS_RW, &toi_incremental_ops.enabled, 0, 1, 0,
			NULL),
	SYSFS_STRING("algorithm", SYSFS_RW, toi_incremental_slow_cmp_name, 31, 0,
			toi_incremental_algo_change),
};
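
/*
 * These entries normally appear under the TuxOnIce sysfs tree (typically
 * /sys/power/tuxonice/incremental/ - the exact path is assumed here), e.g.:
 *
 *   echo 1 > /sys/power/tuxonice/incremental/enabled
 *   echo sha256 > /sys/power/tuxonice/incremental/algorithm
 *
 * Writing "algorithm" calls toi_incremental_algo_change(), which resets the
 * cached digest size so it is re-queried for the new transform.
 */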

/*
 * Ops structure.
 */
static struct toi_module_ops toi_incremental_ops = {
	.type = FILTER_MODULE,
	.name = "incremental",
	.directory = "incremental",
	.module = THIS_MODULE,
	.initialise = toi_incremental_init,
	.memory_needed = toi_incremental_memory_needed,
	.print_debug_info = toi_incremental_print_debug_stats,
	.save_config_info = toi_incremental_save_config_info,
	.load_config_info = toi_incremental_load_config_info,
	.storage_needed = toi_incremental_storage_needed,

	.pre_atomic_restore = toi_incremental_pre_atomic_restore,
	.post_atomic_restore = toi_incremental_post_atomic_restore,

	.rw_init = toi_incremental_rw_init,
	.rw_cleanup = toi_incremental_rw_cleanup,

	.write_page = toi_incremental_write_page,
	.read_page = toi_incremental_read_page,

	.sysfs_data = sysfs_params,
	.num_sysfs_entries = sizeof(sysfs_params) /
		sizeof(struct toi_sysfs_data),
};

/* ---- Registration ---- */

static __init int toi_incremental_load(void)
{
	return toi_register_module(&toi_incremental_ops);
}

#ifdef MODULE
static __exit void toi_incremental_unload(void)
{
	toi_unregister_module(&toi_incremental_ops);
}
module_init(toi_incremental_load);
module_exit(toi_incremental_unload);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nigel Cunningham");
MODULE_DESCRIPTION("Incremental Image Support for TuxOnIce");
#else
late_initcall(toi_incremental_load);
#endif