[CRYPTO] Add plumbing for multi-block operations
android_kernel_alcatel_ttab.git / crypto/cipher.c
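Introduce struct cipher_desc and per-mode processing functions so that ECB and CBC operate on whole runs of blocks instead of one block per call. crypt() walks the source and destination scatterlists and hands each contiguous stretch to the mode's prfn via crypt_fast(), falling back to a one-block bounce buffer in crypt_slow() when a block straddles a segment boundary; the prfn returns the number of bytes it processed.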
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"

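/*
 * Describes one multi-block cipher operation: the transform, the raw
 * single-block function (crfn), the mode-specific processor (prfn) that
 * handles one or more blocks per call and returns the number of bytes it
 * consumed, and mode state such as the IV (info).
 */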
struct cipher_desc {
        struct crypto_tfm *tfm;
        void (*crfn)(void *ctx, u8 *dst, const u8 *src);
        unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
                             const u8 *src, unsigned int nbytes);
        void *info;
};

static inline void xor_64(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}

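/*
 * Slow path: the current scatterlist segments hold less than one full
 * block, so assemble a block in a stack buffer, run the mode processor
 * on it, and scatter the result back out.
 */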
static unsigned int crypt_slow(const struct cipher_desc *desc,
                               struct scatter_walk *in,
                               struct scatter_walk *out, unsigned int bsize)
{
        u8 src[bsize];
        u8 dst[bsize];
        unsigned int n;

        n = scatterwalk_copychunks(src, in, bsize, 0);
        scatterwalk_advance(in, n);

        desc->prfn(desc, dst, src, bsize);

        n = scatterwalk_copychunks(dst, out, bsize, 1);
        scatterwalk_advance(out, n);

        return bsize;
}

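/*
 * Fast path: both walks have at least a block's worth of linear data
 * mapped, so the mode processor works directly on the pages, in place
 * when source and destination share the same buffer.
 */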
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
                                      struct scatter_walk *in,
                                      struct scatter_walk *out,
                                      unsigned int nbytes)
{
        u8 *src, *dst;

        src = in->data;
        dst = scatterwalk_samebuf(in, out) ? src : out->data;

        nbytes = desc->prfn(desc, dst, src, nbytes);

        scatterwalk_advance(in, nbytes);
        scatterwalk_advance(out, nbytes);

        return nbytes;
}

/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
                 struct scatterlist *dst,
                 struct scatterlist *src,
                 unsigned int nbytes)
{
        struct scatter_walk walk_in, walk_out;
        struct crypto_tfm *tfm = desc->tfm;
        const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);

        if (!nbytes)
                return 0;

        if (nbytes % bsize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return -EINVAL;
        }

        scatterwalk_start(&walk_in, src);
        scatterwalk_start(&walk_out, dst);

        for(;;) {
                unsigned int n;

                scatterwalk_map(&walk_in, 0);
                scatterwalk_map(&walk_out, 1);

                n = scatterwalk_clamp(&walk_in, nbytes);
                n = scatterwalk_clamp(&walk_out, n);

                if (likely(n >= bsize))
                        n = crypt_fast(desc, &walk_in, &walk_out, n);
                else
                        n = crypt_slow(desc, &walk_in, &walk_out, bsize);

                nbytes -= n;

                scatterwalk_done(&walk_in, 0, nbytes);
                scatterwalk_done(&walk_out, 1, nbytes);

                if (!nbytes)
                        return 0;

                crypto_yield(tfm);
        }
}

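/*
 * Illustrative sketch only, not part of this file: callers reach crypt()
 * through the cit_encrypt/cit_decrypt hooks installed further down,
 * normally via the crypto_cipher_* wrappers declared in the crypto.h of
 * this era.  The calls below, and the key/iv/buf/len variables, are
 * assumptions made for the example and may differ between trees.
 *
 *	struct crypto_tfm *tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
 *	struct scatterlist sg;
 *
 *	if (tfm == NULL)
 *		return -ENOMEM;
 *	crypto_cipher_setkey(tfm, key, 16);
 *	crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_blocksize(tfm));
 *
 *	sg.page   = virt_to_page(buf);
 *	sg.offset = offset_in_page(buf);
 *	sg.length = len;	(must be a whole number of blocks,
 *				 otherwise crypt() fails with -EINVAL)
 *
 *	crypto_cipher_encrypt(tfm, &sg, &sg, len);	(in place: dst == src)
 *	crypto_free_tfm(tfm);
 */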
/*
 * CBC encryption over a run of blocks: XOR the next plaintext block into
 * the IV, encrypt the result, and keep that ciphertext block as the IV
 * for the block that follows.
 */
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
                                        u8 *dst, const u8 *src,
                                        unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
        int bsize = crypto_tfm_alg_blocksize(tfm);

        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
        u8 *iv = desc->info;
        unsigned int done = 0;

        do {
                xor(iv, src);
                fn(crypto_tfm_ctx(tfm), dst, iv);
                memcpy(iv, dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) < nbytes);

        return done;
}

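/*
 * CBC decryption over a run of blocks.  When the operation is in place
 * (src == dst) each block is first decrypted into a block-sized stack
 * buffer, because the ciphertext block must survive long enough to be
 * copied into the IV for the next block.
 */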
static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
                                        u8 *dst, const u8 *src,
                                        unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
        int bsize = crypto_tfm_alg_blocksize(tfm);

        u8 stack[src == dst ? bsize : 0];
        u8 *buf = stack;
        u8 **dst_p = src == dst ? &buf : &dst;

        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
        u8 *iv = desc->info;
        unsigned int done = 0;

        do {
                u8 *tmp_dst = *dst_p;

                fn(crypto_tfm_ctx(tfm), tmp_dst, src);
                xor(tmp_dst, iv);
                memcpy(iv, src, bsize);
                if (tmp_dst != dst)
                        memcpy(dst, tmp_dst, bsize);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) < nbytes);

        return done;
}

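/*
 * ECB: apply the raw block function to each block independently; no
 * chaining state is involved.
 */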
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
                                const u8 *src, unsigned int nbytes)
{
        struct crypto_tfm *tfm = desc->tfm;
        int bsize = crypto_tfm_alg_blocksize(tfm);
        void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
        unsigned int done = 0;

        do {
                fn(crypto_tfm_ctx(tfm), dst, src);

                src += bsize;
                dst += bsize;
        } while ((done += bsize) < nbytes);

        return done;
}

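/*
 * Check the key length against the algorithm's declared limits before
 * handing the key to the algorithm's own cia_setkey().
 */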
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

        if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        } else
                return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
                                       &tfm->crt_flags);
}

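/*
 * Each mode entry point below only fills in a cipher_desc (raw block
 * function, per-mode processor and, where relevant, the IV) and lets
 * crypt() drive the scatterlist walk.
 */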
static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct cipher_desc desc;

        desc.tfm = tfm;
        desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
        desc.prfn = ecb_process;

        return crypt(&desc, dst, src, nbytes);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;

        desc.tfm = tfm;
        desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
        desc.prfn = ecb_process;

        return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;

        desc.tfm = tfm;
        desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
        desc.prfn = cbc_process_encrypt;
        desc.info = tfm->crt_cipher.cit_iv;

        return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        struct cipher_desc desc;

        desc.tfm = tfm;
        desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt;
        desc.prfn = cbc_process_encrypt;
        desc.info = iv;

        return crypt(&desc, dst, src, nbytes);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
                       unsigned int nbytes)
{
        struct cipher_desc desc;

        desc.tfm = tfm;
        desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
        desc.prfn = cbc_process_decrypt;
        desc.info = tfm->crt_cipher.cit_iv;

        return crypt(&desc, dst, src, nbytes);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
        struct cipher_desc desc;

        desc.tfm = tfm;
        desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt;
        desc.prfn = cbc_process_decrypt;
        desc.info = iv;

        return crypt(&desc, dst, src, nbytes);
}

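/*
 * Placeholders for modes that can be selected but are not implemented
 * here (CFB and CTR below); they simply fail with -ENOSYS.
 */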
static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
                   unsigned int nbytes)
{
        return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
        return -ENOSYS;
}

int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
        u32 mode = flags & CRYPTO_TFM_MODE_MASK;

        tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
        if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
                tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;

        return 0;
}

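/*
 * Install the entry points for the requested mode, pick the XOR helper
 * matching the block size for CBC, and allocate the persistent IV buffer.
 */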
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
        int ret = 0;
        struct cipher_tfm *ops = &tfm->crt_cipher;

        ops->cit_setkey = setkey;

        switch (tfm->crt_cipher.cit_mode) {
        case CRYPTO_TFM_MODE_ECB:
                ops->cit_encrypt = ecb_encrypt;
                ops->cit_decrypt = ecb_decrypt;
                break;

        case CRYPTO_TFM_MODE_CBC:
                ops->cit_encrypt = cbc_encrypt;
                ops->cit_decrypt = cbc_decrypt;
                ops->cit_encrypt_iv = cbc_encrypt_iv;
                ops->cit_decrypt_iv = cbc_decrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CFB:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        case CRYPTO_TFM_MODE_CTR:
                ops->cit_encrypt = nocrypt;
                ops->cit_decrypt = nocrypt;
                ops->cit_encrypt_iv = nocrypt_iv;
                ops->cit_decrypt_iv = nocrypt_iv;
                break;

        default:
                BUG();
        }

        if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {

                switch (crypto_tfm_alg_blocksize(tfm)) {
                case 8:
                        ops->cit_xor_block = xor_64;
                        break;

                case 16:
                        ops->cit_xor_block = xor_128;
                        break;

                default:
                        printk(KERN_WARNING "%s: block size %u not supported\n",
                               crypto_tfm_alg_name(tfm),
                               crypto_tfm_alg_blocksize(tfm));
                        ret = -EINVAL;
                        goto out;
                }

                ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
                ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
                if (ops->cit_iv == NULL)
                        ret = -ENOMEM;
        }

out:
        return ret;
}

void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
        kfree(tfm->crt_cipher.cit_iv);
}