/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
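
/*
 * cryptd wraps an existing synchronous algorithm and exposes it through
 * the asynchronous interface, deferring the actual work to the cryptd
 * kernel thread.  Instances are requested by name; for example (a
 * sketch only, error handling omitted, key/keylen supplied by the
 * caller):
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cryptd(cbc(aes))", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_ablkcipher(tfm);
 */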

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define CRYPTD_MAX_QLEN 100
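
/*
 * Per-daemon state.  The spinlock protects the request queue against
 * enqueues from softirq context; the mutex keeps the worker thread's
 * request processing serialised against transform teardown.
 */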
struct cryptd_state {
	spinlock_t lock;
	struct mutex mutex;
	struct crypto_queue queue;
	struct task_struct *task;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_state *state;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_hash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
};
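
/* Map a transform back to the daemon state stored in its instance context. */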
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->state;
}
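
/*
 * Set the key on the underlying synchronous cipher, propagating the
 * request flags down and the result flags back up to the parent.
 */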
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
				   CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
				    CRYPTO_TFM_RES_MASK);
	return err;
}
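
/*
 * Runs in the daemon thread: perform the requested cipher operation
 * synchronously, restore the caller's completion handler and invoke it
 * with softirqs disabled to mimic the usual completion context.
 */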
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}
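
/*
 * Swap in the thread-side completion handler, queue the request under
 * the lock and wake the daemon.
 */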
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ablkcipher_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}
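
/* Instantiate the underlying synchronous cipher through the spawn. */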
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ablkcipher_tfm_in_queue(&state->queue,
					 __crypto_ablkcipher_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_blkcipher(ctx->child);
}
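
/*
 * Allocate a "cryptd(...)" instance wrapping @alg.  The priority is
 * raised by 50 so that, where a caller tolerates an asynchronous
 * implementation, the wrapper is preferred over the original.
 */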
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct cryptd_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	ctx->state = state;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_hash *cipher;

	cipher = crypto_spawn_hash(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ahash.reqsize =
		sizeof(struct cryptd_hash_request_ctx);
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ahash_tfm_in_queue(&state->queue,
				    __crypto_ahash_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_hash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_hash *child = ctx->child;
	int err;

	crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
			      CRYPTO_TFM_REQ_MASK);
	err = crypto_hash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}
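
/* Mirror of cryptd_blkcipher_enqueue for asynchronous hash requests. */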
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ahash_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ahash_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->init(&desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->update(&desc, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->final(&desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->digest(&desc, req->src, req->nbytes,
					     req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static struct crypto_instance *cryptd_alloc_hash(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
				  CRYPTO_ALG_TYPE_HASH_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ahash_type;

	inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.cra_init = cryptd_hash_init_tfm;
	inst->alg.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}
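
/* A single global daemon services all cryptd instances. */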
static struct cryptd_state state;

static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &state);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_alloc_hash(tb, &state);
	}

	return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.alloc = cryptd_alloc,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

static inline int cryptd_create_thread(struct cryptd_state *state,
				       int (*fn)(void *data), const char *name)
{
	spin_lock_init(&state->lock);
	mutex_init(&state->mutex);
	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);

	state->task = kthread_run(fn, state, name);
	if (IS_ERR(state->task))
		return PTR_ERR(state->task);

	return 0;
}

static inline void cryptd_stop_thread(struct cryptd_state *state)
{
	BUG_ON(state->queue.qlen);
	kthread_stop(state->task);
}
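
/*
 * Daemon main loop: dequeue one request per iteration while holding the
 * mutex, tell any backlogged request that it is now in progress, then
 * run the request's thread-side completion handler, which does the
 * actual work.  schedule() puts the thread to sleep when the queue is
 * empty.
 */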
static int cryptd_thread(void *data)
{
	struct cryptd_state *state = data;
	int stop;

	current->flags |= PF_NOFREEZE;

	do {
		struct crypto_async_request *req, *backlog;

		mutex_lock(&state->mutex);
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_bh(&state->lock);
		backlog = crypto_get_backlog(&state->queue);
		req = crypto_dequeue_request(&state->queue);
		spin_unlock_bh(&state->lock);

		stop = kthread_should_stop();

		if (stop || req) {
			__set_current_state(TASK_RUNNING);
			if (req) {
				if (backlog)
					backlog->complete(backlog,
							  -EINPROGRESS);
				req->complete(req, 0);
			}
		}

		mutex_unlock(&state->mutex);

		schedule();
	} while (!stop);

	return 0;
}

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		kthread_stop(state.task);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_stop_thread(&state);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");