/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);
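
/*
 * A hedged usage sketch (not part of this file): a multi-buffer
 * algorithm typically arms the flusher right after queueing a
 * partial job, so the job is not left stranded when no further
 * requests arrive.  The cstate lookup, the mb_job_pending()
 * predicate and the FLUSH_INTERVAL delay are illustrative
 * assumptions, not defined here:
 *
 *	struct mcryptd_alg_cstate *cstate =
 *		this_cpu_ptr(alg_state.alg_cstate);
 *
 *	if (mb_job_pending(cstate))
 *		mcryptd_arm_flusher(cstate, FLUSH_INTERVAL);
 */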

static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs
 * if the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		if (list_empty(&flist->list)) {
			mutex_unlock(&flist->lock);
			return;
		}
		cstate = list_entry(flist->list.next,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context; does one piece of real crypto work
 * (via req->complete) and reschedules itself if there is more work
 * to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);
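
/*
 * A minimal sketch, assuming a multi-buffer algorithm module with a
 * per-cpu mcryptd_alg_cstate: during init the driver is expected to
 * point the per-cpu delayed work at mcryptd_flusher() so that an
 * armed flush fires on the owning cpu.  The alg_state and cpu_state
 * names below are illustrative:
 *
 *	for_each_possible_cpu(cpu) {
 *		cpu_state = per_cpu_ptr(alg_state.alg_cstate, cpu);
 *		cpu_state->cpu = cpu;
 *		cpu_state->alg_state = &alg_state;
 *		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
 *	}
 */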

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return false;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;

	if (*type & *mask & CRYPTO_ALG_INTERNAL)
		return true;
	else
		return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	if (!mcryptd_check_internal(tb, &type, &mask))
		return -EINVAL;

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final = mcryptd_hash_final_enqueue;
	inst->alg.finup = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
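
/*
 * A hedged usage sketch: an outer async wrapper (an "sha1_mb"-style
 * driver, say) allocates the mcryptd-wrapped internal multi-buffer
 * hash like so; the "__intel_sha1-mb" driver name is an illustrative
 * assumption:
 *
 *	struct mcryptd_ahash *mcryptd_tfm;
 *
 *	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
 *					  CRYPTO_ALG_INTERNAL,
 *					  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(mcryptd_tfm))
 *		return PTR_ERR(mcryptd_tfm);
 *	...
 *	mcryptd_free_ahash(mcryptd_tfm);
 */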

int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by the multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);
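
/*
 * A hedged sketch of why (NULL, 0) is passed above: the inner
 * multi-buffer shash is expected to recover the mcryptd request
 * context, and hence the real data, from its shash_desc, roughly
 * (illustrative, not defined in this file):
 *
 *	struct mcryptd_hash_request_ctx *rctx =
 *		container_of(desc, struct mcryptd_hash_request_ctx, desc);
 *	...
 */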

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");