crypto: talitos - don't set done notification in hot path
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / crypto / talitos.c
CommitLineData
9c4a7965
KP
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
5228f0f7 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
9c4a7965
KP
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_platform.h>
36#include <linux/dma-mapping.h>
37#include <linux/io.h>
38#include <linux/spinlock.h>
39#include <linux/rtnetlink.h>
5a0e3ad6 40#include <linux/slab.h>
9c4a7965
KP
41
42#include <crypto/algapi.h>
43#include <crypto/aes.h>
3952f17e 44#include <crypto/des.h>
9c4a7965 45#include <crypto/sha.h>
497f2e6b 46#include <crypto/md5.h>
9c4a7965
KP
47#include <crypto/aead.h>
48#include <crypto/authenc.h>
4de9d0b5 49#include <crypto/skcipher.h>
acbf7c62
LN
50#include <crypto/hash.h>
51#include <crypto/internal/hash.h>
4de9d0b5 52#include <crypto/scatterwalk.h>
9c4a7965
KP
53
54#include "talitos.h"
55
56#define TALITOS_TIMEOUT 100000
57#define TALITOS_MAX_DATA_LEN 65535
58
59#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
60#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
61#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
62
/*
 * descriptor pointer entry
 *
 * Embedded in talitos_desc, which is DMA-mapped straight to the device
 * (see talitos_submit()), so field order and widths are part of the h/w
 * interface - do not reorder or pad.
 */
struct talitos_ptr {
	__be16 len;	/* length */
	u8 j_extent;	/* jump to sg link table and/or extent */
	u8 eptr;	/* extended address (bits 32-35 of a 36-bit address) */
	__be32 ptr;	/* address (low 32 bits) */
};
70
497f2e6b
LN
/*
 * all-zero h/w pointer entry.
 * NOTE(review): not referenced within this chunk - presumably used to
 * blank unused descriptor ptr slots elsewhere in the file; confirm.
 */
static const struct talitos_ptr zero_entry = {
	.len = 0,
	.j_extent = 0,
	.eptr = 0,
	.ptr = 0
};
77
9c4a7965
KP
/*
 * descriptor - the unit of work handed to the SEC
 *
 * DMA-mapped bidirectionally in talitos_submit(); hdr is read back by
 * flush_channel() to test the DONE bit, so the layout is h/w-defined.
 */
struct talitos_desc {
	__be32 hdr;			/* header high bits */
	__be32 hdr_lo;			/* header low bits */
	struct talitos_ptr ptr[7];	/* ptr/len pair array */
};
84
/**
 * talitos_request - descriptor submission request
 * @desc: descriptor pointer (kernel virtual)
 * @dma_desc: descriptor's physical bus address
 * @callback: whom to call when descriptor processing is done
 * @context: caller context (optional)
 *
 * @callback is invoked from flush_channel() after the channel tail lock
 * has been released.  A non-NULL @desc marks the fifo slot as in flight.
 */
struct talitos_request {
	struct talitos_desc *desc;
	dma_addr_t dma_desc;
	void (*callback) (struct device *dev, struct talitos_desc *desc,
			  void *context, int error);
	void *context;
};
99
4b992628
KP
/*
 * per-channel fifo management
 *
 * The submit (head) and release (tail) sides each get their own lock on
 * their own cacheline (____cacheline_aligned) so the two hot paths do
 * not false-share.
 */
struct talitos_channel {
	/* request fifo */
	struct talitos_request *fifo;

	/* number of requests pending in channel h/w fifo */
	atomic_t submit_count ____cacheline_aligned;

	/* request submission (head) lock */
	spinlock_t head_lock ____cacheline_aligned;
	/* index to next free descriptor request */
	int head;

	/* request release (tail) lock */
	spinlock_t tail_lock ____cacheline_aligned;
	/* index to next in-progress/done descriptor request */
	int tail;
};
118
9c4a7965
KP
/* per-device driver state */
struct talitos_private {
	struct device *dev;
	struct platform_device *ofdev;
	void __iomem *reg;	/* SEC register window */
	int irq;

	/* SEC version geometry (from device tree node) */
	unsigned int num_channels;
	unsigned int chfifo_len;
	unsigned int exec_units;
	unsigned int desc_types;

	/* SEC Compatibility info */
	unsigned long features;

	/*
	 * length of the request fifo
	 * fifo_len is chfifo_len rounded up to next power of 2
	 * so we can use bitwise ops to wrap
	 */
	unsigned int fifo_len;

	struct talitos_channel *chan;

	/* next channel to be assigned next incoming descriptor */
	atomic_t last_chan ____cacheline_aligned;

	/* request callback tasklet; scheduled by talitos_interrupt(),
	 * body is talitos_done() */
	struct tasklet_struct done_task;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* hwrng device */
	struct hwrng rng;
};
155
f3c85bc1
LN
156/* .features flag */
157#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
fe5720e2 158#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
60f208d7 159#define TALITOS_FTR_SHA224_HWINIT 0x00000004
f3c85bc1 160
81eb024c
KP
161static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
162{
163 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
a752447a 164 talitos_ptr->eptr = upper_32_bits(dma_addr);
81eb024c
KP
165}
166
9c4a7965
KP
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 *
 * DMA-maps @data for @dir and fills in all fields of @talitos_ptr:
 * big-endian length, 36-bit bus address via to_talitos_ptr(), and the
 * jump/extent byte.  Paired with unmap_single_talitos_ptr().
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	talitos_ptr->len = cpu_to_be16(len);
	to_talitos_ptr(talitos_ptr, dma_addr);
	talitos_ptr->j_extent = extent;
}
182
183/*
184 * unmap bus single (contiguous) h/w descriptor pointer
185 */
186static void unmap_single_talitos_ptr(struct device *dev,
187 struct talitos_ptr *talitos_ptr,
188 enum dma_data_direction dir)
189{
190 dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
191 be16_to_cpu(talitos_ptr->len), dir);
192}
193
/*
 * reset one channel and restore its configuration
 *
 * Sets CCCR[RESET], polls up to TALITOS_TIMEOUT iterations for the h/w
 * to clear it, then re-enables 36-bit addressing, done writeback and
 * done IRQ; also ICCR writeback on parts with h/w auth check.
 *
 * Returns 0 on success, -EIO if the reset never completes.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);

	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
221
/*
 * issue a software reset of the whole SEC and wait for completion
 *
 * Returns 0 on success, -EIO if MCR[SWR] fails to clear within
 * TALITOS_TIMEOUT polls.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
240
/*
 * Reset and initialize the device
 *
 * Double master reset (per errata), per-channel resets, then interrupt
 * enables.  Also re-used by talitos_error() to recover from fatal
 * errors.  Returns 0 or the first failing sub-reset's error code.
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
281
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Return: -EINPROGRESS if the descriptor was queued to the h/w,
 * -EAGAIN if the channel's h/w fifo is full.
 */
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* NOTE(review): relies on submit_count being initialized (outside
	 * this chunk) so that atomic_inc_not_zero() failing at zero means
	 * the h/w fifo is full - confirm against the channel init code */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish the slot's fields before desc: flush_channel() treats a
	 * non-NULL desc as "slot in flight", so desc must be stored last */
	smp_wmb();
	request->desc = desc;

	/* GO! - order the fifo write before poking the h/w fetch fifo */
	wmb();
	out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
	out_be32(priv->reg + TALITOS_FF_LO(ch),
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
338
/*
 * process what was done, notify callback of error if not
 *
 * Walks the channel fifo from tail, completing every request whose
 * descriptor has the h/w DONE bit set (status 0); if @error is set, the
 * first not-done request is completed with @error instead.  Each
 * callback runs with the tail lock dropped, so callbacks may resubmit.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
396
/*
 * process completed requests for channels that have done status
 *
 * Tasklet body.  talitos_interrupt() masked done interrupts before
 * scheduling us; once every channel has been drained, unmask them
 * again so later completions raise a fresh interrupt.
 */
static void talitos_done(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch;

	for (ch = 0; ch < priv->num_channels; ch++)
		flush_channel(dev, ch, 0, 0);

	/* At this point, all completed channels have been processed.
	 * Unmask done interrupts for channels completed later on.
	 */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
}
415
/*
 * locate current (offending) descriptor
 *
 * Matches the channel's current-descriptor-pointer register against the
 * dma_desc of each in-flight fifo entry, wrapping once around the fifo.
 * NOTE(review): only CDPR_LO is read, so the comparison uses the low
 * 32 bits of the bus address - confirm sufficient for 36-bit setups.
 *
 * Returns the matching virtual descriptor, or NULL if none matches.
 */
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail = priv->chan[ch].tail;
	dma_addr_t cur_desc;

	cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));

	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
		tail = (tail + 1) & (priv->fifo_len - 1);
		if (tail == priv->chan[ch].tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return NULL;
		}
	}

	return priv->chan[ch].fifo[tail].desc;
}
437
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Dumps the interrupt status register of the primary (SEL0) and, where
 * applicable, secondary (SEL1) execution unit selected by the failing
 * descriptor's header, then the channel's 8-entry descriptor buffer.
 */
static void report_eu_error(struct device *dev, int ch,
			    struct talitos_desc *desc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	switch (desc->hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc->hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
			in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
}
510
/*
 * recover from error interrupts
 *
 * For each channel flagged in @isr: decode CCPSR into diagnostics,
 * flush its queue with an error status, then either reset the channel
 * (double-fetch fifo overflow) or nudge it to continue.  Falls back to
 * a full device re-init for done overflow / internal timeout / rngu
 * errors, or if a channel restart times out.
 *
 * NOTE(review): @timeout is initialized once, so channels restarted
 * later in the loop poll with whatever budget remains - confirm this
 * is intended rather than a fresh TALITOS_TIMEOUT per channel.
 */
static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->reg + TALITOS_CCCR(ch),
				  TALITOS_CCCR_CONT);
			setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
			while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
				TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
593
/*
 * top-half interrupt handler
 *
 * Acks every pending cause, handles error causes synchronously via
 * talitos_error(), and defers done processing to the done_task tasklet
 * with done interrupts masked until talitos_done() unmasks them.
 */
static irqreturn_t talitos_interrupt(int irq, void *data)
{
	struct device *dev = data;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 isr, isr_lo;

	isr = in_be32(priv->reg + TALITOS_ISR);
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
	/* Acknowledge interrupt */
	out_be32(priv->reg + TALITOS_ICR, isr);
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);

	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
		talitos_error((unsigned long)data, isr, isr_lo);
	else
		if (likely(isr & TALITOS_ISR_CHDONE)) {
			/* mask further done interrupts. */
			clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
			/* done_task will unmask done interrupts at exit */
			tasklet_schedule(&priv->done_task);
		}

	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
}
618
619/*
620 * hwrng
621 */
622static int talitos_rng_data_present(struct hwrng *rng, int wait)
623{
624 struct device *dev = (struct device *)rng->priv;
625 struct talitos_private *priv = dev_get_drvdata(dev);
626 u32 ofl;
627 int i;
628
629 for (i = 0; i < 20; i++) {
630 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
631 TALITOS_RNGUSR_LO_OFL;
632 if (ofl || !wait)
633 break;
634 udelay(10);
635 }
636
637 return !!ofl;
638}
639
640static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
641{
642 struct device *dev = (struct device *)rng->priv;
643 struct talitos_private *priv = dev_get_drvdata(dev);
644
645 /* rng fifo requires 64-bit accesses */
646 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
647 *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
648
649 return sizeof(u32);
650}
651
/*
 * reset the RNGU and kick off random number generation
 *
 * Returns 0 on success, -ENODEV if the reset-done bit never sets.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	/* NOTE(review): setbits32 with a zero mask is a plain
	 * read-modify-write; presumably the register access itself
	 * triggers generation - verify against the RNGUDSR spec */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}
672
673static int talitos_register_rng(struct device *dev)
674{
675 struct talitos_private *priv = dev_get_drvdata(dev);
676
677 priv->rng.name = dev_driver_string(dev),
678 priv->rng.init = talitos_rng_init,
679 priv->rng.data_present = talitos_rng_data_present,
680 priv->rng.data_read = talitos_rng_data_read,
681 priv->rng.priv = (unsigned long)dev;
682
683 return hwrng_register(&priv->rng);
684}
685
686static void talitos_unregister_rng(struct device *dev)
687{
688 struct talitos_private *priv = dev_get_drvdata(dev);
689
690 hwrng_unregister(&priv->rng);
691}
692
693/*
694 * crypto alg
695 */
696#define TALITOS_CRA_PRIORITY 3000
697#define TALITOS_MAX_KEY_SIZE 64
3952f17e 698#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
70bcaca7 699
497f2e6b 700#define MD5_BLOCK_SIZE 64
9c4a7965
KP
701
/*
 * per-transform (tfm) context
 *
 * Holds the assigned channel, the descriptor header template, and the
 * key material.  key[] stores the auth key followed by the enc key
 * (see aead_setkey(), and the &ctx->key + authkeylen use in ipsec_esp()).
 */
struct talitos_ctx {
	struct device *dev;
	int ch;				/* channel this tfm submits to */
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key || enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total valid bytes in key[] */
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;		/* ICV length */
};
713
497f2e6b
LN
714#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
715#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
716
/*
 * per-request state for ahash operations
 *
 * NOTE(review): the hash path that manipulates these fields lies
 * outside this chunk; inline notes below are inferred from the field
 * names and should be confirmed against the ahash code.
 */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* partial-block staging buffer */
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;	/* presumably: context seeded in s/w */
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;		/* presumably: valid byte count in buf[] */
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
730
56af8cd4
LN
731static int aead_setauthsize(struct crypto_aead *authenc,
732 unsigned int authsize)
9c4a7965
KP
733{
734 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
735
736 ctx->authsize = authsize;
737
738 return 0;
739}
740
56af8cd4
LN
741static int aead_setkey(struct crypto_aead *authenc,
742 const u8 *key, unsigned int keylen)
9c4a7965
KP
743{
744 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
745 struct rtattr *rta = (void *)key;
746 struct crypto_authenc_key_param *param;
747 unsigned int authkeylen;
748 unsigned int enckeylen;
749
750 if (!RTA_OK(rta, keylen))
751 goto badkey;
752
753 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
754 goto badkey;
755
756 if (RTA_PAYLOAD(rta) < sizeof(*param))
757 goto badkey;
758
759 param = RTA_DATA(rta);
760 enckeylen = be32_to_cpu(param->enckeylen);
761
762 key += RTA_ALIGN(rta->rta_len);
763 keylen -= RTA_ALIGN(rta->rta_len);
764
765 if (keylen < enckeylen)
766 goto badkey;
767
768 authkeylen = keylen - enckeylen;
769
770 if (keylen > TALITOS_MAX_KEY_SIZE)
771 goto badkey;
772
773 memcpy(&ctx->key, key, keylen);
774
775 ctx->keylen = keylen;
776 ctx->enckeylen = enckeylen;
777 ctx->authkeylen = authkeylen;
778
779 return 0;
780
781badkey:
782 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
783 return -EINVAL;
784}
785
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @src_is_chained: input scatterlist uses chain links (mapped one
 *		    entry at a time, see talitos_map_sg())
 * @dst_is_chained: output scatterlist uses chain links
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	int src_is_chained;
	int dst_is_chained;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
809
4de9d0b5
LN
/*
 * dma map a scatterlist for the device
 *
 * Chained lists are mapped one entry at a time (dma_map_sg() is not
 * given the chance to cross chain links); either way the caller's
 * @nents is returned unchanged.
 */
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  int chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}
823
824static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
825 enum dma_data_direction dir)
826{
827 while (sg) {
828 dma_unmap_sg(dev, sg, 1, dir);
829 sg = scatterwalk_sg_next(sg);
830 }
831}
832
833static void talitos_sg_unmap(struct device *dev,
834 struct talitos_edesc *edesc,
835 struct scatterlist *src,
836 struct scatterlist *dst)
837{
838 unsigned int src_nents = edesc->src_nents ? : 1;
839 unsigned int dst_nents = edesc->dst_nents ? : 1;
840
841 if (src != dst) {
842 if (edesc->src_is_chained)
843 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
844 else
845 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
846
497f2e6b
LN
847 if (dst) {
848 if (edesc->dst_is_chained)
849 talitos_unmap_sg_chain(dev, dst,
850 DMA_FROM_DEVICE);
851 else
852 dma_unmap_sg(dev, dst, dst_nents,
853 DMA_FROM_DEVICE);
854 }
4de9d0b5
LN
855 } else
856 if (edesc->src_is_chained)
857 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
858 else
859 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
860}
861
/*
 * undo all dma mappings made for an ipsec_esp descriptor
 *
 * ptr[0] (hmac key), ptr[2] (cipher iv) and ptr[3] (cipher key) match
 * the map_single_talitos_ptr() calls in ipsec_esp(); ptr[6] is also
 * unmapped here (its mapping is outside this view).  assoc was mapped
 * as a single-entry sg, so it is unmapped the same way.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
879
/*
 * ipsec_esp descriptor callbacks
 */
/* encrypt completion: copy the generated ICV to the end of dst */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the h/w descriptor is embedded in the edesc we allocated */
	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dma_len) {
		/* the ICV was stashed just past the src+dst link tables */
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
911
/* decrypt completion when the ICV must be verified in software */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check: compare the computed ICV (stashed in the
		 * link table area) against the one at the end of dst */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
944
/* decrypt completion when the h/w performed the ICV check itself */
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status written back into the descriptor header */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
965
9c4a7965
KP
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 *
 * After the copy loop, @cryptlen holds total-wanted minus total-mapped
 * (zero or negative); the trailing entries are then emptied/trimmed so
 * the table covers exactly the requested byte count.  The final entry
 * is tagged with DESC_PTR_LNKTBL_RETURN.  Returns the number of link
 * table entries actually used.
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = scatterwalk_sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	/* drop whole entries that lie entirely past cryptlen */
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	/* shrink the last surviving entry by the remaining overshoot */
	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
					+ cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
1001
1002/*
1003 * fill in and submit ipsec_esp descriptor
1004 */
56af8cd4 1005static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
9c4a7965
KP
1006 u8 *giv, u64 seq,
1007 void (*callback) (struct device *dev,
1008 struct talitos_desc *desc,
1009 void *context, int error))
1010{
1011 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1012 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1013 struct device *dev = ctx->dev;
1014 struct talitos_desc *desc = &edesc->desc;
1015 unsigned int cryptlen = areq->cryptlen;
1016 unsigned int authsize = ctx->authsize;
e41256f1 1017 unsigned int ivsize = crypto_aead_ivsize(aead);
fa86a267 1018 int sg_count, ret;
fe5720e2 1019 int sg_link_tbl_len;
9c4a7965
KP
1020
1021 /* hmac key */
1022 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1023 0, DMA_TO_DEVICE);
1024 /* hmac data */
e41256f1
KP
1025 map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
1026 sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
9c4a7965 1027 /* cipher iv */
9c4a7965
KP
1028 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
1029 DMA_TO_DEVICE);
1030
1031 /* cipher key */
1032 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1033 (char *)&ctx->key + ctx->authkeylen, 0,
1034 DMA_TO_DEVICE);
1035
1036 /*
1037 * cipher in
1038 * map and adjust cipher len to aead request cryptlen.
1039 * extent is bytes of HMAC postpended to ciphertext,
1040 * typically 12 for ipsec
1041 */
1042 desc->ptr[4].len = cpu_to_be16(cryptlen);
1043 desc->ptr[4].j_extent = authsize;
1044
e938e465
KP
1045 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1046 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1047 : DMA_TO_DEVICE,
4de9d0b5 1048 edesc->src_is_chained);
9c4a7965
KP
1049
1050 if (sg_count == 1) {
81eb024c 1051 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
9c4a7965 1052 } else {
fe5720e2
KP
1053 sg_link_tbl_len = cryptlen;
1054
962a9c99 1055 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
fe5720e2 1056 sg_link_tbl_len = cryptlen + authsize;
e938e465 1057
fe5720e2 1058 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
70bcaca7
LN
1059 &edesc->link_tbl[0]);
1060 if (sg_count > 1) {
1061 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
81eb024c 1062 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
e938e465
KP
1063 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1064 edesc->dma_len,
1065 DMA_BIDIRECTIONAL);
70bcaca7
LN
1066 } else {
1067 /* Only one segment now, so no link tbl needed */
81eb024c
KP
1068 to_talitos_ptr(&desc->ptr[4],
1069 sg_dma_address(areq->src));
70bcaca7 1070 }
9c4a7965
KP
1071 }
1072
1073 /* cipher out */
1074 desc->ptr[5].len = cpu_to_be16(cryptlen);
1075 desc->ptr[5].j_extent = authsize;
1076
e938e465 1077 if (areq->src != areq->dst)
4de9d0b5
LN
1078 sg_count = talitos_map_sg(dev, areq->dst,
1079 edesc->dst_nents ? : 1,
1080 DMA_FROM_DEVICE,
1081 edesc->dst_is_chained);
9c4a7965
KP
1082
1083 if (sg_count == 1) {
81eb024c 1084 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
9c4a7965
KP
1085 } else {
1086 struct talitos_ptr *link_tbl_ptr =
f3c85bc1 1087 &edesc->link_tbl[edesc->src_nents + 1];
9c4a7965 1088
81eb024c
KP
1089 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1090 (edesc->src_nents + 1) *
1091 sizeof(struct talitos_ptr));
fe5720e2
KP
1092 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1093 link_tbl_ptr);
1094
f3c85bc1 1095 /* Add an entry to the link table for ICV data */
9c4a7965 1096 link_tbl_ptr += sg_count - 1;
9c4a7965 1097 link_tbl_ptr->j_extent = 0;
f3c85bc1 1098 sg_count++;
9c4a7965
KP
1099 link_tbl_ptr++;
1100 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1101 link_tbl_ptr->len = cpu_to_be16(authsize);
1102
1103 /* icv data follows link tables */
81eb024c
KP
1104 to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1105 (edesc->src_nents + edesc->dst_nents + 2) *
1106 sizeof(struct talitos_ptr));
9c4a7965
KP
1107 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1108 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1109 edesc->dma_len, DMA_BIDIRECTIONAL);
1110 }
1111
1112 /* iv out */
1113 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1114 DMA_FROM_DEVICE);
1115
5228f0f7 1116 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
fa86a267
KP
1117 if (ret != -EINPROGRESS) {
1118 ipsec_esp_unmap(dev, edesc, areq);
1119 kfree(edesc);
1120 }
1121 return ret;
9c4a7965
KP
1122}
1123
9c4a7965
KP
1124/*
1125 * derive number of elements in scatterlist
1126 */
4de9d0b5 1127static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
9c4a7965
KP
1128{
1129 struct scatterlist *sg = sg_list;
1130 int sg_nents = 0;
1131
4de9d0b5
LN
1132 *chained = 0;
1133 while (nbytes > 0) {
9c4a7965
KP
1134 sg_nents++;
1135 nbytes -= sg->length;
4de9d0b5
LN
1136 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1137 *chained = 1;
1138 sg = scatterwalk_sg_next(sg);
9c4a7965
KP
1139 }
1140
1141 return sg_nents;
1142}
1143
497f2e6b
LN
1144/**
1145 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1146 * @sgl: The SG list
1147 * @nents: Number of SG entries
1148 * @buf: Where to copy to
1149 * @buflen: The number of bytes to copy
1150 * @skip: The number of bytes to skip before copying.
1151 * Note: skip + buflen should equal SG total size.
1152 *
1153 * Returns the number of copied bytes.
1154 *
1155 **/
1156static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
1157 void *buf, size_t buflen, unsigned int skip)
1158{
1159 unsigned int offset = 0;
1160 unsigned int boffset = 0;
1161 struct sg_mapping_iter miter;
1162 unsigned long flags;
1163 unsigned int sg_flags = SG_MITER_ATOMIC;
1164 size_t total_buffer = buflen + skip;
1165
1166 sg_flags |= SG_MITER_FROM_SG;
1167
1168 sg_miter_start(&miter, sgl, nents, sg_flags);
1169
1170 local_irq_save(flags);
1171
1172 while (sg_miter_next(&miter) && offset < total_buffer) {
1173 unsigned int len;
1174 unsigned int ignore;
1175
1176 if ((offset + miter.length) > skip) {
1177 if (offset < skip) {
1178 /* Copy part of this segment */
1179 ignore = skip - offset;
1180 len = miter.length - ignore;
7260042b
LN
1181 if (boffset + len > buflen)
1182 len = buflen - boffset;
497f2e6b
LN
1183 memcpy(buf + boffset, miter.addr + ignore, len);
1184 } else {
7260042b 1185 /* Copy all of this segment (up to buflen) */
497f2e6b 1186 len = miter.length;
7260042b
LN
1187 if (boffset + len > buflen)
1188 len = buflen - boffset;
497f2e6b
LN
1189 memcpy(buf + boffset, miter.addr, len);
1190 }
1191 boffset += len;
1192 }
1193 offset += miter.length;
1194 }
1195
1196 sg_miter_stop(&miter);
1197
1198 local_irq_restore(flags);
1199 return boffset;
1200}
1201
9c4a7965 1202/*
56af8cd4 1203 * allocate and map the extended descriptor
9c4a7965 1204 */
4de9d0b5
LN
1205static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1206 struct scatterlist *src,
1207 struct scatterlist *dst,
497f2e6b 1208 int hash_result,
4de9d0b5
LN
1209 unsigned int cryptlen,
1210 unsigned int authsize,
1211 int icv_stashing,
1212 u32 cryptoflags)
9c4a7965 1213{
56af8cd4 1214 struct talitos_edesc *edesc;
9c4a7965 1215 int src_nents, dst_nents, alloc_len, dma_len;
4de9d0b5
LN
1216 int src_chained, dst_chained = 0;
1217 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
586725f8 1218 GFP_ATOMIC;
9c4a7965 1219
4de9d0b5
LN
1220 if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1221 dev_err(dev, "length exceeds h/w max limit\n");
9c4a7965
KP
1222 return ERR_PTR(-EINVAL);
1223 }
1224
4de9d0b5 1225 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
9c4a7965
KP
1226 src_nents = (src_nents == 1) ? 0 : src_nents;
1227
497f2e6b
LN
1228 if (hash_result) {
1229 dst_nents = 0;
9c4a7965 1230 } else {
497f2e6b
LN
1231 if (dst == src) {
1232 dst_nents = src_nents;
1233 } else {
1234 dst_nents = sg_count(dst, cryptlen + authsize,
1235 &dst_chained);
1236 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1237 }
9c4a7965
KP
1238 }
1239
1240 /*
1241 * allocate space for base edesc plus the link tables,
f3c85bc1 1242 * allowing for two separate entries for ICV and generated ICV (+ 2),
9c4a7965
KP
1243 * and the ICV data itself
1244 */
56af8cd4 1245 alloc_len = sizeof(struct talitos_edesc);
9c4a7965 1246 if (src_nents || dst_nents) {
f3c85bc1 1247 dma_len = (src_nents + dst_nents + 2) *
4de9d0b5 1248 sizeof(struct talitos_ptr) + authsize;
9c4a7965
KP
1249 alloc_len += dma_len;
1250 } else {
1251 dma_len = 0;
4de9d0b5 1252 alloc_len += icv_stashing ? authsize : 0;
9c4a7965
KP
1253 }
1254
586725f8 1255 edesc = kmalloc(alloc_len, GFP_DMA | flags);
9c4a7965 1256 if (!edesc) {
4de9d0b5 1257 dev_err(dev, "could not allocate edescriptor\n");
9c4a7965
KP
1258 return ERR_PTR(-ENOMEM);
1259 }
1260
1261 edesc->src_nents = src_nents;
1262 edesc->dst_nents = dst_nents;
4de9d0b5
LN
1263 edesc->src_is_chained = src_chained;
1264 edesc->dst_is_chained = dst_chained;
9c4a7965 1265 edesc->dma_len = dma_len;
497f2e6b
LN
1266 if (dma_len)
1267 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1268 edesc->dma_len,
1269 DMA_BIDIRECTIONAL);
9c4a7965
KP
1270
1271 return edesc;
1272}
1273
4de9d0b5
LN
1274static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1275 int icv_stashing)
1276{
1277 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1278 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1279
497f2e6b 1280 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
4de9d0b5
LN
1281 areq->cryptlen, ctx->authsize, icv_stashing,
1282 areq->base.flags);
1283}
1284
56af8cd4 1285static int aead_encrypt(struct aead_request *req)
9c4a7965
KP
1286{
1287 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1288 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1289 struct talitos_edesc *edesc;
9c4a7965
KP
1290
1291 /* allocate extended descriptor */
4de9d0b5 1292 edesc = aead_edesc_alloc(req, 0);
9c4a7965
KP
1293 if (IS_ERR(edesc))
1294 return PTR_ERR(edesc);
1295
1296 /* set encrypt */
70bcaca7 1297 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965
KP
1298
1299 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1300}
1301
56af8cd4 1302static int aead_decrypt(struct aead_request *req)
9c4a7965
KP
1303{
1304 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1305 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1306 unsigned int authsize = ctx->authsize;
fe5720e2 1307 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
56af8cd4 1308 struct talitos_edesc *edesc;
9c4a7965
KP
1309 struct scatterlist *sg;
1310 void *icvdata;
1311
1312 req->cryptlen -= authsize;
1313
1314 /* allocate extended descriptor */
4de9d0b5 1315 edesc = aead_edesc_alloc(req, 1);
9c4a7965
KP
1316 if (IS_ERR(edesc))
1317 return PTR_ERR(edesc);
1318
fe5720e2 1319 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
e938e465
KP
1320 ((!edesc->src_nents && !edesc->dst_nents) ||
1321 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
9c4a7965 1322
fe5720e2 1323 /* decrypt and check the ICV */
e938e465
KP
1324 edesc->desc.hdr = ctx->desc_hdr_template |
1325 DESC_HDR_DIR_INBOUND |
fe5720e2 1326 DESC_HDR_MODE1_MDEU_CICV;
9c4a7965 1327
fe5720e2
KP
1328 /* reset integrity check result bits */
1329 edesc->desc.hdr_lo = 0;
9c4a7965 1330
e938e465
KP
1331 return ipsec_esp(edesc, req, NULL, 0,
1332 ipsec_esp_decrypt_hwauth_done);
fe5720e2 1333
e938e465 1334 }
fe5720e2 1335
e938e465
KP
1336 /* Have to check the ICV with software */
1337 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
fe5720e2 1338
e938e465
KP
1339 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1340 if (edesc->dma_len)
1341 icvdata = &edesc->link_tbl[edesc->src_nents +
1342 edesc->dst_nents + 2];
1343 else
1344 icvdata = &edesc->link_tbl[0];
fe5720e2 1345
e938e465 1346 sg = sg_last(req->src, edesc->src_nents ? : 1);
fe5720e2 1347
e938e465
KP
1348 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1349 ctx->authsize);
fe5720e2 1350
e938e465 1351 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
9c4a7965
KP
1352}
1353
56af8cd4 1354static int aead_givencrypt(struct aead_givcrypt_request *req)
9c4a7965
KP
1355{
1356 struct aead_request *areq = &req->areq;
1357 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1358 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1359 struct talitos_edesc *edesc;
9c4a7965
KP
1360
1361 /* allocate extended descriptor */
4de9d0b5 1362 edesc = aead_edesc_alloc(areq, 0);
9c4a7965
KP
1363 if (IS_ERR(edesc))
1364 return PTR_ERR(edesc);
1365
1366 /* set encrypt */
70bcaca7 1367 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965
KP
1368
1369 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
ba95487d
KP
1370 /* avoid consecutive packets going out with same IV */
1371 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
9c4a7965
KP
1372
1373 return ipsec_esp(edesc, areq, req->giv, req->seq,
1374 ipsec_esp_encrypt_done);
1375}
1376
4de9d0b5
LN
1377static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1378 const u8 *key, unsigned int keylen)
1379{
1380 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1381 struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
1382
1383 if (keylen > TALITOS_MAX_KEY_SIZE)
1384 goto badkey;
1385
1386 if (keylen < alg->min_keysize || keylen > alg->max_keysize)
1387 goto badkey;
1388
1389 memcpy(&ctx->key, key, keylen);
1390 ctx->keylen = keylen;
1391
1392 return 0;
1393
1394badkey:
1395 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1396 return -EINVAL;
1397}
1398
1399static void common_nonsnoop_unmap(struct device *dev,
1400 struct talitos_edesc *edesc,
1401 struct ablkcipher_request *areq)
1402{
1403 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1404 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1405 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1406
1407 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1408
1409 if (edesc->dma_len)
1410 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1411 DMA_BIDIRECTIONAL);
1412}
1413
1414static void ablkcipher_done(struct device *dev,
1415 struct talitos_desc *desc, void *context,
1416 int err)
1417{
1418 struct ablkcipher_request *areq = context;
19bbbc63
KP
1419 struct talitos_edesc *edesc;
1420
1421 edesc = container_of(desc, struct talitos_edesc, desc);
4de9d0b5
LN
1422
1423 common_nonsnoop_unmap(dev, edesc, areq);
1424
1425 kfree(edesc);
1426
1427 areq->base.complete(&areq->base, err);
1428}
1429
1430static int common_nonsnoop(struct talitos_edesc *edesc,
1431 struct ablkcipher_request *areq,
1432 u8 *giv,
1433 void (*callback) (struct device *dev,
1434 struct talitos_desc *desc,
1435 void *context, int error))
1436{
1437 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1438 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1439 struct device *dev = ctx->dev;
1440 struct talitos_desc *desc = &edesc->desc;
1441 unsigned int cryptlen = areq->nbytes;
1442 unsigned int ivsize;
1443 int sg_count, ret;
1444
1445 /* first DWORD empty */
1446 desc->ptr[0].len = 0;
81eb024c 1447 to_talitos_ptr(&desc->ptr[0], 0);
4de9d0b5
LN
1448 desc->ptr[0].j_extent = 0;
1449
1450 /* cipher iv */
1451 ivsize = crypto_ablkcipher_ivsize(cipher);
1452 map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
1453 DMA_TO_DEVICE);
1454
1455 /* cipher key */
1456 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1457 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1458
1459 /*
1460 * cipher in
1461 */
1462 desc->ptr[3].len = cpu_to_be16(cryptlen);
1463 desc->ptr[3].j_extent = 0;
1464
1465 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1466 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1467 : DMA_TO_DEVICE,
1468 edesc->src_is_chained);
1469
1470 if (sg_count == 1) {
81eb024c 1471 to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
4de9d0b5
LN
1472 } else {
1473 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1474 &edesc->link_tbl[0]);
1475 if (sg_count > 1) {
81eb024c 1476 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
4de9d0b5 1477 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
e938e465
KP
1478 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1479 edesc->dma_len,
1480 DMA_BIDIRECTIONAL);
4de9d0b5
LN
1481 } else {
1482 /* Only one segment now, so no link tbl needed */
81eb024c
KP
1483 to_talitos_ptr(&desc->ptr[3],
1484 sg_dma_address(areq->src));
4de9d0b5
LN
1485 }
1486 }
1487
1488 /* cipher out */
1489 desc->ptr[4].len = cpu_to_be16(cryptlen);
1490 desc->ptr[4].j_extent = 0;
1491
1492 if (areq->src != areq->dst)
1493 sg_count = talitos_map_sg(dev, areq->dst,
1494 edesc->dst_nents ? : 1,
1495 DMA_FROM_DEVICE,
1496 edesc->dst_is_chained);
1497
1498 if (sg_count == 1) {
81eb024c 1499 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
4de9d0b5
LN
1500 } else {
1501 struct talitos_ptr *link_tbl_ptr =
1502 &edesc->link_tbl[edesc->src_nents + 1];
1503
81eb024c
KP
1504 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1505 (edesc->src_nents + 1) *
1506 sizeof(struct talitos_ptr));
4de9d0b5 1507 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
4de9d0b5
LN
1508 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1509 link_tbl_ptr);
1510 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1511 edesc->dma_len, DMA_BIDIRECTIONAL);
1512 }
1513
1514 /* iv out */
1515 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1516 DMA_FROM_DEVICE);
1517
1518 /* last DWORD empty */
1519 desc->ptr[6].len = 0;
81eb024c 1520 to_talitos_ptr(&desc->ptr[6], 0);
4de9d0b5
LN
1521 desc->ptr[6].j_extent = 0;
1522
5228f0f7 1523 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
4de9d0b5
LN
1524 if (ret != -EINPROGRESS) {
1525 common_nonsnoop_unmap(dev, edesc, areq);
1526 kfree(edesc);
1527 }
1528 return ret;
1529}
1530
e938e465
KP
1531static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1532 areq)
4de9d0b5
LN
1533{
1534 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1535 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1536
497f2e6b
LN
1537 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1538 areq->nbytes, 0, 0, areq->base.flags);
4de9d0b5
LN
1539}
1540
1541static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1542{
1543 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1544 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1545 struct talitos_edesc *edesc;
1546
1547 /* allocate extended descriptor */
1548 edesc = ablkcipher_edesc_alloc(areq);
1549 if (IS_ERR(edesc))
1550 return PTR_ERR(edesc);
1551
1552 /* set encrypt */
1553 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1554
1555 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1556}
1557
1558static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1559{
1560 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1561 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1562 struct talitos_edesc *edesc;
1563
1564 /* allocate extended descriptor */
1565 edesc = ablkcipher_edesc_alloc(areq);
1566 if (IS_ERR(edesc))
1567 return PTR_ERR(edesc);
1568
1569 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1570
1571 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1572}
1573
497f2e6b
LN
1574static void common_nonsnoop_hash_unmap(struct device *dev,
1575 struct talitos_edesc *edesc,
1576 struct ahash_request *areq)
1577{
1578 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1579
1580 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1581
1582 /* When using hashctx-in, must unmap it. */
1583 if (edesc->desc.ptr[1].len)
1584 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1585 DMA_TO_DEVICE);
1586
1587 if (edesc->desc.ptr[2].len)
1588 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1589 DMA_TO_DEVICE);
1590
1591 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1592
1593 if (edesc->dma_len)
1594 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1595 DMA_BIDIRECTIONAL);
1596
1597}
1598
1599static void ahash_done(struct device *dev,
1600 struct talitos_desc *desc, void *context,
1601 int err)
1602{
1603 struct ahash_request *areq = context;
1604 struct talitos_edesc *edesc =
1605 container_of(desc, struct talitos_edesc, desc);
1606 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1607
1608 if (!req_ctx->last && req_ctx->to_hash_later) {
1609 /* Position any partial block for next update/final/finup */
1610 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
5e833bc4 1611 req_ctx->nbuf = req_ctx->to_hash_later;
497f2e6b
LN
1612 }
1613 common_nonsnoop_hash_unmap(dev, edesc, areq);
1614
1615 kfree(edesc);
1616
1617 areq->base.complete(&areq->base, err);
1618}
1619
1620static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1621 struct ahash_request *areq, unsigned int length,
1622 void (*callback) (struct device *dev,
1623 struct talitos_desc *desc,
1624 void *context, int error))
1625{
1626 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1627 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1628 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1629 struct device *dev = ctx->dev;
1630 struct talitos_desc *desc = &edesc->desc;
1631 int sg_count, ret;
1632
1633 /* first DWORD empty */
1634 desc->ptr[0] = zero_entry;
1635
60f208d7
KP
1636 /* hash context in */
1637 if (!req_ctx->first || req_ctx->swinit) {
497f2e6b
LN
1638 map_single_talitos_ptr(dev, &desc->ptr[1],
1639 req_ctx->hw_context_size,
1640 (char *)req_ctx->hw_context, 0,
1641 DMA_TO_DEVICE);
60f208d7 1642 req_ctx->swinit = 0;
497f2e6b
LN
1643 } else {
1644 desc->ptr[1] = zero_entry;
1645 /* Indicate next op is not the first. */
1646 req_ctx->first = 0;
1647 }
1648
1649 /* HMAC key */
1650 if (ctx->keylen)
1651 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1652 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1653 else
1654 desc->ptr[2] = zero_entry;
1655
1656 /*
1657 * data in
1658 */
1659 desc->ptr[3].len = cpu_to_be16(length);
1660 desc->ptr[3].j_extent = 0;
1661
1662 sg_count = talitos_map_sg(dev, req_ctx->psrc,
1663 edesc->src_nents ? : 1,
1664 DMA_TO_DEVICE,
1665 edesc->src_is_chained);
1666
1667 if (sg_count == 1) {
1668 to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1669 } else {
1670 sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1671 &edesc->link_tbl[0]);
1672 if (sg_count > 1) {
1673 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1674 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1675 dma_sync_single_for_device(ctx->dev,
1676 edesc->dma_link_tbl,
1677 edesc->dma_len,
1678 DMA_BIDIRECTIONAL);
1679 } else {
1680 /* Only one segment now, so no link tbl needed */
1681 to_talitos_ptr(&desc->ptr[3],
1682 sg_dma_address(req_ctx->psrc));
1683 }
1684 }
1685
1686 /* fifth DWORD empty */
1687 desc->ptr[4] = zero_entry;
1688
1689 /* hash/HMAC out -or- hash context out */
1690 if (req_ctx->last)
1691 map_single_talitos_ptr(dev, &desc->ptr[5],
1692 crypto_ahash_digestsize(tfm),
1693 areq->result, 0, DMA_FROM_DEVICE);
1694 else
1695 map_single_talitos_ptr(dev, &desc->ptr[5],
1696 req_ctx->hw_context_size,
1697 req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1698
1699 /* last DWORD empty */
1700 desc->ptr[6] = zero_entry;
1701
5228f0f7 1702 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
497f2e6b
LN
1703 if (ret != -EINPROGRESS) {
1704 common_nonsnoop_hash_unmap(dev, edesc, areq);
1705 kfree(edesc);
1706 }
1707 return ret;
1708}
1709
1710static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1711 unsigned int nbytes)
1712{
1713 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1714 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1715 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1716
1717 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1718 nbytes, 0, 0, areq->base.flags);
1719}
1720
1721static int ahash_init(struct ahash_request *areq)
1722{
1723 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1724 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1725
1726 /* Initialize the context */
5e833bc4 1727 req_ctx->nbuf = 0;
60f208d7
KP
1728 req_ctx->first = 1; /* first indicates h/w must init its context */
1729 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1730 req_ctx->hw_context_size =
1731 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1732 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1733 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1734
1735 return 0;
1736}
1737
60f208d7
KP
1738/*
1739 * on h/w without explicit sha224 support, we initialize h/w context
1740 * manually with sha224 constants, and tell it to run sha256.
1741 */
1742static int ahash_init_sha224_swinit(struct ahash_request *areq)
1743{
1744 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1745
1746 ahash_init(areq);
1747 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1748
a752447a
KP
1749 req_ctx->hw_context[0] = SHA224_H0;
1750 req_ctx->hw_context[1] = SHA224_H1;
1751 req_ctx->hw_context[2] = SHA224_H2;
1752 req_ctx->hw_context[3] = SHA224_H3;
1753 req_ctx->hw_context[4] = SHA224_H4;
1754 req_ctx->hw_context[5] = SHA224_H5;
1755 req_ctx->hw_context[6] = SHA224_H6;
1756 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1757
1758 /* init 64-bit count */
1759 req_ctx->hw_context[8] = 0;
1760 req_ctx->hw_context[9] = 0;
1761
1762 return 0;
1763}
1764
497f2e6b
LN
1765static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1766{
1767 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1768 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1769 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1770 struct talitos_edesc *edesc;
1771 unsigned int blocksize =
1772 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1773 unsigned int nbytes_to_hash;
1774 unsigned int to_hash_later;
5e833bc4 1775 unsigned int nsg;
497f2e6b
LN
1776 int chained;
1777
5e833bc4
LN
1778 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1779 /* Buffer up to one whole block */
497f2e6b
LN
1780 sg_copy_to_buffer(areq->src,
1781 sg_count(areq->src, nbytes, &chained),
5e833bc4
LN
1782 req_ctx->buf + req_ctx->nbuf, nbytes);
1783 req_ctx->nbuf += nbytes;
497f2e6b
LN
1784 return 0;
1785 }
1786
5e833bc4
LN
1787 /* At least (blocksize + 1) bytes are available to hash */
1788 nbytes_to_hash = nbytes + req_ctx->nbuf;
1789 to_hash_later = nbytes_to_hash & (blocksize - 1);
1790
1791 if (req_ctx->last)
1792 to_hash_later = 0;
1793 else if (to_hash_later)
1794 /* There is a partial block. Hash the full block(s) now */
1795 nbytes_to_hash -= to_hash_later;
1796 else {
1797 /* Keep one block buffered */
1798 nbytes_to_hash -= blocksize;
1799 to_hash_later = blocksize;
1800 }
1801
1802 /* Chain in any previously buffered data */
1803 if (req_ctx->nbuf) {
1804 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1805 sg_init_table(req_ctx->bufsl, nsg);
1806 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1807 if (nsg > 1)
1808 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
497f2e6b 1809 req_ctx->psrc = req_ctx->bufsl;
5e833bc4 1810 } else
497f2e6b 1811 req_ctx->psrc = areq->src;
5e833bc4
LN
1812
1813 if (to_hash_later) {
1814 int nents = sg_count(areq->src, nbytes, &chained);
1815 sg_copy_end_to_buffer(areq->src, nents,
1816 req_ctx->bufnext,
1817 to_hash_later,
1818 nbytes - to_hash_later);
497f2e6b 1819 }
5e833bc4 1820 req_ctx->to_hash_later = to_hash_later;
497f2e6b 1821
5e833bc4 1822 /* Allocate extended descriptor */
497f2e6b
LN
1823 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1824 if (IS_ERR(edesc))
1825 return PTR_ERR(edesc);
1826
1827 edesc->desc.hdr = ctx->desc_hdr_template;
1828
1829 /* On last one, request SEC to pad; otherwise continue */
1830 if (req_ctx->last)
1831 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1832 else
1833 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1834
60f208d7
KP
1835 /* request SEC to INIT hash. */
1836 if (req_ctx->first && !req_ctx->swinit)
497f2e6b
LN
1837 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1838
1839 /* When the tfm context has a keylen, it's an HMAC.
1840 * A first or last (ie. not middle) descriptor must request HMAC.
1841 */
1842 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1843 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1844
1845 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1846 ahash_done);
1847}
1848
1849static int ahash_update(struct ahash_request *areq)
1850{
1851 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1852
1853 req_ctx->last = 0;
1854
1855 return ahash_process_req(areq, areq->nbytes);
1856}
1857
1858static int ahash_final(struct ahash_request *areq)
1859{
1860 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1861
1862 req_ctx->last = 1;
1863
1864 return ahash_process_req(areq, 0);
1865}
1866
1867static int ahash_finup(struct ahash_request *areq)
1868{
1869 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1870
1871 req_ctx->last = 1;
1872
1873 return ahash_process_req(areq, areq->nbytes);
1874}
1875
1876static int ahash_digest(struct ahash_request *areq)
1877{
1878 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 1879 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 1880
60f208d7 1881 ahash->init(areq);
497f2e6b
LN
1882 req_ctx->last = 1;
1883
1884 return ahash_process_req(areq, areq->nbytes);
1885}
1886
9c4a7965 1887struct talitos_alg_template {
d5e4aaef
LN
1888 u32 type;
1889 union {
1890 struct crypto_alg crypto;
acbf7c62 1891 struct ahash_alg hash;
d5e4aaef 1892 } alg;
9c4a7965
KP
1893 __be32 desc_hdr_template;
1894};
1895
1896static struct talitos_alg_template driver_algs[] = {
56af8cd4 1897 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
d5e4aaef
LN
1898 { .type = CRYPTO_ALG_TYPE_AEAD,
1899 .alg.crypto = {
56af8cd4
LN
1900 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1901 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1902 .cra_blocksize = AES_BLOCK_SIZE,
1903 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1904 .cra_type = &crypto_aead_type,
1905 .cra_aead = {
1906 .setkey = aead_setkey,
1907 .setauthsize = aead_setauthsize,
1908 .encrypt = aead_encrypt,
1909 .decrypt = aead_decrypt,
1910 .givencrypt = aead_givencrypt,
1911 .geniv = "<built-in>",
1912 .ivsize = AES_BLOCK_SIZE,
1913 .maxauthsize = SHA1_DIGEST_SIZE,
1914 }
1915 },
9c4a7965
KP
1916 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1917 DESC_HDR_SEL0_AESU |
1918 DESC_HDR_MODE0_AESU_CBC |
1919 DESC_HDR_SEL1_MDEUA |
1920 DESC_HDR_MODE1_MDEU_INIT |
1921 DESC_HDR_MODE1_MDEU_PAD |
1922 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
70bcaca7 1923 },
d5e4aaef
LN
1924 { .type = CRYPTO_ALG_TYPE_AEAD,
1925 .alg.crypto = {
56af8cd4
LN
1926 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1927 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1928 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1929 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1930 .cra_type = &crypto_aead_type,
1931 .cra_aead = {
1932 .setkey = aead_setkey,
1933 .setauthsize = aead_setauthsize,
1934 .encrypt = aead_encrypt,
1935 .decrypt = aead_decrypt,
1936 .givencrypt = aead_givencrypt,
1937 .geniv = "<built-in>",
1938 .ivsize = DES3_EDE_BLOCK_SIZE,
1939 .maxauthsize = SHA1_DIGEST_SIZE,
1940 }
1941 },
70bcaca7
LN
1942 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1943 DESC_HDR_SEL0_DEU |
1944 DESC_HDR_MODE0_DEU_CBC |
1945 DESC_HDR_MODE0_DEU_3DES |
1946 DESC_HDR_SEL1_MDEUA |
1947 DESC_HDR_MODE1_MDEU_INIT |
1948 DESC_HDR_MODE1_MDEU_PAD |
1949 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
3952f17e 1950 },
d5e4aaef
LN
1951 { .type = CRYPTO_ALG_TYPE_AEAD,
1952 .alg.crypto = {
56af8cd4
LN
1953 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1954 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1955 .cra_blocksize = AES_BLOCK_SIZE,
1956 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1957 .cra_type = &crypto_aead_type,
1958 .cra_aead = {
1959 .setkey = aead_setkey,
1960 .setauthsize = aead_setauthsize,
1961 .encrypt = aead_encrypt,
1962 .decrypt = aead_decrypt,
1963 .givencrypt = aead_givencrypt,
1964 .geniv = "<built-in>",
1965 .ivsize = AES_BLOCK_SIZE,
1966 .maxauthsize = SHA256_DIGEST_SIZE,
1967 }
1968 },
3952f17e
LN
1969 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1970 DESC_HDR_SEL0_AESU |
1971 DESC_HDR_MODE0_AESU_CBC |
1972 DESC_HDR_SEL1_MDEUA |
1973 DESC_HDR_MODE1_MDEU_INIT |
1974 DESC_HDR_MODE1_MDEU_PAD |
1975 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1976 },
d5e4aaef
LN
1977 { .type = CRYPTO_ALG_TYPE_AEAD,
1978 .alg.crypto = {
56af8cd4
LN
1979 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1980 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1981 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1982 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1983 .cra_type = &crypto_aead_type,
1984 .cra_aead = {
1985 .setkey = aead_setkey,
1986 .setauthsize = aead_setauthsize,
1987 .encrypt = aead_encrypt,
1988 .decrypt = aead_decrypt,
1989 .givencrypt = aead_givencrypt,
1990 .geniv = "<built-in>",
1991 .ivsize = DES3_EDE_BLOCK_SIZE,
1992 .maxauthsize = SHA256_DIGEST_SIZE,
1993 }
1994 },
3952f17e
LN
1995 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1996 DESC_HDR_SEL0_DEU |
1997 DESC_HDR_MODE0_DEU_CBC |
1998 DESC_HDR_MODE0_DEU_3DES |
1999 DESC_HDR_SEL1_MDEUA |
2000 DESC_HDR_MODE1_MDEU_INIT |
2001 DESC_HDR_MODE1_MDEU_PAD |
2002 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2003 },
d5e4aaef
LN
2004 { .type = CRYPTO_ALG_TYPE_AEAD,
2005 .alg.crypto = {
56af8cd4
LN
2006 .cra_name = "authenc(hmac(md5),cbc(aes))",
2007 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2008 .cra_blocksize = AES_BLOCK_SIZE,
2009 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2010 .cra_type = &crypto_aead_type,
2011 .cra_aead = {
2012 .setkey = aead_setkey,
2013 .setauthsize = aead_setauthsize,
2014 .encrypt = aead_encrypt,
2015 .decrypt = aead_decrypt,
2016 .givencrypt = aead_givencrypt,
2017 .geniv = "<built-in>",
2018 .ivsize = AES_BLOCK_SIZE,
2019 .maxauthsize = MD5_DIGEST_SIZE,
2020 }
2021 },
3952f17e
LN
2022 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2023 DESC_HDR_SEL0_AESU |
2024 DESC_HDR_MODE0_AESU_CBC |
2025 DESC_HDR_SEL1_MDEUA |
2026 DESC_HDR_MODE1_MDEU_INIT |
2027 DESC_HDR_MODE1_MDEU_PAD |
2028 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2029 },
d5e4aaef
LN
2030 { .type = CRYPTO_ALG_TYPE_AEAD,
2031 .alg.crypto = {
56af8cd4
LN
2032 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2033 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2034 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2035 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2036 .cra_type = &crypto_aead_type,
2037 .cra_aead = {
2038 .setkey = aead_setkey,
2039 .setauthsize = aead_setauthsize,
2040 .encrypt = aead_encrypt,
2041 .decrypt = aead_decrypt,
2042 .givencrypt = aead_givencrypt,
2043 .geniv = "<built-in>",
2044 .ivsize = DES3_EDE_BLOCK_SIZE,
2045 .maxauthsize = MD5_DIGEST_SIZE,
2046 }
2047 },
3952f17e
LN
2048 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2049 DESC_HDR_SEL0_DEU |
2050 DESC_HDR_MODE0_DEU_CBC |
2051 DESC_HDR_MODE0_DEU_3DES |
2052 DESC_HDR_SEL1_MDEUA |
2053 DESC_HDR_MODE1_MDEU_INIT |
2054 DESC_HDR_MODE1_MDEU_PAD |
2055 DESC_HDR_MODE1_MDEU_MD5_HMAC,
4de9d0b5
LN
2056 },
2057 /* ABLKCIPHER algorithms. */
d5e4aaef
LN
2058 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2059 .alg.crypto = {
4de9d0b5
LN
2060 .cra_name = "cbc(aes)",
2061 .cra_driver_name = "cbc-aes-talitos",
2062 .cra_blocksize = AES_BLOCK_SIZE,
2063 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2064 CRYPTO_ALG_ASYNC,
2065 .cra_type = &crypto_ablkcipher_type,
2066 .cra_ablkcipher = {
2067 .setkey = ablkcipher_setkey,
2068 .encrypt = ablkcipher_encrypt,
2069 .decrypt = ablkcipher_decrypt,
2070 .geniv = "eseqiv",
2071 .min_keysize = AES_MIN_KEY_SIZE,
2072 .max_keysize = AES_MAX_KEY_SIZE,
2073 .ivsize = AES_BLOCK_SIZE,
2074 }
2075 },
2076 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2077 DESC_HDR_SEL0_AESU |
2078 DESC_HDR_MODE0_AESU_CBC,
2079 },
d5e4aaef
LN
2080 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2081 .alg.crypto = {
4de9d0b5
LN
2082 .cra_name = "cbc(des3_ede)",
2083 .cra_driver_name = "cbc-3des-talitos",
2084 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2085 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2086 CRYPTO_ALG_ASYNC,
2087 .cra_type = &crypto_ablkcipher_type,
2088 .cra_ablkcipher = {
2089 .setkey = ablkcipher_setkey,
2090 .encrypt = ablkcipher_encrypt,
2091 .decrypt = ablkcipher_decrypt,
2092 .geniv = "eseqiv",
2093 .min_keysize = DES3_EDE_KEY_SIZE,
2094 .max_keysize = DES3_EDE_KEY_SIZE,
2095 .ivsize = DES3_EDE_BLOCK_SIZE,
2096 }
2097 },
2098 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2099 DESC_HDR_SEL0_DEU |
2100 DESC_HDR_MODE0_DEU_CBC |
2101 DESC_HDR_MODE0_DEU_3DES,
497f2e6b
LN
2102 },
2103 /* AHASH algorithms. */
2104 { .type = CRYPTO_ALG_TYPE_AHASH,
2105 .alg.hash = {
2106 .init = ahash_init,
2107 .update = ahash_update,
2108 .final = ahash_final,
2109 .finup = ahash_finup,
2110 .digest = ahash_digest,
2111 .halg.digestsize = MD5_DIGEST_SIZE,
2112 .halg.base = {
2113 .cra_name = "md5",
2114 .cra_driver_name = "md5-talitos",
2115 .cra_blocksize = MD5_BLOCK_SIZE,
2116 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2117 CRYPTO_ALG_ASYNC,
2118 .cra_type = &crypto_ahash_type
2119 }
2120 },
2121 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2122 DESC_HDR_SEL0_MDEUA |
2123 DESC_HDR_MODE0_MDEU_MD5,
2124 },
2125 { .type = CRYPTO_ALG_TYPE_AHASH,
2126 .alg.hash = {
2127 .init = ahash_init,
2128 .update = ahash_update,
2129 .final = ahash_final,
2130 .finup = ahash_finup,
2131 .digest = ahash_digest,
2132 .halg.digestsize = SHA1_DIGEST_SIZE,
2133 .halg.base = {
2134 .cra_name = "sha1",
2135 .cra_driver_name = "sha1-talitos",
2136 .cra_blocksize = SHA1_BLOCK_SIZE,
2137 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2138 CRYPTO_ALG_ASYNC,
2139 .cra_type = &crypto_ahash_type
2140 }
2141 },
2142 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2143 DESC_HDR_SEL0_MDEUA |
2144 DESC_HDR_MODE0_MDEU_SHA1,
2145 },
60f208d7
KP
2146 { .type = CRYPTO_ALG_TYPE_AHASH,
2147 .alg.hash = {
2148 .init = ahash_init,
2149 .update = ahash_update,
2150 .final = ahash_final,
2151 .finup = ahash_finup,
2152 .digest = ahash_digest,
2153 .halg.digestsize = SHA224_DIGEST_SIZE,
2154 .halg.base = {
2155 .cra_name = "sha224",
2156 .cra_driver_name = "sha224-talitos",
2157 .cra_blocksize = SHA224_BLOCK_SIZE,
2158 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2159 CRYPTO_ALG_ASYNC,
2160 .cra_type = &crypto_ahash_type
2161 }
2162 },
2163 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2164 DESC_HDR_SEL0_MDEUA |
2165 DESC_HDR_MODE0_MDEU_SHA224,
2166 },
497f2e6b
LN
2167 { .type = CRYPTO_ALG_TYPE_AHASH,
2168 .alg.hash = {
2169 .init = ahash_init,
2170 .update = ahash_update,
2171 .final = ahash_final,
2172 .finup = ahash_finup,
2173 .digest = ahash_digest,
2174 .halg.digestsize = SHA256_DIGEST_SIZE,
2175 .halg.base = {
2176 .cra_name = "sha256",
2177 .cra_driver_name = "sha256-talitos",
2178 .cra_blocksize = SHA256_BLOCK_SIZE,
2179 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2180 CRYPTO_ALG_ASYNC,
2181 .cra_type = &crypto_ahash_type
2182 }
2183 },
2184 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2185 DESC_HDR_SEL0_MDEUA |
2186 DESC_HDR_MODE0_MDEU_SHA256,
2187 },
2188 { .type = CRYPTO_ALG_TYPE_AHASH,
2189 .alg.hash = {
2190 .init = ahash_init,
2191 .update = ahash_update,
2192 .final = ahash_final,
2193 .finup = ahash_finup,
2194 .digest = ahash_digest,
2195 .halg.digestsize = SHA384_DIGEST_SIZE,
2196 .halg.base = {
2197 .cra_name = "sha384",
2198 .cra_driver_name = "sha384-talitos",
2199 .cra_blocksize = SHA384_BLOCK_SIZE,
2200 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2201 CRYPTO_ALG_ASYNC,
2202 .cra_type = &crypto_ahash_type
2203 }
2204 },
2205 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2206 DESC_HDR_SEL0_MDEUB |
2207 DESC_HDR_MODE0_MDEUB_SHA384,
2208 },
2209 { .type = CRYPTO_ALG_TYPE_AHASH,
2210 .alg.hash = {
2211 .init = ahash_init,
2212 .update = ahash_update,
2213 .final = ahash_final,
2214 .finup = ahash_finup,
2215 .digest = ahash_digest,
2216 .halg.digestsize = SHA512_DIGEST_SIZE,
2217 .halg.base = {
2218 .cra_name = "sha512",
2219 .cra_driver_name = "sha512-talitos",
2220 .cra_blocksize = SHA512_BLOCK_SIZE,
2221 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2222 CRYPTO_ALG_ASYNC,
2223 .cra_type = &crypto_ahash_type
2224 }
2225 },
2226 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2227 DESC_HDR_SEL0_MDEUB |
2228 DESC_HDR_MODE0_MDEUB_SHA512,
2229 },
9c4a7965
KP
2230};
2231
/*
 * Per-algorithm registration record: ties one registered crypto algorithm
 * back to the talitos device instance that services it.
 */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in talitos_private->alg_list */
	struct device *dev;		/* SEC device backing this algorithm */
	struct talitos_alg_template algt; /* private copy of the template,
					   * incl. the descriptor header */
};
2237
2238static int talitos_cra_init(struct crypto_tfm *tfm)
2239{
2240 struct crypto_alg *alg = tfm->__crt_alg;
19bbbc63 2241 struct talitos_crypto_alg *talitos_alg;
9c4a7965 2242 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
5228f0f7 2243 struct talitos_private *priv;
9c4a7965 2244
497f2e6b
LN
2245 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2246 talitos_alg = container_of(__crypto_ahash_alg(alg),
2247 struct talitos_crypto_alg,
2248 algt.alg.hash);
2249 else
2250 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2251 algt.alg.crypto);
19bbbc63 2252
9c4a7965
KP
2253 /* update context with ptr to dev */
2254 ctx->dev = talitos_alg->dev;
19bbbc63 2255
5228f0f7
KP
2256 /* assign SEC channel to tfm in round-robin fashion */
2257 priv = dev_get_drvdata(ctx->dev);
2258 ctx->ch = atomic_inc_return(&priv->last_chan) &
2259 (priv->num_channels - 1);
2260
9c4a7965 2261 /* copy descriptor header template value */
acbf7c62 2262 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
9c4a7965 2263
602dba5a
KP
2264 /* select done notification */
2265 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2266
497f2e6b
LN
2267 return 0;
2268}
2269
2270static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2271{
2272 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2273
2274 talitos_cra_init(tfm);
9c4a7965
KP
2275
2276 /* random first IV */
70bcaca7 2277 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
9c4a7965
KP
2278
2279 return 0;
2280}
2281
497f2e6b
LN
2282static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2283{
2284 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2285
2286 talitos_cra_init(tfm);
2287
2288 ctx->keylen = 0;
2289 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2290 sizeof(struct talitos_ahash_req_ctx));
2291
2292 return 0;
2293}
2294
9c4a7965
KP
2295/*
2296 * given the alg's descriptor header template, determine whether descriptor
2297 * type and primary/secondary execution units required match the hw
2298 * capabilities description provided in the device tree node.
2299 */
2300static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2301{
2302 struct talitos_private *priv = dev_get_drvdata(dev);
2303 int ret;
2304
2305 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2306 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2307
2308 if (SECONDARY_EU(desc_hdr_template))
2309 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2310 & priv->exec_units);
2311
2312 return ret;
2313}
2314
2dc11581 2315static int talitos_remove(struct platform_device *ofdev)
9c4a7965
KP
2316{
2317 struct device *dev = &ofdev->dev;
2318 struct talitos_private *priv = dev_get_drvdata(dev);
2319 struct talitos_crypto_alg *t_alg, *n;
2320 int i;
2321
2322 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
acbf7c62
LN
2323 switch (t_alg->algt.type) {
2324 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2325 case CRYPTO_ALG_TYPE_AEAD:
2326 crypto_unregister_alg(&t_alg->algt.alg.crypto);
2327 break;
2328 case CRYPTO_ALG_TYPE_AHASH:
2329 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2330 break;
2331 }
9c4a7965
KP
2332 list_del(&t_alg->entry);
2333 kfree(t_alg);
2334 }
2335
2336 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2337 talitos_unregister_rng(dev);
2338
4b992628 2339 for (i = 0; i < priv->num_channels; i++)
0b798247 2340 kfree(priv->chan[i].fifo);
9c4a7965 2341
4b992628 2342 kfree(priv->chan);
9c4a7965
KP
2343
2344 if (priv->irq != NO_IRQ) {
2345 free_irq(priv->irq, dev);
2346 irq_dispose_mapping(priv->irq);
2347 }
2348
2349 tasklet_kill(&priv->done_task);
9c4a7965
KP
2350
2351 iounmap(priv->reg);
2352
2353 dev_set_drvdata(dev, NULL);
2354
2355 kfree(priv);
2356
2357 return 0;
2358}
2359
2360static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2361 struct talitos_alg_template
2362 *template)
2363{
60f208d7 2364 struct talitos_private *priv = dev_get_drvdata(dev);
9c4a7965
KP
2365 struct talitos_crypto_alg *t_alg;
2366 struct crypto_alg *alg;
2367
2368 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2369 if (!t_alg)
2370 return ERR_PTR(-ENOMEM);
2371
acbf7c62
LN
2372 t_alg->algt = *template;
2373
2374 switch (t_alg->algt.type) {
2375 case CRYPTO_ALG_TYPE_ABLKCIPHER:
497f2e6b
LN
2376 alg = &t_alg->algt.alg.crypto;
2377 alg->cra_init = talitos_cra_init;
2378 break;
acbf7c62
LN
2379 case CRYPTO_ALG_TYPE_AEAD:
2380 alg = &t_alg->algt.alg.crypto;
497f2e6b 2381 alg->cra_init = talitos_cra_init_aead;
acbf7c62
LN
2382 break;
2383 case CRYPTO_ALG_TYPE_AHASH:
2384 alg = &t_alg->algt.alg.hash.halg.base;
497f2e6b 2385 alg->cra_init = talitos_cra_init_ahash;
60f208d7
KP
2386 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2387 !strcmp(alg->cra_name, "sha224")) {
2388 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2389 t_alg->algt.desc_hdr_template =
2390 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2391 DESC_HDR_SEL0_MDEUA |
2392 DESC_HDR_MODE0_MDEU_SHA256;
2393 }
497f2e6b 2394 break;
1d11911a
KP
2395 default:
2396 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2397 return ERR_PTR(-EINVAL);
acbf7c62 2398 }
9c4a7965 2399
9c4a7965 2400 alg->cra_module = THIS_MODULE;
9c4a7965 2401 alg->cra_priority = TALITOS_CRA_PRIORITY;
9c4a7965 2402 alg->cra_alignmask = 0;
9c4a7965 2403 alg->cra_ctxsize = sizeof(struct talitos_ctx);
9c4a7965 2404
9c4a7965
KP
2405 t_alg->dev = dev;
2406
2407 return t_alg;
2408}
2409
/*
 * Probe a SEC node: map IRQ and registers, read capability properties from
 * the device tree, allocate per-channel state, reset/init the hardware,
 * then register the RNG and every algorithm the hardware supports.
 *
 * Any failure jumps to err_out, which unwinds the partially-built state
 * via talitos_remove().
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* drvdata must be set early: talitos_remove() relies on it */
	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);

	INIT_LIST_HEAD(&priv->alg_list);

	priv->irq = irq_of_parse_and_map(np, 0);

	if (priv->irq == NO_IRQ) {
		dev_err(dev, "failed to map irq\n");
		err = -EINVAL;
		goto err_out;
	}

	/* get the irq line */
	err = request_irq(priv->irq, talitos_interrupt, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request irq %d\n", priv->irq);
		irq_dispose_mapping(priv->irq);
		/* mark the irq unowned so remove() won't free it again */
		priv->irq = NO_IRQ;
		goto err_out;
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/* num_channels must be a power of 2: the round-robin channel
	 * assignment in talitos_cra_init() masks with (num_channels - 1) */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	/* start each submit counter so it trips at chfifo_len - 1 entries */
	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

	/* SEC addresses descriptors with up to 36-bit DMA addresses */
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			/* one alg failing does not abort the whole probe;
			 * successfully registered algs go on alg_list so
			 * remove() can unregister them */
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
				dev_info(dev, "%s\n", name);
			}
		}
	}

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
2578
/* device tree match table: binds this driver to SEC 2.0-compatible nodes */
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
2586
/* platform driver glue; probe/remove do the full device setup/teardown */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.owner = THIS_MODULE,
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};
2596
/* module entry point: register the platform driver */
static int __init talitos_init(void)
{
	return platform_driver_register(&talitos_driver);
}
module_init(talitos_init);
2602
/* module exit point: unregister the platform driver */
static void __exit talitos_exit(void)
{
	platform_driver_unregister(&talitos_driver);
}
module_exit(talitos_exit);
2608
2609MODULE_LICENSE("GPL");
2610MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2611MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");