drivers/crypto/hifn_795x.c
1 /*
2 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/mod_devicetable.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/mm.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/scatterlist.h>
31 #include <linux/highmem.h>
32 #include <linux/crypto.h>
33 #include <linux/hw_random.h>
34 #include <linux/ktime.h>
35
36 #include <crypto/algapi.h>
37 #include <crypto/des.h>
38
39 #include <asm/kmap_types.h>
40
41 #undef dprintk
42
43 #define HIFN_TEST
44 //#define HIFN_DEBUG
45
46 #ifdef HIFN_DEBUG
47 #define dprintk(f, a...) printk(f, ##a)
48 #else
49 #define dprintk(f, a...) do {} while (0)
50 #endif
51
52 static char hifn_pll_ref[sizeof("extNNN")] = "ext";
53 module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
54 MODULE_PARM_DESC(hifn_pll_ref,
55 "PLL reference clock (pci[freq] or ext[freq], default ext)");
56
57 static atomic_t hifn_dev_number;
58
59 #define ACRYPTO_OP_DECRYPT 0
60 #define ACRYPTO_OP_ENCRYPT 1
61 #define ACRYPTO_OP_HMAC 2
62 #define ACRYPTO_OP_RNG 3
63
64 #define ACRYPTO_MODE_ECB 0
65 #define ACRYPTO_MODE_CBC 1
66 #define ACRYPTO_MODE_CFB 2
67 #define ACRYPTO_MODE_OFB 3
68
69 #define ACRYPTO_TYPE_AES_128 0
70 #define ACRYPTO_TYPE_AES_192 1
71 #define ACRYPTO_TYPE_AES_256 2
72 #define ACRYPTO_TYPE_3DES 3
73 #define ACRYPTO_TYPE_DES 4
74
75 #define PCI_VENDOR_ID_HIFN 0x13A3
76 #define PCI_DEVICE_ID_HIFN_7955 0x0020
77 #define PCI_DEVICE_ID_HIFN_7956 0x001d
78
79 /* I/O region sizes */
80
81 #define HIFN_BAR0_SIZE 0x1000
82 #define HIFN_BAR1_SIZE 0x2000
83 #define HIFN_BAR2_SIZE 0x8000
84
85 /* DMA registers */
86
87 #define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */
88 #define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */
89 #define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */
90 #define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */
91 #define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */
92 #define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */
93 #define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */
94 #define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */
95 #define HIFN_CHIP_ID 0x98 /* Chip ID */
96
97 /*
98 * Processing Unit Registers (offset from BASEREG0)
99 */
100 #define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
101 #define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
102 #define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
103 #define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
104 #define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
105 #define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
106 #define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
107 #define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
108 #define HIFN_0_SPACESIZE 0x20 /* Register space size */
109
110 /* Processing Unit Control Register (HIFN_0_PUCTRL) */
111 #define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
112 #define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
113 #define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
114 #define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
115 #define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
116
117 /* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
118 #define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
119 #define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
120 #define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
121 #define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
122 #define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
123 #define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
124 #define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
125 #define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
126 #define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
127 #define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
128
129 /* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
130 #define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
131 #define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
132 #define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
133 #define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
134 #define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
135 #define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
136 #define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
137 #define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
138 #define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
139 #define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
140 #define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
141 #define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
142 #define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
143 #define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
144 #define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
145 #define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
146 #define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
147 #define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
148 #define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
149 #define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
150 #define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
151 #define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
152 #define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
153
154 /* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
155 #define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
156 #define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
157 #define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
158 #define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
159 #define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
160 #define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
161 #define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
162 #define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
163 #define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
164 #define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
165
166 /* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
167 #define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
168 #define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
169 #define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
170 #define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
171 #define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
172 #define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
173 #define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
174 #define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
175 #define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
176 #define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
177 #define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
178 #define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
179 #define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
180 #define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
181 #define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
182 #define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
183 #define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
184
185 /* FIFO Status Register (HIFN_0_FIFOSTAT) */
186 #define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
187 #define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
188
189 /* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
190 #define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */
191
192 /*
193 * DMA Interface Registers (offset from BASEREG1)
194 */
195 #define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
196 #define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
197 #define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
198 #define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
199 #define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
200 #define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
201 #define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
202 #define HIFN_1_PLL 0x4c /* 795x: PLL config */
203 #define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
204 #define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
205 #define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
206 #define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
207 #define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
208 #define HIFN_1_REVID 0x98 /* Revision ID */
209 #define HIFN_1_UNLOCK_SECRET1 0xf4
210 #define HIFN_1_UNLOCK_SECRET2 0xfc
211 #define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
212 #define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
213 #define HIFN_1_PUB_OPLEN 0x304 /* Public Operand Length */
214 #define HIFN_1_PUB_OP 0x308 /* Public Operand */
215 #define HIFN_1_PUB_STATUS 0x30c /* Public Status */
216 #define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
217 #define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
218 #define HIFN_1_RNG_DATA 0x318 /* RNG data */
219 #define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
220 #define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
221
222 /* DMA Status and Control Register (HIFN_1_DMA_CSR) */
223 #define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */
224 #define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
225 #define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
226 #define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
227 #define HIFN_DMACSR_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
228 #define HIFN_DMACSR_D_DONE 0x10000000 /* Destination Ring Done */
229 #define HIFN_DMACSR_D_LAST 0x08000000 /* Destination Ring Last */
230 #define HIFN_DMACSR_D_WAIT 0x04000000 /* Destination Ring Waiting */
231 #define HIFN_DMACSR_D_OVER 0x02000000 /* Destination Ring Overflow */
232 #define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
233 #define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
234 #define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
235 #define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
236 #define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
237 #define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
238 #define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
239 #define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
240 #define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
241 #define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
242 #define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
243 #define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
244 #define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
245 #define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
246 #define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
247 #define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
248 #define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
249 #define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
250 #define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
251 #define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
252 #define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
253 #define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
254 #define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
255 #define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
256 #define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
257 #define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
258 #define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
259 #define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
260 #define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
261
262 /* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
263 #define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
264 #define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
265 #define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
266 #define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
267 #define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
268 #define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
269 #define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
270 #define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
271 #define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
272 #define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
273 #define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
274 #define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
275 #define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
276 #define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
277 #define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
278 #define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
279 #define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
280 #define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
281 #define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
282 #define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
283 #define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
284 #define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
285
286 /* DMA Configuration Register (HIFN_1_DMA_CNFG) */
287 #define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
288 #define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
289 #define HIFN_DMACNFG_UNLOCK 0x00000800
290 #define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
291 #define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
292 #define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
293 #define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
294 #define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
295
296 /* PLL configuration register */
297 #define HIFN_PLL_REF_CLK_HBI 0x00000000 /* HBI reference clock */
298 #define HIFN_PLL_REF_CLK_PLL 0x00000001 /* PLL reference clock */
299 #define HIFN_PLL_BP 0x00000002 /* Reference clock bypass */
300 #define HIFN_PLL_PK_CLK_HBI 0x00000000 /* PK engine HBI clock */
301 #define HIFN_PLL_PK_CLK_PLL 0x00000008 /* PK engine PLL clock */
302 #define HIFN_PLL_PE_CLK_HBI 0x00000000 /* PE engine HBI clock */
303 #define HIFN_PLL_PE_CLK_PLL 0x00000010 /* PE engine PLL clock */
304 #define HIFN_PLL_RESERVED_1 0x00000400 /* Reserved bit, must be 1 */
305 #define HIFN_PLL_ND_SHIFT 11 /* Clock multiplier shift */
306 #define HIFN_PLL_ND_MULT_2 0x00000000 /* PLL clock multiplier 2 */
307 #define HIFN_PLL_ND_MULT_4 0x00000800 /* PLL clock multiplier 4 */
308 #define HIFN_PLL_ND_MULT_6 0x00001000 /* PLL clock multiplier 6 */
309 #define HIFN_PLL_ND_MULT_8 0x00001800 /* PLL clock multiplier 8 */
310 #define HIFN_PLL_ND_MULT_10 0x00002000 /* PLL clock multiplier 10 */
311 #define HIFN_PLL_ND_MULT_12 0x00002800 /* PLL clock multiplier 12 */
312 #define HIFN_PLL_IS_1_8 0x00000000 /* charge pump (mult. 1-8) */
313 #define HIFN_PLL_IS_9_12 0x00010000 /* charge pump (mult. 9-12) */
314
315 #define HIFN_PLL_FCK_MAX 266 /* Maximum PLL frequency */
316
317 /* Public key reset register (HIFN_1_PUB_RESET) */
318 #define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
319
320 /* Public base address register (HIFN_1_PUB_BASE) */
321 #define HIFN_PUBBASE_ADDR 0x00003fff /* base address */
322
323 /* Public operand length register (HIFN_1_PUB_OPLEN) */
324 #define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */
325 #define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */
326 #define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */
327 #define HIFN_PUBOPLEN_EXP_S 7 /* exponent length shift */
328 #define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */
329 #define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */
330
331 /* Public operation register (HIFN_1_PUB_OP) */
332 #define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */
333 #define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */
334 #define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */
335 #define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */
336 #define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */
337 #define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */
338 #define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
339 #define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
340 #define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
341 #define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
342 #define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
343 #define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
344 #define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
345 #define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
346 #define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
347 #define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
348 #define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
349 #define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
350 #define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */
351 #define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */
352
353 /* Public status register (HIFN_1_PUB_STATUS) */
354 #define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
355 #define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
356
357 /* Public interrupt enable register (HIFN_1_PUB_IEN) */
358 #define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
359
360 /* Random number generator config register (HIFN_1_RNG_CONFIG) */
361 #define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
362
363 #define HIFN_NAMESIZE 32
364 #define HIFN_MAX_RESULT_ORDER 5
365
366 #define HIFN_D_CMD_RSIZE (24 * 4)
367 #define HIFN_D_SRC_RSIZE (80 * 4)
368 #define HIFN_D_DST_RSIZE (80 * 4)
369 #define HIFN_D_RES_RSIZE (24 * 4)
370
371 #define HIFN_D_DST_DALIGN 4
372
373 #define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1)
374
375 #define AES_MIN_KEY_SIZE 16
376 #define AES_MAX_KEY_SIZE 32
377
378 #define HIFN_DES_KEY_LENGTH 8
379 #define HIFN_3DES_KEY_LENGTH 24
380 #define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE
381 #define HIFN_IV_LENGTH 8
382 #define HIFN_AES_IV_LENGTH 16
383 #define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
384
385 #define HIFN_MAC_KEY_LENGTH 64
386 #define HIFN_MD5_LENGTH 16
387 #define HIFN_SHA1_LENGTH 20
388 #define HIFN_MAC_TRUNC_LENGTH 12
389
390 #define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
391 #define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4)
392 #define HIFN_USED_RESULT 12
393
394 struct hifn_desc
395 {
396 volatile __le32 l;
397 volatile __le32 p;
398 };
399
400 struct hifn_dma {
401 struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
402 struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
403 struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
404 struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
405
406 u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
407 u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
408
409 u64 test_src, test_dst;
410
411 /*
412 * Our current positions for insertion and removal from the descriptor
413 * rings.
414 */
415 volatile int cmdi, srci, dsti, resi;
416 volatile int cmdu, srcu, dstu, resu;
417 int cmdk, srck, dstk, resk;
418 };
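/*
 * Rough summary, inferred from how these fields are used in hifn_init_dma()
 * and the setup helpers below (not stated explicitly in the original
 * comment): each ring is sized RSIZE + 1 so the extra trailing descriptor
 * can serve as a permanent JUMP back to element 0. The index triplets work
 * as follows: cmdi/srci/dsti/resi are the slots where the next descriptor
 * is inserted, cmdu/srcu/dstu/resu count descriptors currently in flight,
 * and cmdk/srck/dstk/resk track the next slot to be cleaned up once the
 * hardware has finished with it.
 */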
419
420 #define HIFN_FLAG_CMD_BUSY (1<<0)
421 #define HIFN_FLAG_SRC_BUSY (1<<1)
422 #define HIFN_FLAG_DST_BUSY (1<<2)
423 #define HIFN_FLAG_RES_BUSY (1<<3)
424 #define HIFN_FLAG_OLD_KEY (1<<4)
425
426 #define HIFN_DEFAULT_ACTIVE_NUM 5
427
428 struct hifn_device
429 {
430 char name[HIFN_NAMESIZE];
431
432 int irq;
433
434 struct pci_dev *pdev;
435 void __iomem *bar[3];
436
437 unsigned long result_mem;
438 dma_addr_t dst;
439
440 void *desc_virt;
441 dma_addr_t desc_dma;
442
443 u32 dmareg;
444
445 void *sa[HIFN_D_RES_RSIZE];
446
447 spinlock_t lock;
448
449 void *priv;
450
451 u32 flags;
452 int active, started;
453 struct delayed_work work;
454 unsigned long reset;
455 unsigned long success;
456 unsigned long prev_success;
457
458 u8 snum;
459
460 struct tasklet_struct tasklet;
461
462 struct crypto_queue queue;
463 struct list_head alg_list;
464
465 unsigned int pk_clk_freq;
466
467 #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
468 unsigned int rng_wait_time;
469 ktime_t rngtime;
470 struct hwrng rng;
471 #endif
472 };
473
474 #define HIFN_D_LENGTH 0x0000ffff
475 #define HIFN_D_NOINVALID 0x01000000
476 #define HIFN_D_MASKDONEIRQ 0x02000000
477 #define HIFN_D_DESTOVER 0x04000000
478 #define HIFN_D_OVER 0x08000000
479 #define HIFN_D_LAST 0x20000000
480 #define HIFN_D_JUMP 0x40000000
481 #define HIFN_D_VALID 0x80000000
482
483 struct hifn_base_command
484 {
485 volatile __le16 masks;
486 volatile __le16 session_num;
487 volatile __le16 total_source_count;
488 volatile __le16 total_dest_count;
489 };
490
491 #define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */
492 #define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */
493 #define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */
494 #define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */
495 #define HIFN_BASE_CMD_DECODE 0x2000
496 #define HIFN_BASE_CMD_SRCLEN_M 0xc000
497 #define HIFN_BASE_CMD_SRCLEN_S 14
498 #define HIFN_BASE_CMD_DSTLEN_M 0x3000
499 #define HIFN_BASE_CMD_DSTLEN_S 12
500 #define HIFN_BASE_CMD_LENMASK_HI 0x30000
501 #define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
502
503 /*
504 * Structure to help build up the command data structure.
505 */
506 struct hifn_crypt_command
507 {
508 volatile __le16 masks;
509 volatile __le16 header_skip;
510 volatile __le16 source_count;
511 volatile __le16 reserved;
512 };
513
514 #define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
515 #define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
516 #define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
517 #define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
518 #define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
519 #define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
520 #define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
521 #define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
522 #define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
523 #define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
524 #define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
525 #define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
526 #define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
527 #define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
528 #define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
529 #define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
530 #define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
531 #define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
532 #define HIFN_CRYPT_CMD_SRCLEN_S 14
533
534 /*
535 * Structure to help build up the command data structure.
536 */
537 struct hifn_mac_command
538 {
539 volatile __le16 masks;
540 volatile __le16 header_skip;
541 volatile __le16 source_count;
542 volatile __le16 reserved;
543 };
544
545 #define HIFN_MAC_CMD_ALG_MASK 0x0001
546 #define HIFN_MAC_CMD_ALG_SHA1 0x0000
547 #define HIFN_MAC_CMD_ALG_MD5 0x0001
548 #define HIFN_MAC_CMD_MODE_MASK 0x000c
549 #define HIFN_MAC_CMD_MODE_HMAC 0x0000
550 #define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
551 #define HIFN_MAC_CMD_MODE_HASH 0x0008
552 #define HIFN_MAC_CMD_MODE_FULL 0x0004
553 #define HIFN_MAC_CMD_TRUNC 0x0010
554 #define HIFN_MAC_CMD_RESULT 0x0020
555 #define HIFN_MAC_CMD_APPEND 0x0040
556 #define HIFN_MAC_CMD_SRCLEN_M 0xc000
557 #define HIFN_MAC_CMD_SRCLEN_S 14
558
559 /*
560 * MAC POS IPsec initiates authentication after encryption on encodes
561 * and before decryption on decodes.
562 */
563 #define HIFN_MAC_CMD_POS_IPSEC 0x0200
564 #define HIFN_MAC_CMD_NEW_KEY 0x0800
565
566 struct hifn_comp_command
567 {
568 volatile __le16 masks;
569 volatile __le16 header_skip;
570 volatile __le16 source_count;
571 volatile __le16 reserved;
572 };
573
574 #define HIFN_COMP_CMD_SRCLEN_M 0xc000
575 #define HIFN_COMP_CMD_SRCLEN_S 14
576 #define HIFN_COMP_CMD_ONE 0x0100 /* must be one */
577 #define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */
578 #define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */
579 #define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */
580 #define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */
581 #define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */
582 #define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */
583 #define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */
584
585 struct hifn_base_result
586 {
587 volatile __le16 flags;
588 volatile __le16 session;
589 volatile __le16 src_cnt; /* 15:0 of source count */
590 volatile __le16 dst_cnt; /* 15:0 of dest count */
591 };
592
593 #define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
594 #define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */
595 #define HIFN_BASE_RES_SRCLEN_S 14
596 #define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */
597 #define HIFN_BASE_RES_DSTLEN_S 12
598
599 struct hifn_comp_result
600 {
601 volatile __le16 flags;
602 volatile __le16 crc;
603 };
604
605 #define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
606 #define HIFN_COMP_RES_LCB_S 8
607 #define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */
608 #define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */
609 #define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */
610
611 struct hifn_mac_result
612 {
613 volatile __le16 flags;
614 volatile __le16 reserved;
615 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
616 };
617
618 #define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */
619 #define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */
620
621 struct hifn_crypt_result
622 {
623 volatile __le16 flags;
624 volatile __le16 reserved;
625 };
626
627 #define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
628
629 #ifndef HIFN_POLL_FREQUENCY
630 #define HIFN_POLL_FREQUENCY 0x1
631 #endif
632
633 #ifndef HIFN_POLL_SCALAR
634 #define HIFN_POLL_SCALAR 0x0
635 #endif
636
637 #define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
638 #define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
639
640 struct hifn_crypto_alg
641 {
642 struct list_head entry;
643 struct crypto_alg alg;
644 struct hifn_device *dev;
645 };
646
647 #define ASYNC_SCATTERLIST_CACHE 16
648
649 #define ASYNC_FLAGS_MISALIGNED (1<<0)
650
651 struct ablkcipher_walk
652 {
653 struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
654 u32 flags;
655 int num;
656 };
657
658 struct hifn_context
659 {
660 u8 key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;
661 struct hifn_device *dev;
662 unsigned int keysize, ivsize;
663 u8 op, type, mode, unused;
664 struct ablkcipher_walk walk;
665 atomic_t sg_num;
666 };
667
668 #define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
669
670 static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
671 {
672 u32 ret;
673
674 ret = readl(dev->bar[0] + reg);
675
676 return ret;
677 }
678
679 static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
680 {
681 u32 ret;
682
683 ret = readl(dev->bar[1] + reg);
684
685 return ret;
686 }
687
688 static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
689 {
690 writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
691 }
692
693 static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
694 {
695 writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
696 }
697
698 static void hifn_wait_puc(struct hifn_device *dev)
699 {
700 int i;
701 u32 ret;
702
703 for (i=10000; i > 0; --i) {
704 ret = hifn_read_0(dev, HIFN_0_PUCTRL);
705 if (!(ret & HIFN_PUCTRL_RESET))
706 break;
707
708 udelay(1);
709 }
710
711 if (!i)
712 dprintk("%s: Failed to reset PUC unit.\n", dev->name);
713 }
714
715 static void hifn_reset_puc(struct hifn_device *dev)
716 {
717 hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
718 hifn_wait_puc(dev);
719 }
720
721 static void hifn_stop_device(struct hifn_device *dev)
722 {
723 hifn_write_1(dev, HIFN_1_DMA_CSR,
724 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
725 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
726 hifn_write_0(dev, HIFN_0_PUIER, 0);
727 hifn_write_1(dev, HIFN_1_DMA_IER, 0);
728 }
729
730 static void hifn_reset_dma(struct hifn_device *dev, int full)
731 {
732 hifn_stop_device(dev);
733
734 /*
735 * Set the poll frequency and other configuration fields to zero.
736 */
737 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
738 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
739 mdelay(1);
740
741 /*
742 * Reset DMA.
743 */
744 if (full) {
745 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
746 mdelay(1);
747 } else {
748 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
749 HIFN_DMACNFG_MSTRESET);
750 hifn_reset_puc(dev);
751 }
752
753 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
754 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
755
756 hifn_reset_puc(dev);
757 }
758
759 static u32 hifn_next_signature(u_int32_t a, u_int cnt)
760 {
761 int i;
762 u32 v;
763
764 for (i = 0; i < cnt; i++) {
765
766 /* get the parity */
767 v = a & 0x80080125;
768 v ^= v >> 16;
769 v ^= v >> 8;
770 v ^= v >> 4;
771 v ^= v >> 2;
772 v ^= v >> 1;
773
774 a = (v & 1) ^ (a << 1);
775 }
776
777 return a;
778 }
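/*
 * Worked example (illustrative only): starting from a = 0x00000001,
 * v = a & 0x80080125 = 0x1, the xor-folding collapses v to its parity (1),
 * and the next value becomes (1 & 1) ^ (0x1 << 1) = 0x3. Repeating this for
 * cnt steps walks the value through an LFSR-like sequence, which
 * hifn_enable_crypto() uses to derive the values written to the
 * HIFN_1_UNLOCK_SECRET2 register.
 */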
779
780 static struct pci2id {
781 u_short pci_vendor;
782 u_short pci_prod;
783 char card_id[13];
784 } pci2id[] = {
785 {
786 PCI_VENDOR_ID_HIFN,
787 PCI_DEVICE_ID_HIFN_7955,
788 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
789 0x00, 0x00, 0x00, 0x00, 0x00 }
790 },
791 {
792 PCI_VENDOR_ID_HIFN,
793 PCI_DEVICE_ID_HIFN_7956,
794 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
795 0x00, 0x00, 0x00, 0x00, 0x00 }
796 }
797 };
798
799 #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
800 static int hifn_rng_data_present(struct hwrng *rng, int wait)
801 {
802 struct hifn_device *dev = (struct hifn_device *)rng->priv;
803 s64 nsec;
804
805 nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
806 nsec -= dev->rng_wait_time;
807 if (nsec <= 0)
808 return 1;
809 if (!wait)
810 return 0;
811 ndelay(nsec);
812 return 1;
813 }
814
815 static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
816 {
817 struct hifn_device *dev = (struct hifn_device *)rng->priv;
818
819 *data = hifn_read_1(dev, HIFN_1_RNG_DATA);
820 dev->rngtime = ktime_get();
821 return 4;
822 }
823
824 static int hifn_register_rng(struct hifn_device *dev)
825 {
826 /*
827 * We must wait at least 256 Pk_clk cycles between two reads of the rng.
828 */
829 dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
830 256;
831
832 dev->rng.name = dev->name;
833 dev->rng.data_present = hifn_rng_data_present;
834 dev->rng.data_read = hifn_rng_data_read;
835 dev->rng.priv = (unsigned long)dev;
836
837 return hwrng_register(&dev->rng);
838 }
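/*
 * Rough arithmetic sketch, assuming the default "ext" 66MHz reference (see
 * hifn_init_pll() below): pk_clk_freq ends up at 134MHz, so rng_wait_time =
 * DIV_ROUND_UP(NSEC_PER_SEC, 134000000) * 256 = 8 * 256 = 2048ns between
 * two reads of the RNG.
 */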
839
840 static void hifn_unregister_rng(struct hifn_device *dev)
841 {
842 hwrng_unregister(&dev->rng);
843 }
844 #else
845 #define hifn_register_rng(dev) 0
846 #define hifn_unregister_rng(dev)
847 #endif
848
849 static int hifn_init_pubrng(struct hifn_device *dev)
850 {
851 int i;
852
853 hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
854 HIFN_PUBRST_RESET);
855
856 for (i=100; i > 0; --i) {
857 mdelay(1);
858
859 if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
860 break;
861 }
862
863 if (!i)
864 dprintk("Chip %s: Failed to initialise public key engine.\n",
865 dev->name);
866 else {
867 hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
868 dev->dmareg |= HIFN_DMAIER_PUBDONE;
869 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
870
871 dprintk("Chip %s: Public key engine has been sucessfully "
872 "initialised.\n", dev->name);
873 }
874
875 /*
876 * Enable RNG engine.
877 */
878
879 hifn_write_1(dev, HIFN_1_RNG_CONFIG,
880 hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
881 dprintk("Chip %s: RNG engine has been successfully initialised.\n",
882 dev->name);
883
884 #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
885 /* First value must be discarded */
886 hifn_read_1(dev, HIFN_1_RNG_DATA);
887 dev->rngtime = ktime_get();
888 #endif
889 return 0;
890 }
891
892 static int hifn_enable_crypto(struct hifn_device *dev)
893 {
894 u32 dmacfg, addr;
895 char *offtbl = NULL;
896 int i;
897
898 for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
899 if (pci2id[i].pci_vendor == dev->pdev->vendor &&
900 pci2id[i].pci_prod == dev->pdev->device) {
901 offtbl = pci2id[i].card_id;
902 break;
903 }
904 }
905
906 if (offtbl == NULL) {
907 dprintk("Chip %s: Unknown card!\n", dev->name);
908 return -ENODEV;
909 }
910
911 dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
912
913 hifn_write_1(dev, HIFN_1_DMA_CNFG,
914 HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
915 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
916 mdelay(1);
917 addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
918 mdelay(1);
919 hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
920 mdelay(1);
921
922 for (i=0; i<12; ++i) {
923 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
924 hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
925
926 mdelay(1);
927 }
928 hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
929
930 dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
931
932 return 0;
933 }
934
935 static void hifn_init_dma(struct hifn_device *dev)
936 {
937 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
938 u32 dptr = dev->desc_dma;
939 int i;
940
941 for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
942 dma->cmdr[i].p = __cpu_to_le32(dptr +
943 offsetof(struct hifn_dma, command_bufs[i][0]));
944 for (i=0; i<HIFN_D_RES_RSIZE; ++i)
945 dma->resr[i].p = __cpu_to_le32(dptr +
946 offsetof(struct hifn_dma, result_bufs[i][0]));
947
948 /*
949 * Setup LAST descriptors.
950 */
951 dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
952 offsetof(struct hifn_dma, cmdr[0]));
953 dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
954 offsetof(struct hifn_dma, srcr[0]));
955 dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
956 offsetof(struct hifn_dma, dstr[0]));
957 dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
958 offsetof(struct hifn_dma, resr[0]));
959
960 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
961 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
962 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
963 }
964
965 /*
966 * Initialize the PLL. We need to know the frequency of the reference clock
967 * to calculate the optimal multiplier. For PCI we assume 66MHz, since that
968 * allows us to operate without the risk of overclocking the chip. If it
969 * actually uses 33MHz, the chip will operate at half speed; this can be
970 * overridden by specifying the frequency as a module parameter (pci33).
971 *
972 * Unfortunately the PCI clock is not very suitable since the HIFN needs a
973 * stable clock and the PCI clock frequency may vary, so the default is the
974 * external clock. There is no way to find out its frequency, so we default
975 * to 66MHz since, according to Mike Ham of HiFn, almost every board in existence
976 * has an external crystal populated at 66MHz.
977 */
978 static void hifn_init_pll(struct hifn_device *dev)
979 {
980 unsigned int freq, m;
981 u32 pllcfg;
982
983 pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;
984
985 if (strncmp(hifn_pll_ref, "ext", 3) == 0)
986 pllcfg |= HIFN_PLL_REF_CLK_PLL;
987 else
988 pllcfg |= HIFN_PLL_REF_CLK_HBI;
989
990 if (hifn_pll_ref[3] != '\0')
991 freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
992 else {
993 freq = 66;
994 printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
995 "override with hifn_pll_ref=%.3s<frequency>\n",
996 freq, hifn_pll_ref);
997 }
998
999 m = HIFN_PLL_FCK_MAX / freq;
1000
1001 pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
1002 if (m <= 8)
1003 pllcfg |= HIFN_PLL_IS_1_8;
1004 else
1005 pllcfg |= HIFN_PLL_IS_9_12;
1006
1007 /* Select clock source and enable clock bypass */
1008 hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1009 HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);
1010
1011 /* Let the chip lock to the input clock */
1012 mdelay(10);
1013
1014 /* Disable clock bypass */
1015 hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1016 HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);
1017
1018 /* Switch the engines to the PLL */
1019 hifn_write_1(dev, HIFN_1_PLL, pllcfg |
1020 HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);
1021
1022 /*
1023 * The Fpk_clk runs at half the total speed. Its frequency is needed to
1024 * calculate the minimum time between two reads of the rng. Since 33MHz
1025 * is actually 33.333... we overestimate the frequency here, resulting
1026 * in slightly larger intervals.
1027 */
1028 dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
1029 }
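/*
 * Worked example (illustrative only), for the default 66MHz external clock:
 * m = HIFN_PLL_FCK_MAX / freq = 266 / 66 = 4, so the multiplier field is
 * (4 / 2 - 1) << HIFN_PLL_ND_SHIFT = HIFN_PLL_ND_MULT_4 and, since m <= 8,
 * HIFN_PLL_IS_1_8 is selected. The resulting Fpk_clk estimate is
 * pk_clk_freq = 1000000 * (66 + 1) * 4 / 2 = 134000000 (134MHz).
 */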
1030
1031 static void hifn_init_registers(struct hifn_device *dev)
1032 {
1033 u32 dptr = dev->desc_dma;
1034
1035 /* Initialization magic... */
1036 hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
1037 hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
1038 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
1039
1040 /* write all 4 ring address registers */
1041 hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
1042 offsetof(struct hifn_dma, cmdr[0]));
1043 hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
1044 offsetof(struct hifn_dma, srcr[0]));
1045 hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
1046 offsetof(struct hifn_dma, dstr[0]));
1047 hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
1048 offsetof(struct hifn_dma, resr[0]));
1049
1050 mdelay(2);
1051 #if 0
1052 hifn_write_1(dev, HIFN_1_DMA_CSR,
1053 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
1054 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
1055 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1056 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1057 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1058 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1059 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1060 HIFN_DMACSR_S_WAIT |
1061 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1062 HIFN_DMACSR_C_WAIT |
1063 HIFN_DMACSR_ENGINE |
1064 HIFN_DMACSR_PUBDONE);
1065 #else
1066 hifn_write_1(dev, HIFN_1_DMA_CSR,
1067 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1068 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
1069 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
1070 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
1071 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
1072 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
1073 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
1074 HIFN_DMACSR_S_WAIT |
1075 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
1076 HIFN_DMACSR_C_WAIT |
1077 HIFN_DMACSR_ENGINE |
1078 HIFN_DMACSR_PUBDONE);
1079 #endif
1080 hifn_read_1(dev, HIFN_1_DMA_CSR);
1081
1082 dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
1083 HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
1084 HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
1085 HIFN_DMAIER_ENGINE;
1086 dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
1087
1088 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
1089 hifn_read_1(dev, HIFN_1_DMA_IER);
1090 #if 0
1091 hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
1092 HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
1093 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
1094 HIFN_PUCNFG_DRAM);
1095 #else
1096 hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
1097 #endif
1098 hifn_init_pll(dev);
1099
1100 hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1101 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
1102 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
1103 ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
1104 ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
1105 }
1106
1107 static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
1108 unsigned dlen, unsigned slen, u16 mask, u8 snum)
1109 {
1110 struct hifn_base_command *base_cmd;
1111 u8 *buf_pos = buf;
1112
1113 base_cmd = (struct hifn_base_command *)buf_pos;
1114 base_cmd->masks = __cpu_to_le16(mask);
1115 base_cmd->total_source_count =
1116 __cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
1117 base_cmd->total_dest_count =
1118 __cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1119
1120 dlen >>= 16;
1121 slen >>= 16;
1122 base_cmd->session_num = __cpu_to_le16(snum |
1123 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1124 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1125
1126 return sizeof(struct hifn_base_command);
1127 }
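/*
 * Illustrative example (values chosen here, not from the original source):
 * for slen = dlen = 0x12345 bytes, the low 16 bits (0x2345) go into
 * total_source_count / total_dest_count, while the remaining high bits
 * (0x1) are folded into session_num as
 * ((0x1 << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) and
 * ((0x1 << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M).
 */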
1128
1129 static int hifn_setup_crypto_command(struct hifn_device *dev,
1130 u8 *buf, unsigned dlen, unsigned slen,
1131 u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
1132 {
1133 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1134 struct hifn_crypt_command *cry_cmd;
1135 u8 *buf_pos = buf;
1136 u16 cmd_len;
1137
1138 cry_cmd = (struct hifn_crypt_command *)buf_pos;
1139
1140 cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
1141 dlen >>= 16;
1142 cry_cmd->masks = __cpu_to_le16(mode |
1143 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
1144 HIFN_CRYPT_CMD_SRCLEN_M));
1145 cry_cmd->header_skip = 0;
1146 cry_cmd->reserved = 0;
1147
1148 buf_pos += sizeof(struct hifn_crypt_command);
1149
1150 dma->cmdu++;
1151 if (dma->cmdu > 1) {
1152 dev->dmareg |= HIFN_DMAIER_C_WAIT;
1153 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
1154 }
1155
1156 if (keylen) {
1157 memcpy(buf_pos, key, keylen);
1158 buf_pos += keylen;
1159 }
1160 if (ivsize) {
1161 memcpy(buf_pos, iv, ivsize);
1162 buf_pos += ivsize;
1163 }
1164
1165 cmd_len = buf_pos - buf;
1166
1167 return cmd_len;
1168 }
1169
1170 static int hifn_setup_cmd_desc(struct hifn_device *dev,
1171 struct hifn_context *ctx, void *priv, unsigned int nbytes)
1172 {
1173 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1174 int cmd_len, sa_idx;
1175 u8 *buf, *buf_pos;
1176 u16 mask;
1177
1178 sa_idx = dma->cmdi;
1179 buf_pos = buf = dma->command_bufs[dma->cmdi];
1180
1181 mask = 0;
1182 switch (ctx->op) {
1183 case ACRYPTO_OP_DECRYPT:
1184 mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
1185 break;
1186 case ACRYPTO_OP_ENCRYPT:
1187 mask = HIFN_BASE_CMD_CRYPT;
1188 break;
1189 case ACRYPTO_OP_HMAC:
1190 mask = HIFN_BASE_CMD_MAC;
1191 break;
1192 default:
1193 goto err_out;
1194 }
1195
1196 buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
1197 nbytes, mask, dev->snum);
1198
1199 if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
1200 u16 md = 0;
1201
1202 if (ctx->keysize)
1203 md |= HIFN_CRYPT_CMD_NEW_KEY;
1204 if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
1205 md |= HIFN_CRYPT_CMD_NEW_IV;
1206
1207 switch (ctx->mode) {
1208 case ACRYPTO_MODE_ECB:
1209 md |= HIFN_CRYPT_CMD_MODE_ECB;
1210 break;
1211 case ACRYPTO_MODE_CBC:
1212 md |= HIFN_CRYPT_CMD_MODE_CBC;
1213 break;
1214 case ACRYPTO_MODE_CFB:
1215 md |= HIFN_CRYPT_CMD_MODE_CFB;
1216 break;
1217 case ACRYPTO_MODE_OFB:
1218 md |= HIFN_CRYPT_CMD_MODE_OFB;
1219 break;
1220 default:
1221 goto err_out;
1222 }
1223
1224 switch (ctx->type) {
1225 case ACRYPTO_TYPE_AES_128:
1226 if (ctx->keysize != 16)
1227 goto err_out;
1228 md |= HIFN_CRYPT_CMD_KSZ_128 |
1229 HIFN_CRYPT_CMD_ALG_AES;
1230 break;
1231 case ACRYPTO_TYPE_AES_192:
1232 if (ctx->keysize != 24)
1233 goto err_out;
1234 md |= HIFN_CRYPT_CMD_KSZ_192 |
1235 HIFN_CRYPT_CMD_ALG_AES;
1236 break;
1237 case ACRYPTO_TYPE_AES_256:
1238 if (ctx->keysize != 32)
1239 goto err_out;
1240 md |= HIFN_CRYPT_CMD_KSZ_256 |
1241 HIFN_CRYPT_CMD_ALG_AES;
1242 break;
1243 case ACRYPTO_TYPE_3DES:
1244 if (ctx->keysize != 24)
1245 goto err_out;
1246 md |= HIFN_CRYPT_CMD_ALG_3DES;
1247 break;
1248 case ACRYPTO_TYPE_DES:
1249 if (ctx->keysize != 8)
1250 goto err_out;
1251 md |= HIFN_CRYPT_CMD_ALG_DES;
1252 break;
1253 default:
1254 goto err_out;
1255 }
1256
1257 buf_pos += hifn_setup_crypto_command(dev, buf_pos,
1258 nbytes, nbytes, ctx->key, ctx->keysize,
1259 ctx->iv, ctx->ivsize, md);
1260 }
1261
1262 dev->sa[sa_idx] = priv;
1263
1264 cmd_len = buf_pos - buf;
1265 dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
1266 HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
1267
1268 if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
1269 dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
1270 HIFN_D_VALID | HIFN_D_LAST |
1271 HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
1272 dma->cmdi = 0;
1273 } else
1274 dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
1275
1276 if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
1277 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1278 dev->flags |= HIFN_FLAG_CMD_BUSY;
1279 }
1280 return 0;
1281
1282 err_out:
1283 return -EINVAL;
1284 }
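/*
 * Illustrative example (values derived from the defines above): for an
 * AES-128 CBC encryption with a fresh key and IV, the mode word passed to
 * hifn_setup_crypto_command() is built up as HIFN_CRYPT_CMD_NEW_KEY |
 * HIFN_CRYPT_CMD_NEW_IV | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_KSZ_128 |
 * HIFN_CRYPT_CMD_ALG_AES = 0x0800 | 0x1000 | 0x0008 | 0x0000 | 0x0003 =
 * 0x180b, and the base command mask is just HIFN_BASE_CMD_CRYPT.
 */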
1285
1286 static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
1287 unsigned int offset, unsigned int size)
1288 {
1289 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1290 int idx;
1291 dma_addr_t addr;
1292
1293 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1294
1295 idx = dma->srci;
1296
1297 dma->srcr[idx].p = __cpu_to_le32(addr);
1298 dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1299 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1300
1301 if (++idx == HIFN_D_SRC_RSIZE) {
1302 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1303 HIFN_D_JUMP |
1304 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1305 idx = 0;
1306 }
1307
1308 dma->srci = idx;
1309 dma->srcu++;
1310
1311 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1312 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1313 dev->flags |= HIFN_FLAG_SRC_BUSY;
1314 }
1315
1316 return size;
1317 }
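/*
 * Illustrative descriptor encoding (example values, not from the original
 * source): a 4096-byte source chunk yields l = 0x1000 | HIFN_D_VALID |
 * HIFN_D_MASKDONEIRQ | HIFN_D_LAST = 0xa2001000, i.e. the low 16 bits carry
 * the length (HIFN_D_LENGTH) and the high bits carry the control flags.
 */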
1318
1319 static void hifn_setup_res_desc(struct hifn_device *dev)
1320 {
1321 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1322
1323 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1324 HIFN_D_VALID | HIFN_D_LAST);
1325 /*
1326 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1327 * HIFN_D_LAST);
1328 */
1329
1330 if (++dma->resi == HIFN_D_RES_RSIZE) {
1331 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1332 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1333 dma->resi = 0;
1334 }
1335
1336 dma->resu++;
1337
1338 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1339 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1340 dev->flags |= HIFN_FLAG_RES_BUSY;
1341 }
1342 }
1343
1344 static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1345 unsigned offset, unsigned size)
1346 {
1347 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1348 int idx;
1349 dma_addr_t addr;
1350
1351 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1352
1353 idx = dma->dsti;
1354 dma->dstr[idx].p = __cpu_to_le32(addr);
1355 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1356 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1357
1358 if (++idx == HIFN_D_DST_RSIZE) {
1359 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1360 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1361 HIFN_D_LAST);
1362 idx = 0;
1363 }
1364 dma->dsti = idx;
1365 dma->dstu++;
1366
1367 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1368 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1369 dev->flags |= HIFN_FLAG_DST_BUSY;
1370 }
1371 }
1372
1373 static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
1374 struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
1375 struct hifn_context *ctx)
1376 {
1377 dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
1378 dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
1379
1380 hifn_setup_src_desc(dev, spage, soff, nbytes);
1381 hifn_setup_cmd_desc(dev, ctx, priv, nbytes);
1382 hifn_setup_dst_desc(dev, dpage, doff, nbytes);
1383 hifn_setup_res_desc(dev);
1384 return 0;
1385 }
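/*
 * Note (inferred from the ring sizes above): each chunk consumes one
 * descriptor on each of the four rings (source, command, destination,
 * result), which is why hifn_setup_session() below limits outstanding work
 * to HIFN_QUEUE_LENGTH, one less than the smallest (command/result) ring.
 */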
1386
1387 static int ablkcipher_walk_init(struct ablkcipher_walk *w,
1388 int num, gfp_t gfp_flags)
1389 {
1390 int i;
1391
1392 num = min(ASYNC_SCATTERLIST_CACHE, num);
1393 sg_init_table(w->cache, num);
1394
1395 w->num = 0;
1396 for (i=0; i<num; ++i) {
1397 struct page *page = alloc_page(gfp_flags);
1398 struct scatterlist *s;
1399
1400 if (!page)
1401 break;
1402
1403 s = &w->cache[i];
1404
1405 sg_set_page(s, page, PAGE_SIZE, 0);
1406 w->num++;
1407 }
1408
1409 return i;
1410 }
1411
1412 static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
1413 {
1414 int i;
1415
1416 for (i=0; i<w->num; ++i) {
1417 struct scatterlist *s = &w->cache[i];
1418
1419 __free_page(sg_page(s));
1420
1421 s->length = 0;
1422 }
1423
1424 w->num = 0;
1425 }
1426
1427 static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
1428 unsigned int size, unsigned int *nbytesp)
1429 {
1430 unsigned int copy, drest = *drestp, nbytes = *nbytesp;
1431 int idx = 0;
1432 void *saddr;
1433
1434 if (drest < size || size > nbytes)
1435 return -EINVAL;
1436
1437 while (size) {
1438 copy = min(drest, min(size, src->length));
1439
1440 saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
1441 memcpy(daddr, saddr + src->offset, copy);
1442 kunmap_atomic(saddr, KM_SOFTIRQ1);
1443
1444 size -= copy;
1445 drest -= copy;
1446 nbytes -= copy;
1447 daddr += copy;
1448
1449 dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
1450 __func__, copy, size, drest, nbytes);
1451
1452 src++;
1453 idx++;
1454 }
1455
1456 *nbytesp = nbytes;
1457 *drestp = drest;
1458
1459 return idx;
1460 }
1461
1462 static int ablkcipher_walk(struct ablkcipher_request *req,
1463 struct ablkcipher_walk *w)
1464 {
1465 struct scatterlist *src, *dst, *t;
1466 void *daddr;
1467 unsigned int nbytes = req->nbytes, offset, copy, diff;
1468 int idx, tidx, err;
1469
1470 tidx = idx = 0;
1471 offset = 0;
1472 while (nbytes) {
1473 if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
1474 return -EINVAL;
1475
1476 src = &req->src[idx];
1477 dst = &req->dst[idx];
1478
1479 dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
1480 "nbytes: %u.\n",
1481 __func__, src->length, dst->length, src->offset,
1482 dst->offset, offset, nbytes);
1483
1484 if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1485 !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
1486 offset) {
1487 unsigned slen = min(src->length - offset, nbytes);
1488 unsigned dlen = PAGE_SIZE;
1489
1490 t = &w->cache[idx];
1491
1492 daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
1493 err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
1494 if (err < 0)
1495 goto err_out_unmap;
1496
1497 idx += err;
1498
1499 copy = slen & ~(HIFN_D_DST_DALIGN - 1);
1500 diff = slen & (HIFN_D_DST_DALIGN - 1);
1501
1502 if (dlen < nbytes) {
1503 /*
1504 * The destination page does not have enough space
1505 * for an additional block-sized chunk, so we mark
1506 * that page as containing only blocksize-aligned
1507 * chunks:
1508 * 	t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
1509 * and increase the number of bytes to be processed
1510 * in the next chunk:
1511 * 	nbytes += diff;
1512 */
1513 nbytes += diff;
1514
1515 /*
1516 * Temporary of course...
1517 * Kick the author if you ever hit this one.
1518 */
1519 printk(KERN_ERR "%s: dlen: %u, nbytes: %u, "
1520 	"slen: %u, offset: %u.\n",
1521 	__func__, dlen, nbytes, slen, offset);
1522 printk(KERN_ERR "%s: please contact the author to fix "
1523 	"this issue; generally you should never hit "
1524 	"this path under any condition, but who "
1525 	"knows how you used the crypto code.\n"
1526 	"Thank you.\n", __func__);
1527 BUG();
1528 } else {
1529 copy += diff + nbytes;
1530
1531 src = &req->src[idx];
1532
1533 err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
1534 if (err < 0)
1535 goto err_out_unmap;
1536
1537 idx += err;
1538 }
1539
1540 t->length = copy;
1541 t->offset = offset;
1542
1543 kunmap_atomic(daddr, KM_SOFTIRQ0);
1544 } else {
1545 nbytes -= min(src->length, nbytes);
1546 idx++;
1547 }
1548
1549 tidx++;
1550 }
1551
1552 return tidx;
1553
1554 err_out_unmap:
1555 kunmap_atomic(daddr, KM_SOFTIRQ0);
1556 return err;
1557 }
1558
1559 static int hifn_setup_session(struct ablkcipher_request *req)
1560 {
1561 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1562 struct hifn_device *dev = ctx->dev;
1563 struct page *spage, *dpage;
1564 unsigned long soff, doff, dlen, flags;
1565 unsigned int nbytes = req->nbytes, idx = 0, len;
1566 int err = -EINVAL, sg_num;
1567 struct scatterlist *src, *dst, *t;
1568
1569 if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
1570 goto err_out_exit;
1571
1572 ctx->walk.flags = 0;
1573
1574 while (nbytes) {
1575 dst = &req->dst[idx];
1576 dlen = min(dst->length, nbytes);
1577
1578 if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
1579 !IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
1580 ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
1581
1582 nbytes -= dlen;
1583 idx++;
1584 }
1585
1586 if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1587 err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
1588 if (err < 0)
1589 return err;
1590 }
1591
1592 nbytes = req->nbytes;
1593 idx = 0;
1594
1595 sg_num = ablkcipher_walk(req, &ctx->walk);
1596 if (sg_num < 0) {
1597 err = sg_num;
1598 goto err_out_exit;
1599 }
1600 atomic_set(&ctx->sg_num, sg_num);
1601
1602 spin_lock_irqsave(&dev->lock, flags);
1603 if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
1604 err = -EAGAIN;
1605 goto err_out;
1606 }
1607
1608 dev->snum++;
1609 dev->started += sg_num;
1610
1611 while (nbytes) {
1612 src = &req->src[idx];
1613 dst = &req->dst[idx];
1614 t = &ctx->walk.cache[idx];
1615
1616 if (t->length) {
1617 spage = dpage = sg_page(t);
1618 soff = doff = 0;
1619 len = t->length;
1620 } else {
1621 spage = sg_page(src);
1622 soff = src->offset;
1623
1624 dpage = sg_page(dst);
1625 doff = dst->offset;
1626
1627 len = dst->length;
1628 }
1629
1630 idx++;
1631
1632 err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
1633 req, ctx);
1634 if (err)
1635 goto err_out;
1636
1637 nbytes -= min(len, nbytes);
1638 }
1639
1640 dev->active = HIFN_DEFAULT_ACTIVE_NUM;
1641 spin_unlock_irqrestore(&dev->lock, flags);
1642
1643 return 0;
1644
1645 err_out:
1646 spin_unlock_irqrestore(&dev->lock, flags);
1647 err_out_exit:
1648 if (err)
1649 dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
1650 "type: %u, err: %d.\n",
1651 dev->name, ctx->iv, ctx->ivsize,
1652 ctx->key, ctx->keysize,
1653 ctx->mode, ctx->op, ctx->type, err);
1654
1655 return err;
1656 }
1657
1658 static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
1659 {
1660 int n, err;
1661 u8 src[16];
1662 struct hifn_context ctx;
1663 u8 fips_aes_ecb_from_zero[16] = {
1664 0x66, 0xE9, 0x4B, 0xD4,
1665 0xEF, 0x8A, 0x2C, 0x3B,
1666 0x88, 0x4C, 0xFA, 0x59,
1667 0xCA, 0x34, 0x2B, 0x2E};
1668
1669 memset(src, 0, sizeof(src));
1670 memset(ctx.key, 0, sizeof(ctx.key));
1671
1672 ctx.dev = dev;
1673 ctx.keysize = 16;
1674 ctx.ivsize = 0;
1675 ctx.iv = NULL;
1676 ctx.op = (encdec) ? ACRYPTO_OP_ENCRYPT : ACRYPTO_OP_DECRYPT;
1677 ctx.mode = ACRYPTO_MODE_ECB;
1678 ctx.type = ACRYPTO_TYPE_AES_128;
1679 atomic_set(&ctx.sg_num, 1);
1680
1681 err = hifn_setup_dma(dev,
1682 virt_to_page(src), offset_in_page(src),
1683 virt_to_page(src), offset_in_page(src),
1684 sizeof(src), NULL, &ctx);
1685 if (err)
1686 goto err_out;
1687
1688 msleep(200);
1689
1690 dprintk("%s: decoded: ", dev->name);
1691 for (n=0; n<sizeof(src); ++n)
1692 dprintk("%02x ", src[n]);
1693 dprintk("\n");
1694 dprintk("%s: FIPS : ", dev->name);
1695 for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
1696 dprintk("%02x ", fips_aes_ecb_from_zero[n]);
1697 dprintk("\n");
1698
1699 if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
1700 printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
1701 "passed.\n", dev->name);
1702 return 0;
1703 }
1704
1705 err_out:
1706 printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name);
1707 return -1;
1708 }
1709
1710 static int hifn_start_device(struct hifn_device *dev)
1711 {
1712 int err;
1713
1714 hifn_reset_dma(dev, 1);
1715
1716 err = hifn_enable_crypto(dev);
1717 if (err)
1718 return err;
1719
1720 hifn_reset_puc(dev);
1721
1722 hifn_init_dma(dev);
1723
1724 hifn_init_registers(dev);
1725
1726 hifn_init_pubrng(dev);
1727
1728 return 0;
1729 }
1730
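/*
 * Copy up to @size bytes from the linear buffer at @saddr into the
 * scatterlist @dst (the first entry is additionally shifted by @offset).
 * The counters behind @srestp and @nbytesp are decremented by the amount
 * copied and the number of destination entries used is returned, or a
 * negative errno on a length mismatch. This is used to copy processed
 * data back out of the bounce pages of a misaligned request.
 */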
1731 static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
1732 struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
1733 {
1734 unsigned int srest = *srestp, nbytes = *nbytesp, copy;
1735 void *daddr;
1736 int idx = 0;
1737
1738 if (srest < size || size > nbytes)
1739 return -EINVAL;
1740
1741 while (size) {
1742 copy = min(srest, min(dst->length, size));
1743
1744 daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
1745 memcpy(daddr + dst->offset + offset, saddr, copy);
1746 kunmap_atomic(daddr, KM_IRQ0);
1747
1748 nbytes -= copy;
1749 size -= copy;
1750 srest -= copy;
1751 saddr += copy;
1752 offset = 0;
1753
1754 dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
1755 __func__, copy, size, srest, nbytes);
1756
1757 dst++;
1758 idx++;
1759 }
1760
1761 *nbytesp = nbytes;
1762 *srestp = srest;
1763
1764 return idx;
1765 }
1766
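/*
 * Completion path for one finished descriptor. When the last scatterlist
 * entry of a request completes (ctx->sg_num drops to zero), data that went
 * through bounce pages (the misaligned case) is copied back into the
 * caller's destination scatterlist before the request's completion
 * callback is invoked.
 */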
1767 static void hifn_process_ready(struct ablkcipher_request *req, int error)
1768 {
1769 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1770 struct hifn_device *dev;
1771
1772 dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);
1773
1774 dev = ctx->dev;
1775 dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
1776 __func__, req, dev->started, atomic_read(&ctx->sg_num));
1777
1778 if (--dev->started < 0)
1779 BUG();
1780
1781 if (atomic_dec_and_test(&ctx->sg_num)) {
1782 unsigned int nbytes = req->nbytes;
1783 int idx = 0, err;
1784 struct scatterlist *dst, *t;
1785 void *saddr;
1786
1787 if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1788 while (nbytes) {
1789 t = &ctx->walk.cache[idx];
1790 dst = &req->dst[idx];
1791
1792 dprintk("\n%s: sg_page(t): %p, t->length: %u, "
1793 "sg_page(dst): %p, dst->length: %u, "
1794 "nbytes: %u.\n",
1795 __func__, sg_page(t), t->length,
1796 sg_page(dst), dst->length, nbytes);
1797
1798 if (!t->length) {
1799 nbytes -= min(dst->length, nbytes);
1800 idx++;
1801 continue;
1802 }
1803
1804 saddr = kmap_atomic(sg_page(t), KM_IRQ1);
1805
1806 err = ablkcipher_get(saddr, &t->length, t->offset,
1807 dst, nbytes, &nbytes);
1808 if (err < 0) {
1809 kunmap_atomic(saddr, KM_IRQ1);
1810 break;
1811 }
1812
1813 idx += err;
1814 kunmap_atomic(saddr, KM_IRQ1);
1815 }
1816
1817 ablkcipher_walk_exit(&ctx->walk);
1818 }
1819
1820 req->base.complete(&req->base, error);
1821 }
1822 }
1823
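/*
 * Scan the result ring for descriptors on which the hardware has cleared
 * the VALID bit, complete the requests stored in dev->sa[] for those
 * slots, and warn (rate-limited) about destination/overrun conditions
 * reported by the chip.
 */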
1824 static void hifn_check_for_completion(struct hifn_device *dev, int error)
1825 {
1826 int i;
1827 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1828
1829 for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
1830 struct hifn_desc *d = &dma->resr[i];
1831
1832 if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
1833 dev->success++;
1834 dev->reset = 0;
1835 hifn_process_ready(dev->sa[i], error);
1836 dev->sa[i] = NULL;
1837 }
1838
1839 if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
1840 if (printk_ratelimit())
1841 printk(KERN_WARNING "%s: overflow detected [d: %u, o: %u] "
1842 "at %d resr: l: %08x, p: %08x.\n",
1843 dev->name,
1844 !!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
1845 !!(d->l & __cpu_to_le32(HIFN_D_OVER)),
1846 i, d->l, d->p);
1847 }
1848 }
1849
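/*
 * Advance the "cleared" (k) index of each descriptor ring past entries the
 * hardware has finished with (VALID bit cleared), decrementing the in-use
 * (u) counters as it goes. The extra slot at the end of the command and
 * result rings is presumably the ring's jump descriptor; it is stepped
 * over without being counted as an in-use entry.
 */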
1850 static void hifn_clear_rings(struct hifn_device *dev)
1851 {
1852 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1853 int i, u;
1854
1855 dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
1856 "k: %d.%d.%d.%d.\n",
1857 dev->name,
1858 dma->cmdi, dma->srci, dma->dsti, dma->resi,
1859 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1860 dma->cmdk, dma->srck, dma->dstk, dma->resk);
1861
1862 i = dma->resk; u = dma->resu;
1863 while (u != 0) {
1864 if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
1865 break;
1866
1867 if (i != HIFN_D_RES_RSIZE)
1868 u--;
1869
1870 if (++i == (HIFN_D_RES_RSIZE + 1))
1871 i = 0;
1872 }
1873 dma->resk = i; dma->resu = u;
1874
1875 i = dma->srck; u = dma->srcu;
1876 while (u != 0) {
1877 if (i == HIFN_D_SRC_RSIZE)
1878 i = 0;
1879 if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
1880 break;
1881 i++, u--;
1882 }
1883 dma->srck = i; dma->srcu = u;
1884
1885 i = dma->cmdk; u = dma->cmdu;
1886 while (u != 0) {
1887 if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
1888 break;
1889 if (i != HIFN_D_CMD_RSIZE)
1890 u--;
1891 if (++i == (HIFN_D_CMD_RSIZE + 1))
1892 i = 0;
1893 }
1894 dma->cmdk = i; dma->cmdu = u;
1895
1896 i = dma->dstk; u = dma->dstu;
1897 while (u != 0) {
1898 if (i == HIFN_D_DST_RSIZE)
1899 i = 0;
1900 if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
1901 break;
1902 i++, u--;
1903 }
1904 dma->dstk = i; dma->dstu = u;
1905
1906 dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
1907 "k: %d.%d.%d.%d.\n",
1908 dev->name,
1909 dma->cmdi, dma->srci, dma->dsti, dma->resi,
1910 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1911 dma->cmdk, dma->srck, dma->dstk, dma->resk);
1912 }
1913
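/*
 * Periodic (one second) watchdog. When the device has gone idle, the DMA
 * control state machines of empty rings are disabled. If no request has
 * completed since the previous run while work is still outstanding, the
 * engine is assumed to be stuck: after five consecutive stalls a hard
 * reset is performed, pending completions are reported with -EBUSY and
 * the ring bookkeeping is cleared.
 */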
1914 static void hifn_work(struct work_struct *work)
1915 {
1916 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1917 struct hifn_device *dev = container_of(dw, struct hifn_device, work);
1918 unsigned long flags;
1919 int reset = 0;
1920 u32 r = 0;
1921
1922 spin_lock_irqsave(&dev->lock, flags);
1923 if (dev->active == 0) {
1924 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1925
1926 if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
1927 dev->flags &= ~HIFN_FLAG_CMD_BUSY;
1928 r |= HIFN_DMACSR_C_CTRL_DIS;
1929 }
1930 if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
1931 dev->flags &= ~HIFN_FLAG_SRC_BUSY;
1932 r |= HIFN_DMACSR_S_CTRL_DIS;
1933 }
1934 if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
1935 dev->flags &= ~HIFN_FLAG_DST_BUSY;
1936 r |= HIFN_DMACSR_D_CTRL_DIS;
1937 }
1938 if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
1939 dev->flags &= ~HIFN_FLAG_RES_BUSY;
1940 r |= HIFN_DMACSR_R_CTRL_DIS;
1941 }
1942 if (r)
1943 hifn_write_1(dev, HIFN_1_DMA_CSR, r);
1944 } else
1945 dev->active--;
1946
1947 if (dev->prev_success == dev->success && dev->started)
1948 reset = 1;
1949 dev->prev_success = dev->success;
1950 spin_unlock_irqrestore(&dev->lock, flags);
1951
1952 if (reset) {
1953 dprintk("%s: r: %08x, active: %d, started: %d, "
1954 "success: %lu: reset: %d.\n",
1955 dev->name, r, dev->active, dev->started,
1956 dev->success, reset);
1957
1958 if (++dev->reset >= 5) {
1959 dprintk("%s: really hard reset.\n", dev->name);
1960 hifn_reset_dma(dev, 1);
1961 hifn_stop_device(dev);
1962 hifn_start_device(dev);
1963 dev->reset = 0;
1964 }
1965
1966 spin_lock_irqsave(&dev->lock, flags);
1967 hifn_check_for_completion(dev, -EBUSY);
1968 hifn_clear_rings(dev);
1969 dev->started = 0;
1970 spin_unlock_irqrestore(&dev->lock, flags);
1971 }
1972
1973 schedule_delayed_work(&dev->work, HZ);
1974 }
1975
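/*
 * Interrupt handler: acknowledge the DMA status bits we are interested in,
 * recover from destination overflows and ring aborts (the latter by fully
 * reinitializing the DMA engine), mask the "command ring waiting"
 * interrupt once that ring is empty, then let the tasklet complete
 * finished requests while the rings are reclaimed here.
 */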
1976 static irqreturn_t hifn_interrupt(int irq, void *data)
1977 {
1978 struct hifn_device *dev = (struct hifn_device *)data;
1979 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1980 u32 dmacsr, restart;
1981
1982 dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
1983
1984 dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
1985 "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
1986 dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
1987 dma->cmdi, dma->srci, dma->dsti, dma->resi,
1988 dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1989
1990 if ((dmacsr & dev->dmareg) == 0)
1991 return IRQ_NONE;
1992
1993 hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
1994
1995 if (dmacsr & HIFN_DMACSR_ENGINE)
1996 hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
1997 if (dmacsr & HIFN_DMACSR_PUBDONE)
1998 hifn_write_1(dev, HIFN_1_PUB_STATUS,
1999 hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
2000
2001 restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
2002 if (restart) {
2003 u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
2004
2005 if (printk_ratelimit())
2006 printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, dstover: %u.\n",
2007 dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
2008 !!(dmacsr & HIFN_DMACSR_D_OVER),
2009 puisr, !!(puisr & HIFN_PUISR_DSTOVER));
2010 if (puisr & HIFN_PUISR_DSTOVER)
2011 hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
2012 hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
2013 HIFN_DMACSR_D_OVER));
2014 }
2015
2016 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
2017 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
2018 if (restart) {
2019 if (printk_ratelimit())
2020 printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
2021 dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
2022 !!(dmacsr & HIFN_DMACSR_S_ABORT),
2023 !!(dmacsr & HIFN_DMACSR_D_ABORT),
2024 !!(dmacsr & HIFN_DMACSR_R_ABORT));
2025 hifn_reset_dma(dev, 1);
2026 hifn_init_dma(dev);
2027 hifn_init_registers(dev);
2028 }
2029
2030 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
2031 dprintk("%s: wait on command.\n", dev->name);
2032 dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
2033 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
2034 }
2035
2036 tasklet_schedule(&dev->tasklet);
2037 hifn_clear_rings(dev);
2038
2039 return IRQ_HANDLED;
2040 }
2041
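/*
 * Fail everything that is still in flight or queued: complete outstanding
 * descriptors (with -ENODEV if the hardware never finished them) and
 * drain the software backlog queue, completing each request with -ENODEV.
 * Called on device removal.
 */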
2042 static void hifn_flush(struct hifn_device *dev)
2043 {
2044 unsigned long flags;
2045 struct crypto_async_request *async_req;
2046 struct hifn_context *ctx;
2047 struct ablkcipher_request *req;
2048 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
2049 int i;
2050
2051 spin_lock_irqsave(&dev->lock, flags);
2052 for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
2053 struct hifn_desc *d = &dma->resr[i];
2054
2055 if (dev->sa[i]) {
2056 hifn_process_ready(dev->sa[i],
2057 (d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0);
2058 }
2059 }
2060
2061 while ((async_req = crypto_dequeue_request(&dev->queue))) {
2062 ctx = crypto_tfm_ctx(async_req->tfm);
2063 req = container_of(async_req, struct ablkcipher_request, base);
2064
2065 hifn_process_ready(req, -ENODEV);
2066 }
2067 spin_unlock_irqrestore(&dev->lock, flags);
2068 }
2069
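/*
 * setkey() for all algorithms registered by this driver. Besides the
 * length check, single DES keys are run through des_ekey() so that weak
 * keys can be rejected when CRYPTO_TFM_REQ_WEAK_KEY is set. The key is
 * only cached in the context here; it is presumably pushed to the device
 * when the next command descriptor is built, which is why
 * HIFN_FLAG_OLD_KEY is cleared.
 */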
2070 static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2071 unsigned int len)
2072 {
2073 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
2074 struct hifn_context *ctx = crypto_tfm_ctx(tfm);
2075 struct hifn_device *dev = ctx->dev;
2076
2077 if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
2078 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2079 return -EINVAL;
2080 }
2081
2082 if (len == HIFN_DES_KEY_LENGTH) {
2083 u32 tmp[DES_EXPKEY_WORDS];
2084 int ret = des_ekey(tmp, key);
2085
2086 if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
2087 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
2088 return -EINVAL;
2089 }
2090 }
2091
2092 dev->flags &= ~HIFN_FLAG_OLD_KEY;
2093
2094 memcpy(ctx->key, key, len);
2095 ctx->keysize = len;
2096
2097 return 0;
2098 }
2099
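/*
 * Try to start a request immediately; if the hardware queue is already
 * full (or session setup returns -EAGAIN), park the request on the
 * software backlog queue instead. The backlog is drained from
 * hifn_process_queue().
 */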
2100 static int hifn_handle_req(struct ablkcipher_request *req)
2101 {
2102 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
2103 struct hifn_device *dev = ctx->dev;
2104 int err = -EAGAIN;
2105
2106 if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
2107 err = hifn_setup_session(req);
2108
2109 if (err == -EAGAIN) {
2110 unsigned long flags;
2111
2112 spin_lock_irqsave(&dev->lock, flags);
2113 err = ablkcipher_enqueue_request(&dev->queue, req);
2114 spin_unlock_irqrestore(&dev->lock, flags);
2115 }
2116
2117 return err;
2118 }
2119
2120 static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
2121 u8 type, u8 mode)
2122 {
2123 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
2124 unsigned ivsize;
2125
2126 ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
2127
2128 if (req->info && mode != ACRYPTO_MODE_ECB) {
2129 if (type == ACRYPTO_TYPE_AES_128)
2130 ivsize = HIFN_AES_IV_LENGTH;
2131 else if (type == ACRYPTO_TYPE_DES)
2132 ivsize = HIFN_DES_KEY_LENGTH;
2133 else if (type == ACRYPTO_TYPE_3DES)
2134 ivsize = HIFN_3DES_KEY_LENGTH;
2135 }
2136
2137 if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
2138 if (ctx->keysize == 24)
2139 type = ACRYPTO_TYPE_AES_192;
2140 else if (ctx->keysize == 32)
2141 type = ACRYPTO_TYPE_AES_256;
2142 }
2143
2144 ctx->op = op;
2145 ctx->mode = mode;
2146 ctx->type = type;
2147 ctx->iv = req->info;
2148 ctx->ivsize = ivsize;
2149
2150 /*
2151 * HEAVY TODO: needs to kick Herbert XU to write documentation.
2154 */
2155
2156 return hifn_handle_req(req);
2157 }
2158
2159 static int hifn_process_queue(struct hifn_device *dev)
2160 {
2161 struct crypto_async_request *async_req;
2162 struct hifn_context *ctx;
2163 struct ablkcipher_request *req;
2164 unsigned long flags;
2165 int err = 0;
2166
2167 while (dev->started < HIFN_QUEUE_LENGTH) {
2168 spin_lock_irqsave(&dev->lock, flags);
2169 async_req = crypto_dequeue_request(&dev->queue);
2170 spin_unlock_irqrestore(&dev->lock, flags);
2171
2172 if (!async_req)
2173 break;
2174
2175 ctx = crypto_tfm_ctx(async_req->tfm);
2176 req = container_of(async_req, struct ablkcipher_request, base);
2177
2178 err = hifn_handle_req(req);
2179 if (err)
2180 break;
2181 }
2182
2183 return err;
2184 }
2185
2186 static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
2187 u8 type, u8 mode)
2188 {
2189 int err;
2190 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
2191 struct hifn_device *dev = ctx->dev;
2192
2193 err = hifn_setup_crypto_req(req, op, type, mode);
2194 if (err)
2195 return err;
2196
2197 if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
2198 hifn_process_queue(dev);
2199
2200 return -EINPROGRESS;
2201 }
2202
2203 /*
2204 * AES encryption functions.
2205 */
2206 static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
2207 {
2208 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2209 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
2210 }
2211 static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
2212 {
2213 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2214 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
2215 }
2216 static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
2217 {
2218 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2219 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
2220 }
2221 static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
2222 {
2223 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2224 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
2225 }
2226
2227 /*
2228 * AES decryption functions.
2229 */
2230 static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
2231 {
2232 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2233 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
2234 }
2235 static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
2236 {
2237 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2238 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
2239 }
2240 static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
2241 {
2242 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2243 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
2244 }
2245 static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
2246 {
2247 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2248 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
2249 }
2250
2251 /*
2252 * DES encryption functions.
2253 */
2254 static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
2255 {
2256 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2257 ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
2258 }
2259 static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
2260 {
2261 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2262 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
2263 }
2264 static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
2265 {
2266 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2267 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
2268 }
2269 static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
2270 {
2271 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2272 ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
2273 }
2274
2275 /*
2276 * DES decryption functions.
2277 */
2278 static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
2279 {
2280 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2281 ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
2282 }
2283 static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
2284 {
2285 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2286 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
2287 }
2288 static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
2289 {
2290 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2291 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
2292 }
2293 static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
2294 {
2295 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2296 ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
2297 }
2298
2299 /*
2300 * 3DES encryption functions.
2301 */
2302 static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
2303 {
2304 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2305 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
2306 }
2307 static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
2308 {
2309 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2310 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
2311 }
2312 static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
2313 {
2314 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2315 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
2316 }
2317 static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
2318 {
2319 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2320 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
2321 }
2322
2323 /*
2324 * 3DES decryption functions.
2325 */
2326 static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
2327 {
2328 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2329 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
2330 }
2331 static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
2332 {
2333 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2334 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
2335 }
2336 static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
2337 {
2338 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2339 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
2340 }
2341 static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
2342 {
2343 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2344 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
2345 }
2346
2347 struct hifn_alg_template
2348 {
2349 char name[CRYPTO_MAX_ALG_NAME];
2350 char drv_name[CRYPTO_MAX_ALG_NAME];
2351 unsigned int bsize;
2352 struct ablkcipher_alg ablkcipher;
2353 };
2354
2355 static struct hifn_alg_template hifn_alg_templates[] = {
2356 /*
2357 * 3DES ECB, CBC, CFB and OFB modes.
2358 */
2359 {
2360 .name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
2361 .ablkcipher = {
2362 .min_keysize = HIFN_3DES_KEY_LENGTH,
2363 .max_keysize = HIFN_3DES_KEY_LENGTH,
2364 .setkey = hifn_setkey,
2365 .encrypt = hifn_encrypt_3des_cfb,
2366 .decrypt = hifn_decrypt_3des_cfb,
2367 },
2368 },
2369 {
2370 .name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
2371 .ablkcipher = {
2372 .min_keysize = HIFN_3DES_KEY_LENGTH,
2373 .max_keysize = HIFN_3DES_KEY_LENGTH,
2374 .setkey = hifn_setkey,
2375 .encrypt = hifn_encrypt_3des_ofb,
2376 .decrypt = hifn_decrypt_3des_ofb,
2377 },
2378 },
2379 {
2380 .name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
2381 .ablkcipher = {
2382 .ivsize = HIFN_IV_LENGTH,
2383 .min_keysize = HIFN_3DES_KEY_LENGTH,
2384 .max_keysize = HIFN_3DES_KEY_LENGTH,
2385 .setkey = hifn_setkey,
2386 .encrypt = hifn_encrypt_3des_cbc,
2387 .decrypt = hifn_decrypt_3des_cbc,
2388 },
2389 },
2390 {
2391 .name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
2392 .ablkcipher = {
2393 .min_keysize = HIFN_3DES_KEY_LENGTH,
2394 .max_keysize = HIFN_3DES_KEY_LENGTH,
2395 .setkey = hifn_setkey,
2396 .encrypt = hifn_encrypt_3des_ecb,
2397 .decrypt = hifn_decrypt_3des_ecb,
2398 },
2399 },
2400
2401 /*
2402 * DES ECB, CBC, CFB and OFB modes.
2403 */
2404 {
2405 .name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
2406 .ablkcipher = {
2407 .min_keysize = HIFN_DES_KEY_LENGTH,
2408 .max_keysize = HIFN_DES_KEY_LENGTH,
2409 .setkey = hifn_setkey,
2410 .encrypt = hifn_encrypt_des_cfb,
2411 .decrypt = hifn_decrypt_des_cfb,
2412 },
2413 },
2414 {
2415 .name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
2416 .ablkcipher = {
2417 .min_keysize = HIFN_DES_KEY_LENGTH,
2418 .max_keysize = HIFN_DES_KEY_LENGTH,
2419 .setkey = hifn_setkey,
2420 .encrypt = hifn_encrypt_des_ofb,
2421 .decrypt = hifn_decrypt_des_ofb,
2422 },
2423 },
2424 {
2425 .name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
2426 .ablkcipher = {
2427 .ivsize = HIFN_IV_LENGTH,
2428 .min_keysize = HIFN_DES_KEY_LENGTH,
2429 .max_keysize = HIFN_DES_KEY_LENGTH,
2430 .setkey = hifn_setkey,
2431 .encrypt = hifn_encrypt_des_cbc,
2432 .decrypt = hifn_decrypt_des_cbc,
2433 },
2434 },
2435 {
2436 .name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
2437 .ablkcipher = {
2438 .min_keysize = HIFN_DES_KEY_LENGTH,
2439 .max_keysize = HIFN_DES_KEY_LENGTH,
2440 .setkey = hifn_setkey,
2441 .encrypt = hifn_encrypt_des_ecb,
2442 .decrypt = hifn_decrypt_des_ecb,
2443 },
2444 },
2445
2446 /*
2447 * AES ECB, CBC, CFB and OFB modes.
2448 */
2449 {
2450 .name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
2451 .ablkcipher = {
2452 .min_keysize = AES_MIN_KEY_SIZE,
2453 .max_keysize = AES_MAX_KEY_SIZE,
2454 .setkey = hifn_setkey,
2455 .encrypt = hifn_encrypt_aes_ecb,
2456 .decrypt = hifn_decrypt_aes_ecb,
2457 },
2458 },
2459 {
2460 .name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
2461 .ablkcipher = {
2462 .ivsize = HIFN_AES_IV_LENGTH,
2463 .min_keysize = AES_MIN_KEY_SIZE,
2464 .max_keysize = AES_MAX_KEY_SIZE,
2465 .setkey = hifn_setkey,
2466 .encrypt = hifn_encrypt_aes_cbc,
2467 .decrypt = hifn_decrypt_aes_cbc,
2468 },
2469 },
2470 {
2471 .name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
2472 .ablkcipher = {
2473 .min_keysize = AES_MIN_KEY_SIZE,
2474 .max_keysize = AES_MAX_KEY_SIZE,
2475 .setkey = hifn_setkey,
2476 .encrypt = hifn_encrypt_aes_cfb,
2477 .decrypt = hifn_decrypt_aes_cfb,
2478 },
2479 },
2480 {
2481 .name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
2482 .ablkcipher = {
2483 .min_keysize = AES_MIN_KEY_SIZE,
2484 .max_keysize = AES_MAX_KEY_SIZE,
2485 .setkey = hifn_setkey,
2486 .encrypt = hifn_encrypt_aes_ofb,
2487 .decrypt = hifn_decrypt_aes_ofb,
2488 },
2489 },
2490 };
2491
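/*
 * Each template above is registered as an asynchronous blkcipher, so it is
 * reached through the regular kernel crypto API rather than being called
 * directly. A minimal consumer sketch (not part of this driver, error
 * handling omitted, ablkcipher-era API names) would look roughly like:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);	(returns -EINPROGRESS)
 *
 * The priority of 300 assigned below makes these implementations preferred
 * over the generic software ciphers while the card is present.
 */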
2492 static int hifn_cra_init(struct crypto_tfm *tfm)
2493 {
2494 struct crypto_alg *alg = tfm->__crt_alg;
2495 struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
2496 struct hifn_context *ctx = crypto_tfm_ctx(tfm);
2497
2498 ctx->dev = ha->dev;
2499
2500 return 0;
2501 }
2502
2503 static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
2504 {
2505 struct hifn_crypto_alg *alg;
2506 int err;
2507
2508 alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
2509 if (!alg)
2510 return -ENOMEM;
2511
2512 snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
2513 snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
2514 t->drv_name, dev->name);
2515
2516 alg->alg.cra_priority = 300;
2517 alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
2518 alg->alg.cra_blocksize = t->bsize;
2519 alg->alg.cra_ctxsize = sizeof(struct hifn_context);
2520 alg->alg.cra_alignmask = 0;
2521 alg->alg.cra_type = &crypto_ablkcipher_type;
2522 alg->alg.cra_module = THIS_MODULE;
2523 alg->alg.cra_u.ablkcipher = t->ablkcipher;
2524 alg->alg.cra_init = hifn_cra_init;
2525
2526 alg->dev = dev;
2527
2528 list_add_tail(&alg->entry, &dev->alg_list);
2529
2530 err = crypto_register_alg(&alg->alg);
2531 if (err) {
2532 list_del(&alg->entry);
2533 kfree(alg);
2534 }
2535
2536 return err;
2537 }
2538
2539 static void hifn_unregister_alg(struct hifn_device *dev)
2540 {
2541 struct hifn_crypto_alg *a, *n;
2542
2543 list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
2544 list_del(&a->entry);
2545 crypto_unregister_alg(&a->alg);
2546 kfree(a);
2547 }
2548 }
2549
2550 static int hifn_register_alg(struct hifn_device *dev)
2551 {
2552 int i, err;
2553
2554 for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
2555 err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
2556 if (err)
2557 goto err_out_exit;
2558 }
2559
2560 return 0;
2561
2562 err_out_exit:
2563 hifn_unregister_alg(dev);
2564 return err;
2565 }
2566
2567 static void hifn_tasklet_callback(unsigned long data)
2568 {
2569 struct hifn_device *dev = (struct hifn_device *)data;
2570
2571 /*
2572 * This is ok to call this without lock being held,
2573 * althogh it modifies some parameters used in parallel,
2574 * (like dev->success), but they are used in process
2575 * context or update is atomic (like setting dev->sa[i] to NULL).
2576 */
2577 hifn_check_for_completion(dev, 0);
2578 }
2579
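/*
 * PCI probe: set the 32-bit DMA mask, map the three BARs, allocate the
 * result pages and the DMA descriptor rings, hook up the interrupt and the
 * completion tasklet, start the device and run the AES self-test before
 * exposing the RNG and the cipher algorithms. The delayed work armed at
 * the end is the watchdog implemented in hifn_work().
 */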
2580 static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2581 {
2582 int err, i;
2583 struct hifn_device *dev;
2584 char name[8];
2585
2586 err = pci_enable_device(pdev);
2587 if (err)
2588 return err;
2589 pci_set_master(pdev);
2590
2591 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2592 if (err)
2593 goto err_out_disable_pci_device;
2594
2595 snprintf(name, sizeof(name), "hifn%d",
2596 atomic_inc_return(&hifn_dev_number)-1);
2597
2598 err = pci_request_regions(pdev, name);
2599 if (err)
2600 goto err_out_disable_pci_device;
2601
2602 if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
2603 pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
2604 pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
2605 dprintk("%s: Broken hardware - I/O regions are too small.\n",
2606 pci_name(pdev));
2607 err = -ENODEV;
2608 goto err_out_free_regions;
2609 }
2610
2611 dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
2612 GFP_KERNEL);
2613 if (!dev) {
2614 err = -ENOMEM;
2615 goto err_out_free_regions;
2616 }
2617
2618 INIT_LIST_HEAD(&dev->alg_list);
2619
2620 snprintf(dev->name, sizeof(dev->name), "%s", name);
2621 spin_lock_init(&dev->lock);
2622
2623 for (i=0; i<3; ++i) {
2624 unsigned long addr, size;
2625
2626 addr = pci_resource_start(pdev, i);
2627 size = pci_resource_len(pdev, i);
2628
2629 dev->bar[i] = ioremap_nocache(addr, size);
2630 if (!dev->bar[i]) {
err = -ENOMEM;
2631 goto err_out_unmap_bars;
}
2632 }
2633
2634 dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
2635 if (!dev->result_mem) {
2636 dprintk("Failed to allocate %d pages for result_mem.\n",
2637 HIFN_MAX_RESULT_ORDER);
err = -ENOMEM;
2638 goto err_out_unmap_bars;
2639 }
2640 memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
2641
2642 dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
2643 PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
2644
2645 dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
2646 &dev->desc_dma);
2647 if (!dev->desc_virt) {
2648 dprintk("Failed to allocate descriptor rings.\n");
err = -ENOMEM;
2649 goto err_out_free_result_pages;
2650 }
2651 memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
2652
2653 dev->pdev = pdev;
2654 dev->irq = pdev->irq;
2655
2656 for (i=0; i<HIFN_D_RES_RSIZE; ++i)
2657 dev->sa[i] = NULL;
2658
2659 pci_set_drvdata(pdev, dev);
2660
2661 tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);
2662
2663 crypto_init_queue(&dev->queue, 1);
2664
2665 err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
2666 if (err) {
2667 dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
2668 dev->irq = 0;
2669 goto err_out_free_desc;
2670 }
2671
2672 err = hifn_start_device(dev);
2673 if (err)
2674 goto err_out_free_irq;
2675
2676 err = hifn_test(dev, 1, 0);
2677 if (err)
2678 goto err_out_stop_device;
2679
2680 err = hifn_register_rng(dev);
2681 if (err)
2682 goto err_out_stop_device;
2683
2684 err = hifn_register_alg(dev);
2685 if (err)
2686 goto err_out_unregister_rng;
2687
2688 INIT_DELAYED_WORK(&dev->work, hifn_work);
2689 schedule_delayed_work(&dev->work, HZ);
2690
2691 dprintk("HIFN crypto accelerator card at %s has been "
2692 "successfully registered as %s.\n",
2693 pci_name(pdev), dev->name);
2694
2695 return 0;
2696
2697 err_out_unregister_rng:
2698 hifn_unregister_rng(dev);
2699 err_out_stop_device:
2700 hifn_reset_dma(dev, 1);
2701 hifn_stop_device(dev);
2702 err_out_free_irq:
2703 free_irq(dev->irq, dev);
2704 tasklet_kill(&dev->tasklet);
2705 err_out_free_desc:
2706 pci_free_consistent(pdev, sizeof(struct hifn_dma),
2707 dev->desc_virt, dev->desc_dma);
2708
2709 err_out_free_result_pages:
2710 pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
2711 PCI_DMA_FROMDEVICE);
2712 free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
2713
2714 err_out_unmap_bars:
2715 for (i=0; i<3; ++i)
2716 if (dev->bar[i])
2717 iounmap(dev->bar[i]);
2718
2719 err_out_free_regions:
2720 pci_release_regions(pdev);
2721
2722 err_out_disable_pci_device:
2723 pci_disable_device(pdev);
2724
2725 return err;
2726 }
2727
2728 static void hifn_remove(struct pci_dev *pdev)
2729 {
2730 int i;
2731 struct hifn_device *dev;
2732
2733 dev = pci_get_drvdata(pdev);
2734
2735 if (dev) {
2736 cancel_delayed_work(&dev->work);
2737 flush_scheduled_work();
2738
2739 hifn_unregister_rng(dev);
2740 hifn_unregister_alg(dev);
2741 hifn_reset_dma(dev, 1);
2742 hifn_stop_device(dev);
2743
2744 free_irq(dev->irq, dev);
2745 tasklet_kill(&dev->tasklet);
2746
2747 hifn_flush(dev);
2748
2749 pci_free_consistent(pdev, sizeof(struct hifn_dma),
2750 dev->desc_virt, dev->desc_dma);
2751 pci_unmap_single(pdev, dev->dst,
2752 PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
2753 PCI_DMA_FROMDEVICE);
2754 free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
2755 for (i=0; i<3; ++i)
2756 if (dev->bar[i])
2757 iounmap(dev->bar[i]);
2758
2759 kfree(dev);
2760 }
2761
2762 pci_release_regions(pdev);
2763 pci_disable_device(pdev);
2764 }
2765
2766 static struct pci_device_id hifn_pci_tbl[] = {
2767 { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
2768 { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
2769 { 0 }
2770 };
2771 MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
2772
2773 static struct pci_driver hifn_pci_driver = {
2774 .name = "hifn795x",
2775 .id_table = hifn_pci_tbl,
2776 .probe = hifn_probe,
2777 .remove = __devexit_p(hifn_remove),
2778 };
2779
2780 static int __devinit hifn_init(void)
2781 {
2782 unsigned int freq;
2783 int err;
2784
2785 if (strncmp(hifn_pll_ref, "ext", 3) &&
2786 strncmp(hifn_pll_ref, "pci", 3)) {
2787 printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
2788 "must be pci or ext");
2789 return -EINVAL;
2790 }
2791
2792 /*
2793 * For the 7955/7956 the reference clock frequency must be in the
2794 * range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
2795 * but this chip is currently not supported.
2796 */
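/*
 * Example (illustrative only): loading the module with
 * "hifn_pll_ref=pci66" selects the PCI bus clock at 66 MHz as the PLL
 * reference, while a bare "ext" or "pci" leaves the frequency
 * unspecified and skips the range check below.
 */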
2797 if (hifn_pll_ref[3] != '\0') {
2798 freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
2799 if (freq < 20 || freq > 100) {
2800 printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
2801 "frequency, must be in the range "
2802 "of 20-100");
2803 return -EINVAL;
2804 }
2805 }
2806
2807 err = pci_register_driver(&hifn_pci_driver);
2808 if (err < 0) {
2809 dprintk("Failed to register PCI driver for %s device.\n",
2810 hifn_pci_driver.name);
2811 return err;
2812 }
2813
2814 printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
2815 "has been successfully registered.\n");
2816
2817 return 0;
2818 }
2819
2820 static void __devexit hifn_fini(void)
2821 {
2822 pci_unregister_driver(&hifn_pci_driver);
2823
2824 printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
2825 "has been successfully unregistered.\n");
2826 }
2827
2828 module_init(hifn_init);
2829 module_exit(hifn_fini);
2830
2831 MODULE_LICENSE("GPL");
2832 MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
2833 MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");