Staging: sep: use ioremap helpers
drivers/staging/sep/sep_driver.c
/*
 *
 * sep_driver.c - Security Processor Driver main group of functions
 *
 * Copyright(c) 2009 Intel Corporation. All rights reserved.
 * Copyright(c) 2009 Discretix. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * CONTACTS:
 *
 * Mark Allyn	mark.a.allyn@intel.com
 *
 * CHANGES:
 *
 * 2009.06.26	Initial publish
 *
 */
31
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/fs.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/mm.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/pci.h>
42 #include <linux/firmware.h>
43 #include <asm/ioctl.h>
44 #include <linux/ioport.h>
45 #include <asm/io.h>
46 #include <linux/interrupt.h>
47 #include <linux/pagemap.h>
48 #include <asm/cacheflush.h>
49 #include "sep_driver_hw_defs.h"
50 #include "sep_driver_config.h"
51 #include "sep_driver_api.h"
52 #include "sep_dev.h"
53
#if SEP_DRIVER_ARM_DEBUG_MODE

#define CRYS_SEP_ROM_length 0x4000
#define CRYS_SEP_ROM_start_address 0x8000C000UL
#define CRYS_SEP_ROM_start_address_offset 0xC000UL
#define SEP_ROM_BANK_register 0x80008420UL
#define SEP_ROM_BANK_register_offset 0x8420UL

/*
 * THESE 2 definitions are specific to the board - must be
 * defined during integration
 */
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000

/* 2M size */

static void sep_load_rom_code(struct sep_device *sep)
{
	/* Index variables */
	unsigned long i, k, j;
	u32 reg;
	u32 error;
	u32 warning;

	/* Loading ROM from SEP_ROM_image.h file */
	k = sizeof(CRYS_SEP_ROM);

	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

	edbg("SEP Driver: k is %lu\n", k);
	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %08lx\n", CRYS_SEP_ROM_start_address_offset);

	for (i = 0; i < 4; i++) {
		/* write bank */
		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

			k = k - 4;

			if (k == 0) {
				j = CRYS_SEP_ROM_length;
				i = 4;
			}
		}
	}

	/* reset the SEP */
	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

	/* poll for SEP ROM boot finish */
	do
		reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg);

	edbg("SEP Driver: ROM polling ended\n");

	switch (reg) {
	case 0x1:
		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver: ROM polling case 1\n");
		break;
	case 0x4:
		/* Cold boot ended successfully */
	case 0x8:
		/* Warm boot ended successfully */
	case 0x10:
		/* Cold/warm boot ended successfully */
		error = 0;
		break;
	case 0x2:
		/* Boot first phase ended */
		warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		break;
	case 0x20:
		edbg("SEP Driver: ROM polling case %u\n", reg);
		break;
	}

}

#else
static void sep_load_rom_code(struct sep_device *sep) { }
#endif /* SEP_DRIVER_ARM_DEBUG_MODE */


/*----------------------------------------
	DEFINES
-----------------------------------------*/

#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* debug messages level */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");

/* Keep this a single static object for now to keep the conversion easy */

static struct sep_device sep_instance;
static struct sep_device *sep_dev = &sep_instance;

/*
  mutex for the access to the internals of the sep driver
*/
static DEFINE_MUTEX(sep_mutex);


/* wait queue head (event) of the driver */
static DECLARE_WAIT_QUEUE_HEAD(sep_event);
/**
 * sep_load_firmware - copy firmware cache/resident
 * @sep: device we are loading
 *
 * This function copies the cache and resident from their source
 * location into destination shared memory.
 */

static int sep_load_firmware(struct sep_device *sep)
{
	const struct firmware *fw;
	char *cache_name = "cache.image.bin";
	char *res_name = "resident.image.bin";
	int error;

	edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
	edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);

	/* load cache */
	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
	if (error) {
		edbg("SEP Driver:can't request cache fw\n");
		return error;
	}
	edbg("SEP Driver:cache %08zx@%p\n", fw->size, (void *) fw->data);

	memcpy(sep->rar_addr, (void *)fw->data, fw->size);
	sep->cache_size = fw->size;
	release_firmware(fw);

	sep->resident_bus = sep->rar_bus + sep->cache_size;
	sep->resident_addr = sep->rar_addr + sep->cache_size;

	/* load resident */
	error = request_firmware(&fw, res_name, &sep->pdev->dev);
	if (error) {
		edbg("SEP Driver:can't request res fw\n");
		return error;
	}
	edbg("sep: res %08zx@%p\n", fw->size, (void *)fw->data);

	memcpy(sep->resident_addr, (void *) fw->data, fw->size);
	sep->resident_size = fw->size;
	release_firmware(fw);

	edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
		sep->resident_addr, (unsigned long long)sep->resident_bus,
		sep->rar_addr, (unsigned long long)sep->rar_bus);
	return 0;
}
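
/*
 * Illustrative layout note (not part of the driver): the resident image
 * is placed directly after the cache image inside the single RAR region,
 * so both halves are addressed from one virt/bus base pair:
 *
 *	rar_addr                    rar_addr + cache_size
 *	|--------- cache ---------|--------- resident ---------|
 *	rar_bus                     rar_bus + cache_size
 *
 * Any tool staging these files would simply install cache.image.bin and
 * resident.image.bin into the firmware search path (e.g. /lib/firmware)
 * where request_firmware() can find them.
 */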

/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 * @size: size of shared area
 *
 * Allocate a shared buffer in host memory that can be used by both the
 * kernel and also the hardware interface via DMA.
 */

static int sep_map_and_alloc_shared_area(struct sep_device *sep,
							unsigned long size)
{
	/* shared_addr = ioremap_nocache(0xda00000, shared_area_size); */
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
					&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		edbg("sep_driver: shared memory dma_alloc_coherent failed\n");
		return -ENOMEM;
	}
	/* set the bus address of the shared area */
	edbg("sep: shared_addr %lu bytes @%p (bus %08llx)\n",
		size, sep->shared_addr, (unsigned long long)sep->shared_bus);
	return 0;
}
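
/*
 * A minimal usage sketch (illustrative only): dma_alloc_coherent() hands
 * back a kernel virtual address and the matching bus address in a single
 * call, and the pair must be released together:
 *
 *	dma_addr_t bus;
 *	void *virt = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);
 *	if (!virt)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, virt, bus);
 */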

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 *
 * Free the shared area allocated to the security processor. The
 * processor must have finished with this and any final posted
 * writes cleared before we do so.
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
{
	dma_free_coherent(&sep->pdev->dev, size,
				sep->shared_addr, sep->shared_bus);
}

/**
 * sep_shared_virt_to_bus - convert bus/virt addresses
 *
 * Returns the bus address inside the shared area according
 * to the virtual address.
 */

static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
						void *virt_address)
{
	dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
	edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long)pa,
		virt_address);
	return pa;
}

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */

static void *sep_shared_bus_to_virt(struct sep_device *sep,
						dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
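
/*
 * Worked example (hypothetical numbers): with shared_addr = 0xc9000000
 * and shared_bus = 0x1f000000, the kernel pointer 0xc9000830 converts to
 * bus address 0x1f000830, and converting that bus address back yields
 * the original pointer. The two helpers above are exact inverses for any
 * address inside the shared region.
 */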


/**
 * sep_try_open - attempt to open a SEP device
 * @sep: device to attempt to open
 *
 * Atomically attempt to get ownership of a SEP device.
 * Returns 1 if the device was opened, 0 on failure.
 */

static int sep_try_open(struct sep_device *sep)
{
	if (!test_and_set_bit(0, &sep->in_use))
		return 1;
	return 0;
}
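
/*
 * test_and_set_bit() is atomic, so two concurrent openers cannot both
 * observe the bit clear: exactly one caller gets the 0 -> 1 transition.
 * A sketch of the race-free ownership pattern used here (illustrative):
 *
 *	if (!test_and_set_bit(0, &sep->in_use))
 *		...we own the device...
 *	else
 *		...someone else does; sleep on sep_event...
 */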

/**
 * sep_open - device open method
 * @inode: inode of sep device
 * @filp: file handle to sep device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node. Must also release the memory data pool
 * allocations.
 *
 * Returns zero on success otherwise an error code.
 */

static int sep_open(struct inode *inode, struct file *filp)
{
	if (sep_dev == NULL)
		return -ENODEV;

	/* check the blocking mode */
	if (filp->f_flags & O_NDELAY) {
		if (sep_try_open(sep_dev) == 0)
			return -EAGAIN;
	} else
		if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
			return -EINTR;

	/* Bind to the device, we only have one which makes it easy */
	filp->private_data = sep_dev;
	/* release data pool allocations */
	sep_dev->data_pool_bytes_allocated = 0;
	return 0;
}


/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device. As the open protects against
 * multiple simultaneous opens that means this method is called when the
 * final reference to the open handle is dropped.
 */

static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;
#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

#endif
	/* Ensure any blocked open progresses */
	clear_bit(0, &sep->in_use);
	wake_up(&sep_event);
	return 0;
}

/*---------------------------------------------------------------
  map function - this function maps the message shared area
-----------------------------------------------------------------*/
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	dma_addr_t bus_addr;
	struct sep_device *sep = filp->private_data;

	dbg("-------->SEP Driver: mmap start\n");

	/* check that the size of the mapped range is as the size of the message
	   shared area */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		edbg("SEP Driver mmap requested size is more than allowed\n");
		printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
		printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
		printk(KERN_WARNING "SEP Driver vma->vm_start is %08lx\n", vma->vm_start);
		return -EAGAIN;
	}

	edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);

	/* get bus address */
	bus_addr = sep->shared_bus;

	edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		edbg("SEP Driver remap_page_range failed\n");
		printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
		return -EAGAIN;
	}

	dbg("SEP Driver:<-------- mmap end\n");

	return 0;
}
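
/*
 * Userspace counterpart (illustrative sketch, assuming the application
 * knows the shared area size): the message area is obtained by mmap()ing
 * the device node, e.g.
 *
 *	void *msg = mmap(NULL, area_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * remap_pfn_range() above wires those user pages directly onto the
 * coherent shared buffer, so messages need no extra copies.
 */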


/*-----------------------------------------------
  poll function
*----------------------------------------------*/
static unsigned int sep_poll(struct file *filp, poll_table * wait)
{
	unsigned long count;
	unsigned int mask = 0;
	unsigned long retval = 0;	/* flow id */
	struct sep_device *sep = filp->private_data;

	dbg("---------->SEP Driver poll: start\n");


#if SEP_DRIVER_POLLING_MODE

	while (sep->send_ct != (retval & 0x7FFFFFFF)) {
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
	}

	sep->reply_ct++;
#else
	/* add the event to the polling wait table */
	poll_wait(filp, &sep_event, wait);

#endif

	edbg("sep->send_ct is %lu\n", sep->send_ct);
	edbg("sep->reply_ct is %lu\n", sep->reply_ct);

	/* check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		for (count = 0; count < 12 * 4; count += 4)
			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));

		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		edbg("retval is %lu\n", retval);
		/* check if this is a sep reply or a request */
		if (retval >> 31) {
			edbg("SEP Driver: sep request in\n");
			/* request */
			mask |= POLLOUT | POLLWRNORM;
		} else {
			edbg("SEP Driver: sep reply in\n");
			mask |= POLLIN | POLLRDNORM;
		}
	}
	dbg("SEP Driver:<-------- poll exit\n");
	return mask;
}

/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */

static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the sep mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* address of time as seen by the kernel */


	dbg("sep:sep_set_time start\n");

	do_gettimeofday(&time);

	/* set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
	edbg("SEP Driver:time_addr is %p\n", time_addr);
	edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);

	return time.tv_sec;
}
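
/*
 * Layout written by sep_set_time() - two 32-bit words at the system
 * time offset of the shared area:
 *
 *	word 0: SEP_TIME_VAL_TOKEN (marker checked by the SEP side)
 *	word 1: seconds since the epoch, from do_gettimeofday()
 */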

/**
 * sep_dump_message - dump the message that is pending
 * @sep: sep device
 *
 * Dump out the message pending in the shared message area
 */

static void sep_dump_message(struct sep_device *sep)
{
	int count;
	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: sep being signalled
 *
 * This function raises an interrupt to SEP that signals that it has a
 * new command from the host
 */

static void sep_send_command_handler(struct sep_device *sep)
{
	dbg("sep:sep_send_command_handler start\n");

	mutex_lock(&sep_mutex);
	sep_set_time(sep);

	/* FIXME: flush cache */
	flush_cache_all();

	sep_dump_message(sep);
	/* update counter */
	sep->send_ct++;
	/* send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
	dbg("SEP Driver:<-------- sep_send_command_handler end\n");
	mutex_unlock(&sep_mutex);
	return;
}
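
/*
 * Host <-> SEP signalling in brief (as used above and in sep_poll()):
 * the host writes a message into the shared area, bumps send_ct and
 * pokes a host-to-SEP GPR; the SEP answers through
 * HW_HOST_SEP_HOST_GPR2, whose top bit distinguishes a new SEP request
 * (bit 31 set) from a reply to the host (bit 31 clear).
 */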

/**
 * sep_send_reply_command_handler - kick off a command reply
 * @sep: sep being signalled
 *
 * This function raises an interrupt to SEP that signals that the host
 * has placed a reply to an earlier SEP request
 */

static void sep_send_reply_command_handler(struct sep_device *sep)
{
	dbg("sep:sep_send_reply_command_handler start\n");

	/* flush cache */
	flush_cache_all();

	sep_dump_message(sep);

	mutex_lock(&sep_mutex);
	sep->send_ct++;	/* update counter */
	/* send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	/* update both counters */
	sep->send_ct++;
	sep->reply_ct++;
	mutex_unlock(&sep_mutex);
	dbg("sep: sep_send_reply_command_handler end\n");
}

/*
  This function handles the allocate data pool memory request.
  It calculates the bus address of the allocated memory and the
  offset of this area from the mapped address. Therefore, the
  FVOs in user space can calculate the exact virtual address of
  this allocated memory
 */
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
							unsigned long arg)
{
	int error;
	struct sep_driver_alloc_t command_args;

	dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
	if (error) {
		error = -EFAULT;
		goto end_function;
	}

	/* allocate memory */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		error = -ENOMEM;
		goto end_function;
	}

	/* set the virtual and bus address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
	command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;

	/* write the memory back to the user space */
	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
	if (error) {
		error = -EFAULT;
		goto end_function;
	}

	/* set the allocation */
	sep->data_pool_bytes_allocated += command_args.num_bytes;

end_function:
	dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
	return error;
}

/*
  This function handles the write into data pool command
 */
static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
{
	int error;
	void *virt_address;
	unsigned long va;
	unsigned long app_in_address;
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");

	/* get the application address */
	error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
	if (error)
		goto end_function;

	/* get the virtual kernel address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
	if (error)
		goto end_function;
	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
	if (error)
		goto end_function;

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;


	/* check that the range of the virtual kernel address is correct */
	if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
		error = -EINVAL;
		goto end_function;
	}
	/* copy the application data */
	error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
	if (error)
		error = -EFAULT;
end_function:
	dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
	return error;
}

/*
  This function handles the read from data pool command
 */
static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
{
	int error;
	/* virtual address of dest application buffer */
	unsigned long app_out_address;
	/* virtual address of the data pool */
	unsigned long va;
	void *virt_address;
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");

	/* get the application address */
	error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
	if (error)
		goto end_function;

	/* get the virtual kernel address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
	if (error)
		goto end_function;
	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
	if (error)
		goto end_function;

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

	/* FIXME: These are incomplete all over the driver: what about + len
	   and when doing that also overflows */
	/* check that the range of the virtual kernel address is correct */
	if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		error = -EINVAL;
		goto end_function;
	}

	/* copy the application data */
	error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
	if (error)
		error = -EFAULT;
end_function:
	dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
	return error;
}

/*
  This function releases all the application virtual buffer physical pages
  that were previously locked
 */
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	if (dirtyFlag) {
		for (count = 0; count < num_pages; count++) {
			/* the out array was written, therefore the data was changed */
			if (!PageReserved(page_array_ptr[count]))
				SetPageDirty(page_array_ptr[count]);
			page_cache_release(page_array_ptr[count]);
		}
	} else {
		/* free in pages - the data was only read, therefore no update was done
		   on those pages */
		for (count = 0; count < num_pages; count++)
			page_cache_release(page_array_ptr[count]);
	}

	/* free the array (kfree handles NULL) */
	kfree(page_array_ptr);

	return 0;
}
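
/*
 * Why SetPageDirty() above: if the device wrote into the buffer (DMA
 * output), the pagecache must be told the page content changed before
 * the pin is dropped, or the new data could be lost on reclaim. Pages
 * the device only read from are released untouched.
 */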

/*
  This function locks all the physical pages of the kernel virtual buffer
  and constructs a basic lli array, where each entry holds the physical
  page address and the size that the application data holds in this
  physical page
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
				unsigned long kernel_virt_addr,
				unsigned long data_size,
				unsigned long *num_pages_ptr,
				struct sep_lli_entry_t **lli_array_ptr,
				struct page ***page_array_ptr)
{
	int error = 0;
	/* the page of the end address of the user space buffer */
	unsigned long end_page;
	/* the page of the start address of the user space buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct sep_lli_entry_t *lli_array;
	/* next kernel address to map */
	unsigned long next_kernel_address;
	unsigned long count;

	dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");

	/* set start and end pages and num pages */
	end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = kernel_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lx\n", start_page);
	edbg("SEP Driver: end_page is %lx\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
	if (!lli_array) {
		edbg("SEP Driver: kmalloc for lli_array failed\n");
		error = -ENOMEM;
		goto end_function;
	}

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));

	/* debug print */
	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* advance the address to the start of the next page */
	next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;

	/* go from the second page to the one before the last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
		next_kernel_address += PAGE_SIZE;
	}

	/* if more than 1 page is locked - then update the size needed for the last page */
	if (num_pages > 1) {
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);

		/* set the size of the last page */
		lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
			dbg("data_size is %lu\n", data_size);
			while (1);
		}

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
	}
	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;
	*page_array_ptr = 0;
end_function:
	dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
	return error;
}

/*
  This function locks all the physical pages of the application virtual buffer
  and constructs a basic lli array, where each entry holds the physical page
  address and the size that the application data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
			unsigned long app_virt_addr,
			unsigned long data_size,
			unsigned long *num_pages_ptr,
			struct sep_lli_entry_t **lli_array_ptr,
			struct page ***page_array_ptr)
{
	int error = 0;
	/* the page of the end address of the user space buffer */
	unsigned long end_page;
	/* the page of the start address of the user space buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct page **page_array;
	struct sep_lli_entry_t *lli_array;
	unsigned long count;
	int result;

	dbg("SEP Driver:--------> sep_lock_user_pages start\n");

	/* set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lu\n", start_page);
	edbg("SEP Driver: end_page is %lu\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	/* allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		edbg("SEP Driver: kmalloc for page_array failed\n");

		error = -ENOMEM;
		goto end_function;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
	if (!lli_array) {
		edbg("SEP Driver: kmalloc for lli_array failed\n");

		error = -ENOMEM;
		goto end_function_with_error1;
	}

	/* convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
	up_read(&current->mm->mmap_sem);

	/* check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dbg("SEP Driver: not all pages locked by get_user_pages\n");

		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* flush the cache */
	for (count = 0; count < num_pages; count++)
		flush_dcache_page(page_array[count]);

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	/* debug print */
	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* go from the second page to the one before the last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
	}

	/* if more than 1 page is locked - then update the size needed for the last page */
	if (num_pages > 1) {
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);

		/* set the size of the last page */
		lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", app_virt_addr);
			dbg("data_size is %lu\n", data_size);
			while (1);
		}
		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
	}

	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;
	*page_array_ptr = page_array;
	goto end_function;

end_function_with_error2:
	/* release only the pages that were actually pinned */
	if (result > 0)
		for (count = 0; count < result; count++)
			page_cache_release(page_array[count]);
	kfree(lli_array);
end_function_with_error1:
	kfree(page_array);
end_function:
	dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
	return error;
}
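
/*
 * Note on get_user_pages() as called above: it pins num_pages user
 * pages for DMA with write access (the '1' argument), with mmap_sem
 * held for read. Every page successfully pinned must eventually be
 * released with page_cache_release(), which is what sep_free_dma_pages()
 * and the error path above take care of.
 */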


/*
  This function calculates the size of data that can be inserted into
  the lli table from this array; the condition is that either the table
  is full (all entries are entered), or there are no more entries in
  the lli array
 */
static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
{
	unsigned long table_data_size = 0;
	unsigned long counter;

	/* calculate the data in the out lli table until we fill the whole
	   table or until the data has ended */
	for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;
	return table_data_size;
}
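
/*
 * Worked example (hypothetical sizes): with room for 7 data entries per
 * table (i.e. SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1 == 7, say) and an
 * lli array of blocks 4096, 4096, 2048, ..., the helper sums at most
 * the first 7 block sizes; the callers then round that sum down to a
 * multiple of the cipher block size before building the table.
 */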

/*
  This function builds one lli table from the lli_array according to
  the given size of data
 */
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
{
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table until the table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of block in the table */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
		} else
			/* advance to the next entry in the lli_array */
			array_counter++;

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}

	/* set the info entry to default */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
	return;
}
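
/*
 * Info-entry encoding used when tables are chained (see the callers):
 *
 *	block_size = (num_entries << 24) | table_data_size;
 *
 * e.g. a follow-on table with 8 entries covering 0x4000 bytes is stored
 * as 0x08004000, and sep_debug_print_lli_tables() unpacks it again with
 * (block_size >> 24) & 0xff and block_size & 0xffffff.
 */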

/*
  This function walks the list of the created tables and
  prints all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
{
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	table_count = 1;
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
		}

		/* point to the info entry */
		lli_table_ptr--;

		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);


		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is %lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);

		table_count++;
	}
	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
}


/*
  This function prepares only the input DMA table for synchronous
  symmetric operations (HASH)
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
				unsigned long app_virt_addr,
				unsigned long data_size,
				unsigned long block_size,
				unsigned long *lli_table_ptr,
				unsigned long *num_entries_ptr,
				unsigned long *table_data_size_ptr,
				bool isKernelVirtualAddress)
{
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_entry_ptr;
	/* array of lli entries, one per page of the buffer */
	struct sep_lli_entry_t *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_entry;
	/* num entries in the virtual buffer */
	unsigned long sep_lli_entries;
	/* lli table pointer */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* the total data in one table */
	unsigned long table_data_size;
	/* number of entries in lli table */
	unsigned long num_entries_in_table;
	/* next table address */
	void *lli_table_alloc_addr;
	unsigned long result;

	dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");

	edbg("SEP Driver:data_size is %lu\n", data_size);
	edbg("SEP Driver:block_size is %lu\n", block_size);

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->in_num_pages = 0;

	if (data_size == 0) {
		/* special case - create a 2 entry table with zero data */
		in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
		/* FIXME: Should the entry below not be for _bus */
		in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		in_lli_table_ptr->block_size = 0;

		in_lli_table_ptr++;
		in_lli_table_ptr->physical_address = 0xFFFFFFFF;
		in_lli_table_ptr->block_size = 0;

		*lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		*num_entries_ptr = 2;
		*table_data_size_ptr = 0;

		goto end_function;
	}

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
	else
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);

	if (result)
		return result;

	edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);

	current_entry = 0;
	info_entry_ptr = 0;
	sep_lli_entries = sep->in_num_pages;

	/* initiate to point past the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* loop until all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));

		/* now calculate the table size so that it will be a multiple of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == 0) {
			/* set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
		} else {
			/* update the info entry of the previous in table */
			info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);

	/* free the array of the pages */
	kfree(lli_array_ptr);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
	return 0;

}

/*
  This function creates the input and output DMA tables for
  symmetric operations (AES/DES) according to the block size from the LLI arrays
 */
static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
				struct sep_lli_entry_t *lli_in_array,
				unsigned long sep_in_lli_entries,
				struct sep_lli_entry_t *lli_out_array,
				unsigned long sep_out_lli_entries,
				unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where the next lli table can be allocated: keep void *
	   as there is pointer scaling to fix otherwise */
	void *lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if these are the first tables built from the arrays */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point past the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop until all the entries in the in array are processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* set the first output tables */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* check where the data is smallest */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* now calculate the table size so that it will be a multiple of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* set the output parameters to physical addresses */
			*lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* update the info entry of the previous in table */
			info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* update the info entry of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}


/*
  This function builds input and output DMA tables for synchronous
  symmetric operations (AES, DES). It also checks that each table
  is of the modular block size
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
				unsigned long app_virt_in_addr,
				unsigned long app_virt_out_addr,
				unsigned long data_size,
				unsigned long block_size,
				unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* array of lli entries for the input pages */
	struct sep_lli_entry_t *lli_in_array;
	/* array of lli entries for the output pages */
	struct sep_lli_entry_t *lli_out_array;
	int result = 0;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->out_page_array = 0;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
			goto end_function;
		}
	} else {
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
			goto end_function;
		}
	}

	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	} else {
		result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	}
	edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
	edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);


	/* call the function that creates tables from the lli arrays */
	result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
	if (result) {
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;
	}

	/* fall through - free the lli entry arrays */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	kfree(lli_in_array);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
	return result;

}

/*
  This function handles the request for creation of the DMA tables
  for the synchronous symmetric operations (AES, DES)
 */
static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
						unsigned long arg)
{
	int error;
	/* command arguments */
	struct sep_driver_build_sync_table_t command_args;

	dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
	if (error) {
		error = -EFAULT;
		goto end_function;
	}

	edbg("app_in_address is %08lx\n", command_args.app_in_address);
	edbg("app_out_address is %08lx\n", command_args.app_out_address);
	edbg("data_size is %lu\n", command_args.data_in_size);
	edbg("block_size is %lu\n", command_args.block_size);

	/* check if we need to build only input table or input/output */
	if (command_args.app_out_address)
		/* prepare input and output tables */
		error = sep_prepare_input_output_dma_table(sep,
				command_args.app_in_address,
				command_args.app_out_address,
				command_args.data_in_size,
				command_args.block_size,
				&command_args.in_table_address,
				&command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
	else
		/* prepare input tables */
		error = sep_prepare_input_dma_table(sep,
				command_args.app_in_address,
				command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);

	if (error)
		goto end_function;
	/* copy to user */
	if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
		error = -EFAULT;
end_function:
	dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
	return error;
}

/*
  This function handles the request for freeing the dma table for synchronous actions
 */
static int sep_free_dma_table_data_handler(struct sep_device *sep)
{
	dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");

	/* free input pages array */
	sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);

	/* free output pages array if needed */
	if (sep->out_page_array)
		sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);

	/* reset all the values */
	sep->in_page_array = 0;
	sep->out_page_array = 0;
	sep->in_num_pages = 0;
	sep->out_num_pages = 0;
	dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
	return 0;
}
1501
1502 /*
1503 this function find a space for the new flow dma table
1504 */
1505 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1506 unsigned long **table_address_ptr)
1507 {
1508 int error = 0;
1509 /* pointer to the id field of the flow dma table */
1510 unsigned long *start_table_ptr;
1511 /* Do not make start_addr unsigned long * unless fixing the offset
1512 computations ! */
1513 void *flow_dma_area_start_addr;
1514 unsigned long *flow_dma_area_end_addr;
1515 /* maximum table size in words */
1516 unsigned long table_size_in_words;
1517
1518 /* find the start address of the flow DMA table area */
1519 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1520
1521 /* set end address of the flow table area */
1522 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1523
1524 /* set table size in words */
1525 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1526
1527 /* set the pointer to the start address of DMA area */
1528 start_table_ptr = flow_dma_area_start_addr;
1529
1530 /* find the space for the next table */
1531 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1532 start_table_ptr += table_size_in_words;
1533
1534 /* check if we reached the end of the flow tables area */
1535 if (start_table_ptr >= flow_dma_area_end_addr)
1536 error = -ENOSPC;
1537 else
1538 *table_address_ptr = start_table_ptr;
1539
1540 return error;
1541 }
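/*
 * A sketch of the flow DMA table slot layout, reconstructed from
 * sep_prepare_one_flow_dma_table() and sep_deallocated_flow_tables()
 * below (a reading of this file, not a hardware specification):
 *
 *	word 0: number of LLI entries in the table; any non-zero value
 *		in bits 30:0 marks the slot as taken
 *	word 1: pointer to the struct page array backing the buffer
 *	word 2 onward: the LLI entries themselves, followed by an info
 *		entry whose physical_address chains to the next table
 *		(0xffffffff terminates the list)
 */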
1542
1543 /*
1544 This function creates one DMA table for flow and returns its data,
1545 and a pointer to its info entry
1546 */
1547 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1548 unsigned long virt_buff_addr,
1549 unsigned long virt_buff_size,
1550 struct sep_lli_entry_t *table_data,
1551 struct sep_lli_entry_t **info_entry_ptr,
1552 struct sep_flow_context_t *flow_data_ptr,
1553 bool isKernelVirtualAddress)
1554 {
1555 int error;
1556 /* number of entries in the lli array - one entry per locked page */
1557 unsigned long lli_array_size;
1558 struct sep_lli_entry_t *lli_array;
1559 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1560 unsigned long *start_dma_table_ptr;
1561 /* total table data counter */
1562 unsigned long dma_table_data_count;
1563 /* array of pointers to the pages that back the virtual buffer */
1564 struct page **page_array_ptr;
1565 unsigned long entry_count;
1566
1567 /* find the space for the new table */
1568 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1569 if (error)
1570 goto end_function;
1571
1572 /* check if the pages are in Kernel Virtual Address layout */
1573 if (isKernelVirtualAddress == true)
1574 /* lock kernel buffer in the memory */
1575 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1576 else
1577 /* lock user buffer in the memory */
1578 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1579
1580 if (error)
1581 goto end_function;
1582
1583 /* store the number of entries at the beginning of the table - a non-zero
1584 first word marks this table as taken */
1585 *start_dma_table_ptr = lli_array_size;
1586
1587 /* advance to the slot that holds the pages array pointer */
1588 start_dma_table_ptr++;
1589
1590 /* set the pages pointer */
1591 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1592
1593 /* set the pointer to the first entry */
1594 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1595
1596 /* now create the entries for table */
1597 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1598 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1599
1600 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1601
1602 /* set the total data of a table */
1603 dma_table_data_count += lli_array[entry_count].block_size;
1604
1605 flow_dma_table_entry_ptr++;
1606 }
1607
1608 /* set the physical address */
1609 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1610
1611 /* set the num_entries and total data size */
1612 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1613
1614 /* set the info entry */
1615 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1616 flow_dma_table_entry_ptr->block_size = 0;
1617
1618 /* set the pointer to info entry */
1619 *info_entry_ptr = flow_dma_table_entry_ptr;
1620
1621 /* free the temporary array of lli entries */
1622 kfree(lli_array);
1623 end_function:
1624 return error;
1625 }
1626
1627
1628
1629 /*
1630 This function creates a list of tables for flow and returns the data for
1631 the first and last tables of the list
1632 */
1633 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1634 unsigned long num_virtual_buffers,
1635 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1636 {
1637 int error;
1638 unsigned long virt_buff_addr;
1639 unsigned long virt_buff_size;
1640 struct sep_lli_entry_t table_data;
1641 struct sep_lli_entry_t *info_entry_ptr;
1642 struct sep_lli_entry_t *prev_info_entry_ptr;
1643 unsigned long i;
1644
1645 /* init vars */
1646 error = 0;
1647 prev_info_entry_ptr = NULL;
1648
1649 /* init the first table to default */
1650 table_data.physical_address = 0xffffffff;
1651 first_table_data_ptr->physical_address = 0xffffffff;
1652 table_data.block_size = 0;
1653
1654 for (i = 0; i < num_virtual_buffers; i++) {
1655 /* get the virtual buffer address from the user-space array */
1656 error = get_user(virt_buff_addr, (unsigned long __user *) first_buff_addr);
1657 if (error)
1658 goto end_function;
1659 
1660 /* get the virtual buffer size */
1661 first_buff_addr += sizeof(unsigned long);
1662 error = get_user(virt_buff_size, (unsigned long __user *) first_buff_addr);
1663 if (error)
1664 goto end_function;
1665 
1666 /* advance the address to point to the next pair of address|size */
1667 first_buff_addr += sizeof(unsigned long);
1668
1669 /* now prepare the one flow LLI table from the data */
1670 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1671 if (error)
1672 goto end_function;
1673
1674 if (i == 0) {
1675 /* if this is the first table - save it to return to the user
1676 application */
1677 *first_table_data_ptr = table_data;
1678
1679 /* set the pointer to info entry */
1680 prev_info_entry_ptr = info_entry_ptr;
1681 } else {
1682 /* not first table - the previous table info entry should
1683 be updated */
1684 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1685
1686 /* set the pointer to info entry */
1687 prev_info_entry_ptr = info_entry_ptr;
1688 }
1689 }
1690
1691 /* set the last table data */
1692 *last_table_data_ptr = table_data;
1693 end_function:
1694 return error;
1695 }
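/*
 * The first_buff_addr argument above is treated as the user-space
 * address of an array of address/size pairs, one pair per virtual
 * buffer. A minimal sketch of how a caller might fill it in (all
 * names here are illustrative only):
 *
 *	unsigned long buff_data[2 * num_buffers];
 *
 *	buff_data[2 * i] = (unsigned long) buffer[i];
 *	buff_data[2 * i + 1] = buffer_size[i];
 */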
1696
1697 /*
1698 this function goes over all the flow tables connected to the given
1699 table and deallocates them
1700 */
1701 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1702 {
1703 /* pointer to the current table */
1704 unsigned long *table_ptr;
1705 /* number of entries in the current table */
1706 unsigned long num_entries;
1707 unsigned long num_pages;
1708 struct page **pages_ptr;
1709 /* pointer to the info entry of the current table */
1710 struct sep_lli_entry_t *info_entry_ptr;
1711
1712 /* set the pointer to the first table */
1713 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1714
1715 /* set the num of entries */
1716 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1717 & SEP_NUM_ENTRIES_MASK;
1718
1719 /* go over all the connected tables */
1720 while ((unsigned long) table_ptr != 0xffffffff) {	/* 0xffffffff terminates the chain */
1721 /* get number of pages */
1722 num_pages = *(table_ptr - 2);
1723
1724 /* get the pointer to the pages */
1725 pages_ptr = (struct page **) (*(table_ptr - 1));
1726
1727 /* free the pages */
1728 sep_free_dma_pages(pages_ptr, num_pages, 1);
1729
1730 /* advance to the info entry */
1731 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
1732
1733 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1734 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1735 }
1736
1737 return;
1738 }
1739
1740 /**
1741 * sep_find_flow_context - find a flow
1742 * @sep: the SEP we are working with
1743 * @flow_id: flow identifier
1744 *
1745 * Returns a pointer to the matching flow, or NULL if the flow does not
1746 * exist.
1747 */
1748
1749 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1750 unsigned long flow_id)
1751 {
1752 int count;
1753 /*
1754 * a simple linear scan is sufficient: once work on a flow has
1755 * started, there can never be two flows carrying the default
1756 * (free) id at the same time
1757 */
1758 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1759 if (sep->flows[count].flow_id == flow_id)
1760 return &sep->flows[count];
1761 }
1762 return NULL;
1763 }
1764
1765
1766 /*
1767 this function handles the request to create the DMA tables for flow
1768 */
1769 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1770 unsigned long arg)
1771 {
1772 int error;
1773 struct sep_driver_build_flow_table_t command_args;
1774 /* data of the first table in the list */
1775 struct sep_lli_entry_t first_table_data;
1776 /* data of the last table in the list */
1777 struct sep_lli_entry_t last_table_data;
1780 /* pointer to the flow data strucutre */
1781 struct sep_flow_context_t *flow_context_ptr;
1782
1783 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1784
1785 /* init variables */
1787 first_table_data.physical_address = 0xffffffff;
1788
1789 /* find the free structure for flow data */
1790 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1791 if (flow_context_ptr == NULL) {
1792 error = -EINVAL; goto end_function;
1793 }
1794 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t)) ? -EFAULT : 0;
1795 if (error)
1796 goto end_function;
1797
1798 /* create flow tables */
1799 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1800 if (error)
1801 goto end_function_with_error;
1802
1803 /* check if flow is static */
1804 if (!command_args.flow_type)
1805 /* for a static flow the first table data also serves as the last */
1806 last_table_data = first_table_data;
1807
1808 /* set output params */
1809 command_args.first_table_addr = first_table_data.physical_address;
1810 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1811 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1812
1813 /* send the parameters to user application */
1814 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t)) ? -EFAULT : 0;
1815 if (error)
1816 goto end_function_with_error;
1817
1818 /* the whole flow was created - mark the flow entry with the temporary id */
1819 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1820
1821 /* set the processing tables data in the context */
1822 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1823 flow_context_ptr->input_tables_in_process = first_table_data;
1824 else
1825 flow_context_ptr->output_tables_in_process = first_table_data;
1826
1827 goto end_function;
1828
1829 end_function_with_error:
1830 /* free the allocated tables */
1831 sep_deallocated_flow_tables(&first_table_data);
1832 end_function:
1833 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1834 return error;
1835 }
1836
1837 /*
1838 this function handles adding tables to a flow
1839 */
1840 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1841 {
1842 int error;
1843 unsigned long num_entries;
1844 struct sep_driver_add_flow_table_t command_args;
1845 struct sep_flow_context_t *flow_context_ptr;
1846 /* first dma table data */
1847 struct sep_lli_entry_t first_table_data;
1848 /* last dma table data */
1849 struct sep_lli_entry_t last_table_data;
1850 /* pointer to the info entry of the current DMA table */
1851 struct sep_lli_entry_t *info_entry_ptr;
1852
1853 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1854
1855 /* get input parameters */
1856 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t)) ? -EFAULT : 0;
1857 if (error)
1858 goto end_function;
1859
1860 /* find the flow structure for the flow id */
1861 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1862 if (flow_context_ptr == NULL) {
1863 error = -EINVAL; goto end_function;
1864 }
1865 /* prepare the flow dma tables */
1866 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1867 if (error)
1868 goto end_function_with_error;
1869
1870 /* now check if there is already an existing add table for this flow */
1871 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1872 /* this buffer was for input buffers */
1873 if (flow_context_ptr->input_tables_flag) {
1874 /* add table already exists - add the new tables to the end
1875 of the previous */
1876 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1877
1878 info_entry_ptr = (struct sep_lli_entry_t *)
1879 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1880
1881 /* connect to list of tables */
1882 *info_entry_ptr = first_table_data;
1883
1884 /* set the first table data */
1885 first_table_data = flow_context_ptr->first_input_table;
1886 } else {
1887 /* set the input flag */
1888 flow_context_ptr->input_tables_flag = 1;
1889
1890 /* set the first table data */
1891 flow_context_ptr->first_input_table = first_table_data;
1892 }
1893 /* set the last table data */
1894 flow_context_ptr->last_input_table = last_table_data;
1895 } else { /* this is output tables */
1896
1897 /* this buffer was for output buffers */
1898 if (flow_context_ptr->output_tables_flag) {
1899 /* add table already exists - add the new tables to
1900 the end of the previous */
1901 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1902
1903 info_entry_ptr = (struct sep_lli_entry_t *)
1904 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1905
1906 /* connect to list of tables */
1907 *info_entry_ptr = first_table_data;
1908
1909 /* set the first table data */
1910 first_table_data = flow_context_ptr->first_output_table;
1911 } else {
1912 /* set the output flag */
1913 flow_context_ptr->output_tables_flag = 1;
1914
1915 /* set the first table data */
1916 flow_context_ptr->first_output_table = first_table_data;
1917 }
1918 /* set the last table data */
1919 flow_context_ptr->last_output_table = last_table_data;
1920 }
1921
1922 /* set output params */
1923 command_args.first_table_addr = first_table_data.physical_address;
1924 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1925 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1926
1927 /* send the parameters to user application */
1928 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t)) ? -EFAULT : 0;
1929 end_function_with_error:
1930 /* free the allocated tables only on error - on success they now belong to the flow */
1931 if (error) sep_deallocated_flow_tables(&first_table_data);
1932 end_function:
1933 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1934 return error;
1935 }
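/*
 * Splicing note (inferred from the code above, not from any spec):
 * when tables already exist for the given direction, the info entry
 * of the flow's last table is overwritten with first_table_data, so
 * its physical_address field now chains to the freshly prepared list
 * and the walk in sep_deallocated_flow_tables() reaches the new
 * tables as well.
 */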
1936
1937 /*
1938 this function adds the flow 'add tables' message to the specified flow
1939 */
1940 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1941 {
1942 int error;
1943 struct sep_driver_add_message_t command_args;
1944 struct sep_flow_context_t *flow_context_ptr;
1945
1946 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1947
1948 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t)) ? -EFAULT : 0;
1949 if (error)
1950 goto end_function;
1951
1952 /* check input */
1953 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1954 error = -ENOMEM;
1955 goto end_function;
1956 }
1957
1958 /* find the flow context */
1959 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1960 if (flow_context_ptr == NULL) {
1961 error = -EINVAL; goto end_function;
1962 }
1963 /* copy the message into context */
1964 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1965 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes) ? -EFAULT : 0;
1966 end_function:
1967 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1968 return error;
1969 }
1970
1971
1972 /*
1973 this function returns the bus and virtual addresses of the static pool
1974 */
1975 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1976 {
1977 int error;
1978 struct sep_driver_static_pool_addr_t command_args;
1979
1980 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1981
1982 /* prepare the output parameters in the struct */
1983 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1984 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1985
1986 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1987
1988 /* send the parameters to user application */
1989 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t)) ? -EFAULT : 0;
1990 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1991 return error;
1992 }
1993
1994 /*
1995 this function gets the offset of the given bus (physical) address
1996 from the start of the mapped shared area
1997 */
1998 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
1999 {
2000 int error;
2001 struct sep_driver_get_mapped_offset_t command_args;
2002
2003 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2004
2005 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t)) ? -EFAULT : 0;
2006 if (error)
2007 goto end_function;
2008
2009 if (command_args.physical_address < sep->shared_bus) {
2010 error = -EINVAL;
2011 goto end_function;
2012 }
2013
2014 /* prepare the output parameters in the struct */
2015 command_args.offset = command_args.physical_address - sep->shared_bus;
2016
2017 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2018
2019 /* send the parameters to user application */
2020 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t)) ? -EFAULT : 0;
2021 end_function:
2022 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2023 return error;
2024 }
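/*
 * Worked example with made-up numbers: if the shared area starts at
 * bus address 0x1f000000 and user space passes physical_address
 * 0x1f000400, the handler returns offset 0x400, which the caller can
 * add to the base of its mmap()ed view of the shared area.
 */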
2025
2026
2027 /*
2028 this function handles the SEP start request - it polls until SEP reports boot completion and returns any fatal error status
2029 */
2030 static int sep_start_handler(struct sep_device *sep)
2031 {
2032 unsigned long reg_val;
2033 unsigned long error = 0;
2034
2035 dbg("SEP Driver:--------> sep_start_handler start\n");
2036
2037 /* wait in polling for message from SEP */
2038 do
2039 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2040 while (!reg_val);
2041
2042 /* check the value */
2043 if (reg_val == 0x1)
2044 /* fatal error - read error status from GPR0 */
2045 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2046 dbg("SEP Driver:<-------- sep_start_handler end\n");
2047 return error;
2048 }
2049
2050 /*
2051 this function handles the request for SEP initialization
2052 */
2053 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2054 {
2055 unsigned long message_word;
2056 unsigned long *message_ptr;
2057 struct sep_driver_init_t command_args;
2058 unsigned long counter;
2059 unsigned long error;
2060 unsigned long reg_val;
2061
2062 dbg("SEP Driver:--------> sep_init_handler start\n");
2063 error = 0;
2064
2065 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t)) ? -EFAULT : 0;
2066
2067 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2068
2069 if (error)
2070 goto end_function;
2071
2072 /* PATCH - configure the DMA to single-burst instead of multi-burst */
2073 /*sep_configure_dma_burst(); */
2074
2075 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2076
2077 message_ptr = (unsigned long *) command_args.message_addr;
2078
2079 /* set the base address of the SRAM */
2080 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2081
2082 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2083 if (get_user(message_word, message_ptr)) { error = -EFAULT; goto end_function; }
2084 /* write data to SRAM */
2085 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2086 edbg("SEP Driver:message_word is %lu\n", message_word);
2087 /* wait for write complete */
2088 sep_wait_sram_write(sep);
2089 }
2090 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2091 /* signal SEP */
2092 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2093
2094 do
2095 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2096 while (!(reg_val & 0xFFFFFFFD));
2097
2098 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2099
2100 /* check the value */
2101 if (reg_val == 0x1) {
2102 edbg("SEP Driver:init failed\n");
2103
2104 error = sep_read_reg(sep, 0x8060);
2105 edbg("SEP Driver:sw monitor is %lu\n", error);
2106
2107 /* fatal error - read error status from GPR0 */
2108 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2109 edbg("SEP Driver:error is %lu\n", error);
2110 }
2111 end_function:
2112 dbg("SEP Driver:<-------- sep_init_handler end\n");
2113 return error;
2114
2115 }
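/*
 * The init handshake implemented above, in brief (the GPR semantics
 * are inferred from this file alone):
 *
 *	host: copy the boot message word by word into SEP SRAM via
 *	      HW_SRAM_ADDR_REG_ADDR / HW_SRAM_DATA_REG_ADDR
 *	host: write 0x1 to HW_HOST_HOST_SEP_GPR0_REG_ADDR to signal SEP
 *	host: poll HW_HOST_SEP_HOST_GPR3_REG_ADDR until it changes
 *	sep:  a value of 0x1 in GPR3 means a fatal error, with the
 *	      error status readable from GPR0
 */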
2116
2117 /*
2118 this function handles the request cache and resident reallocation
2119 */
2120 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2121 unsigned long arg)
2122 {
2123 struct sep_driver_realloc_cache_resident_t command_args;
2124 int error;
2125
2126 /* copy cache and resident to their intended locations */
2127 error = sep_load_firmware(sep);
2128 if (error)
2129 return error;
2130
2131 command_args.new_base_addr = sep->shared_bus;
2132
2133 /* find the new base address according to the lowest address between
2134 cache, resident and shared area */
2135 if (sep->resident_bus < command_args.new_base_addr)
2136 command_args.new_base_addr = sep->resident_bus;
2137 if (sep->rar_bus < command_args.new_base_addr)
2138 command_args.new_base_addr = sep->rar_bus;
2139
2140 /* set the return parameters */
2141 command_args.new_cache_addr = sep->rar_bus;
2142 command_args.new_resident_addr = sep->resident_bus;
2143
2144 /* set the new shared area */
2145 command_args.new_shared_area_addr = sep->shared_bus;
2146
2147 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2148 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2149 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2150 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2151
2152 /* return to user */
2153 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2154 return -EFAULT;
2155 return 0;
2156 }
2157
2158 /**
2159 * sep_get_time_handler - time request from user space
2160 * @sep: sep we are to set the time for
2161 * @arg: pointer to user space arg buffer
2162 *
2163 * This function reports back the time and the address in the SEP
2164 * shared buffer at which it has been placed. (Do we really need this!!!)
2165 */
2166
2167 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2168 {
2169 struct sep_driver_get_time_t command_args;
2170
2171 mutex_lock(&sep_mutex);
2172 command_args.time_value = sep_set_time(sep);
2173 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2174 mutex_unlock(&sep_mutex);
2175 if (copy_to_user((void __user *)arg,
2176 &command_args, sizeof(struct sep_driver_get_time_t)))
2177 return -EFAULT;
2178 return 0;
2179
2180 }
2181
2182 /*
2183 This API handles the end transaction request
2184 */
2185 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2186 {
2187 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2188
2189 #if 0 /*!SEP_DRIVER_POLLING_MODE */
2190 /* close IMR */
2191 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2192
2193 /* release IRQ line */
2194 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2195
2196 /* release the sep mutex */
2197 mutex_unlock(&sep_mutex);
2198 #endif
2199
2200 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2201
2202 return 0;
2203 }
2204
2205
2206 /**
2207 * sep_set_flow_id_handler - handle flow setting
2208 * @sep: the SEP we are configuring
2209 * @flow_id: the flow we are setting
2210 *
2211 * This function handles the set flow id command
2212 */
2213 static int sep_set_flow_id_handler(struct sep_device *sep,
2214 unsigned long flow_id)
2215 {
2216 int error = 0;
2217 struct sep_flow_context_t *flow_data_ptr;
2218
2219 /* find the flow data structure that was just used for creating new flow
2220 - its id should be default */
2221
2222 mutex_lock(&sep_mutex);
2223 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2224 if (flow_data_ptr)
2225 flow_data_ptr->flow_id = flow_id; /* set flow id */
2226 else
2227 error = -EINVAL;
2228 mutex_unlock(&sep_mutex);
2229 return error;
2230 }
2231
2232 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2233 {
2234 int error = 0;
2235 struct sep_device *sep = filp->private_data;
2236
2237 dbg("------------>SEP Driver: ioctl start\n");
2238
2239 edbg("SEP Driver: cmd is %x\n", cmd);
2240
2241 switch (cmd) {
2242 case SEP_IOCSENDSEPCOMMAND:
2243 /* send command to SEP */
2244 sep_send_command_handler(sep);
2245 edbg("SEP Driver: after sep_send_command_handler\n");
2246 break;
2247 case SEP_IOCSENDSEPRPLYCOMMAND:
2248 /* send reply command to SEP */
2249 sep_send_reply_command_handler(sep);
2250 break;
2251 case SEP_IOCALLOCDATAPOLL:
2252 /* allocate data pool */
2253 error = sep_allocate_data_pool_memory_handler(sep, arg);
2254 break;
2255 case SEP_IOCWRITEDATAPOLL:
2256 /* write data into memory pool */
2257 error = sep_write_into_data_pool_handler(sep, arg);
2258 break;
2259 case SEP_IOCREADDATAPOLL:
2260 /* read data from data pool into application memory */
2261 error = sep_read_from_data_pool_handler(sep, arg);
2262 break;
2263 case SEP_IOCCREATESYMDMATABLE:
2264 /* create dma table for synchronous operation */
2265 error = sep_create_sync_dma_tables_handler(sep, arg);
2266 break;
2267 case SEP_IOCCREATEFLOWDMATABLE:
2268 /* create flow dma tables */
2269 error = sep_create_flow_dma_tables_handler(sep, arg);
2270 break;
2271 case SEP_IOCFREEDMATABLEDATA:
2272 /* free the pages */
2273 error = sep_free_dma_table_data_handler(sep);
2274 break;
2275 case SEP_IOCSETFLOWID:
2276 /* set flow id */
2277 error = sep_set_flow_id_handler(sep, (unsigned long)arg);
2278 break;
2279 case SEP_IOCADDFLOWTABLE:
2280 /* add tables to the dynamic flow */
2281 error = sep_add_flow_tables_handler(sep, arg);
2282 break;
2283 case SEP_IOCADDFLOWMESSAGE:
2284 /* add message of add tables to flow */
2285 error = sep_add_flow_tables_message_handler(sep, arg);
2286 break;
2287 case SEP_IOCSEPSTART:
2288 /* start command to sep */
2289 error = sep_start_handler(sep);
2290 break;
2291 case SEP_IOCSEPINIT:
2292 /* init command to sep */
2293 error = sep_init_handler(sep, arg);
2294 break;
2295 case SEP_IOCGETSTATICPOOLADDR:
2296 /* get the physical and virtual addresses of the static pool */
2297 error = sep_get_static_pool_addr_handler(sep, arg);
2298 break;
2299 case SEP_IOCENDTRANSACTION:
2300 error = sep_end_transaction_handler(sep, arg);
2301 break;
2302 case SEP_IOCREALLOCCACHERES:
2303 error = sep_realloc_cache_resident_handler(sep, arg);
2304 break;
2305 case SEP_IOCGETMAPPEDADDROFFSET:
2306 error = sep_get_physical_mapped_offset_handler(sep, arg);
2307 break;
2308 case SEP_IOCGETIME:
2309 error = sep_get_time_handler(sep, arg);
2310 break;
2311 default:
2312 error = -ENOTTY;
2313 break;
2314 }
2315 dbg("SEP Driver:<-------- ioctl end\n");
2316 return error;
2317 }
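#if 0
/*
 * Minimal sketch of a user-space caller for the ioctl interface
 * above. Only the ioctl name and the argument struct come from
 * sep_driver_api.h; the device node path is an assumption, since the
 * node must be created by hand after alloc_chrdev_region().
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "sep_driver_api.h"

static int sep_get_time_example(void)
{
	struct sep_driver_get_time_t args;
	int error;
	int fd = open("/dev/sep_sec_driver", O_RDWR);	/* assumed path */

	if (fd < 0)
		return fd;
	error = ioctl(fd, SEP_IOCGETIME, &args);
	close(fd);
	return error;
}
#endif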
2318
2319
2320
2321 #if !SEP_DRIVER_POLLING_MODE
2322
2323 /* handler for flow done interrupt */
2324
2325 static void sep_flow_done_handler(struct work_struct *work)
2326 {
2327 struct sep_flow_context_t *flow_data_ptr;
2328 struct sep_device *sep = sep_dev;	/* single device instance, set in sep_probe() */
2329 /* obtain the mutex */
2330 mutex_lock(&sep_mutex);
2331
2332 /* get the pointer to context */
2333 flow_data_ptr = container_of(work, struct sep_flow_context_t, flow_wq);
2334
2335 /* free all the current input tables in sep */
2336 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2337
2338 /* free all the current tables output tables in SEP (if needed) */
2339 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2340 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2341
2342 /* check if additional tables need to be sent to SEP - only the
2343 input flag needs to be checked */
2344 if (flow_data_ptr->input_tables_flag) {
2345 /* copy the stored message into the shared RAM and signal SEP */
2346 memcpy((void *) sep->shared_addr, (void *) flow_data_ptr->message, flow_data_ptr->message_size_in_bytes);
2347
2348 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2349 }
2350 mutex_unlock(&sep_mutex);
2351 }
2352 /*
2353 interrupt handler function
2354 */
2355 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2356 {
2357 irqreturn_t int_error;
2358 unsigned long reg_val;
2359 unsigned long flow_id;
2360 struct sep_flow_context_t *flow_context_ptr;
2361 struct sep_device *sep = dev_id;
2362
2363 int_error = IRQ_HANDLED;
2364
2365 /* read the IRR register to check if this is SEP interrupt */
2366 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2367 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2368
2369 /* check if this is the flow interrupt */
2370 if (0 /*reg_val & (0x1 << 11) */ ) {
2371 /* read GPR0 to find out which flow is done */
2372 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2373
2374 /* find the context of the flow */
2375 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2376 if (flow_context_ptr == NULL)
2377 goto end_function_with_error;
2378
2379 /* queue the work */
2380 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2381 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2382
2383 } else {
2384 /* check if this is reply interrupt from SEP */
2385 if (reg_val & (0x1 << 13)) {
2386 /* update the counter of reply messages */
2387 sep->reply_ct++;
2388 /* wake up the waiting process */
2389 wake_up(&sep_event);
2390 } else {
2391 int_error = IRQ_NONE;
2392 goto end_function;
2393 }
2394 }
2395 end_function_with_error:
2396 /* clear the interrupt */
2397 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2398 end_function:
2399 return int_error;
2400 }
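/*
 * IRR bit usage as read from the handler above (not from a
 * datasheet): bit 13 signals a reply message from SEP, while bit 11,
 * currently compiled out, would signal flow completion with the flow
 * id carried in the top four bits of the register read back.
 */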
2401
2402 #endif
2403
2404
2405
2406 #if 0
2407
2408 static void sep_wait_busy(struct sep_device *sep)
2409 {
2410 u32 reg;
2411
2412 do {
2413 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2414 } while (reg);
2415 }
2416
2417 /*
2418 PATCH for configuring the DMA to single burst instead of multi-burst
2419 */
2420 static void sep_configure_dma_burst(struct sep_device *sep)
2421 {
2422 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2423
2424 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2425
2426 /* request access to registers from SEP */
2427 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2428
2429 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2430
2431 sep_wait_busy(sep);
2432
2433 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (sep_wait_busy wait loop)\n");
2434
2435 /* set the DMA burst register to single burst */
2436 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2437
2438 /* release the sep busy */
2439 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2440 sep_wait_busy(sep);
2441
2442 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2443
2444 }
2445
2446 #endif
2447
2448 /*
2449 Function that is activated on the successful probe of the SEP device
2450 */
2451 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2452 {
2453 int error = 0;
2454 struct sep_device *sep;
2455 int counter;
2456 int size; /* size of memory for allocation */
2457
2458 edbg("Sep pci probe starting\n");
2459 if (sep_dev != NULL) {
2460 dev_warn(&pdev->dev, "only one SEP supported.\n");
2461 return -EBUSY;
2462 }
2463
2464 /* enable the device */
2465 error = pci_enable_device(pdev);
2466 if (error) {
2467 edbg("error enabling pci device\n");
2468 goto end_function;
2469 }
2470
2471 /* set the pci dev pointer */
2472 sep_dev = &sep_instance;
2473 sep = &sep_instance;
2474
2475 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2476 /* transaction counter that coordinates the transactions between SEP
2477 and HOST */
2478 sep->send_ct = 0;
2479 /* counter for the messages from sep */
2480 sep->reply_ct = 0;
2481 /* counter for the number of bytes allocated in the pool
2482 for the current transaction */
2483 sep->data_pool_bytes_allocated = 0;
2484
2485 /* calculate the total size for allocation */
2486 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2487 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2488
2489 /* allocate the shared area */
2490 if (sep_map_and_alloc_shared_area(sep, size)) {
2491 error = -ENOMEM;
2492 /* allocation failed */
2493 goto end_function_error;
2494 }
2495 /* now set the memory regions */
2496 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2497 /* Note: this test section will need moving before it could ever
2498 work, as the registers are not yet mapped and retval is not declared! */
2499 /* send the new SHARED MESSAGE AREA to the SEP */
2500 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2501
2502 /* poll for SEP response */
2503 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2504 while (retval != 0xffffffff && retval != sep->shared_bus)
2505 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2506
2507 /* check the return value (register) */
2508 if (retval != sep->shared_bus) {
2509 error = -ENOMEM;
2510 goto end_function_deallocate_sep_shared_area;
2511 }
2512 #endif
2513 /* init the flow contexts */
2514 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2515 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2516
2517 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2518 if (sep->flow_wq == NULL) {
2519 error = -ENOMEM;
2520 edbg("sep_driver:flow queue creation failed\n");
2521 goto end_function_deallocate_sep_shared_area;
2522 }
2523 edbg("SEP Driver: create flow workqueue \n");
2524 sep->pdev = pci_dev_get(pdev);
2525
2526 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2527 if (!sep->reg_addr) {
2528 edbg("sep: ioremap of registers failed.\n");
2529 goto end_function_deallocate_sep_shared_area;
2530 }
2531 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2532
2533 /* load the rom code */
2534 sep_load_rom_code(sep);
2535
2536 /* set up system base address and shared memory location */
2537 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2538 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2539 &sep->rar_bus, GFP_KERNEL);
2540
2541 if (!sep->rar_addr) {
2542 edbg("SEP Driver:can't allocate rar\n");
2543 goto end_function_uniomap;
2544 }
2545
2546
2547 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2548 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2549
2550 #if !SEP_DRIVER_POLLING_MODE
2551
2552 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2553
2554 /* clear ICR register */
2555 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2556
2557 /* set the IMR register - open only GPR 2 */
2558 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2559
2560 edbg("SEP Driver: about to call request_irq\n");
2561 /* get the interrupt line */
2562 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2563 if (error)
2564 goto end_function_free_res;
2565 return 0;
2570
2571 end_function_free_res:
2572 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2573 sep->rar_addr, sep->rar_bus);
2574 #endif /* SEP_DRIVER_POLLING_MODE */
2575 end_function_uniomap:
2576 iounmap(sep->reg_addr);
2577 end_function_deallocate_sep_shared_area:
2578 /* de-allocate shared area */
2579 sep_unmap_and_free_shared_area(sep, size);
2580 end_function_error:
2581 sep_dev = NULL;
2582 end_function:
2583 return error;
2584 }
2585
2586 static struct pci_device_id sep_pci_id_tbl[] = {
2587 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2588 {0}
2589 };
2590
2591 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2592
2593 /* structure for registering the driver with the PCI subsystem */
2594 static struct pci_driver sep_pci_driver = {
2595 .name = "sep_sec_driver",
2596 .id_table = sep_pci_id_tbl,
2597 .probe = sep_probe
2598 /* FIXME: remove handler */
2599 };
2600
2601 /* major and minor device numbers */
2602 static dev_t sep_devno;
2603
2604 /* the file operations structure of the driver */
2605 static struct file_operations sep_file_operations = {
2606 .owner = THIS_MODULE,
2607 .ioctl = sep_ioctl,
2608 .poll = sep_poll,
2609 .open = sep_open,
2610 .release = sep_release,
2611 .mmap = sep_mmap,
2612 };
2613
2614
2615 /* cdev struct of the driver */
2616 static struct cdev sep_cdev;
2617
2618 /*
2619 this function registers the driver to the file system
2620 */
2621 static int sep_register_driver_to_fs(void)
2622 {
2623 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2624 if (ret_val) {
2625 edbg("sep: major number allocation failed, retval is %d\n",
2626 ret_val);
2627 return ret_val;
2628 }
2629 /* init cdev */
2630 cdev_init(&sep_cdev, &sep_file_operations);
2631 sep_cdev.owner = THIS_MODULE;
2632
2633 /* register the driver with the kernel */
2634 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2635 if (ret_val) {
2636 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2637 /* unregister dev numbers */
2638 unregister_chrdev_region(sep_devno, 1);
2639 }
2640 return ret_val;
2641 }
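/*
 * Note: alloc_chrdev_region() hands out a dynamic major, so no device
 * node is created here. One hedged way to create it by hand, with the
 * major taken from /proc/devices and an arbitrary node name:
 *
 *	mknod /dev/sep_sec_driver c <major> 0
 */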
2642
2643
2644 /*--------------------------------------------------------------
2645 init function
2646 ----------------------------------------------------------------*/
2647 static int __init sep_init(void)
2648 {
2649 int ret_val = 0;
2650 dbg("SEP Driver:-------->Init start\n");
2651 /* FIXME: Probe can occur before we are ready to survive a probe */
2652 ret_val = pci_register_driver(&sep_pci_driver);
2653 if (ret_val) {
2654 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2655 goto end_function;
2656 }
2657 /* register driver to fs */
2658 ret_val = sep_register_driver_to_fs();
2659 if (ret_val)
2660 goto end_function_unregister_pci;
2661 goto end_function;
2662 end_function_unregister_pci:
2663 pci_unregister_driver(&sep_pci_driver);
2669 end_function:
2670 dbg("SEP Driver:<-------- Init end\n");
2671 return ret_val;
2672 }
2673
2674
2675 /*-------------------------------------------------------------
2676 exit function
2677 --------------------------------------------------------------*/
2678 static void __exit sep_exit(void)
2679 {
2680 int size;
2681
2682 dbg("SEP Driver:--------> Exit start\n");
2683
2684 /* unregister from fs */
2685 cdev_del(&sep_cdev);
2686 /* unregister dev numbers */
2687 unregister_chrdev_region(sep_devno, 1);
2688 /* calculate the total size for de-allocation */
2689 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2690 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2691 /* FIXME: We need to do this in the unload for the device */
2692 /* free shared area */
2693 if (sep_dev) {
2694 sep_unmap_and_free_shared_area(sep_dev, size);
2695 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2696 iounmap((void *) sep_dev->reg_addr);
2697 edbg("SEP Driver: iounmap \n");
2698 }
2700 dbg("SEP Driver:<-------- Exit end\n");
2701 }
2702
2703
2704 module_init(sep_init);
2705 module_exit(sep_exit);
2706
2707 MODULE_LICENSE("GPL");