staging: make PCI device id constant
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / sep / sep_driver.c
1 /*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/fs.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/poll.h>
41 #include <linux/wait.h>
42 #include <linux/sched.h>
43 #include <linux/pci.h>
44 #include <linux/firmware.h>
45 #include <asm/ioctl.h>
46 #include <linux/ioport.h>
47 #include <asm/io.h>
48 #include <linux/interrupt.h>
49 #include <linux/pagemap.h>
50 #include <asm/cacheflush.h>
51 #include "sep_driver_hw_defs.h"
52 #include "sep_driver_config.h"
53 #include "sep_driver_api.h"
54 #include "sep_dev.h"
55
56 #if SEP_DRIVER_ARM_DEBUG_MODE
57
#define CRYS_SEP_ROM_length 0x4000
#define CRYS_SEP_ROM_start_address 0x8000C000UL
#define CRYS_SEP_ROM_start_address_offset 0xC000UL
#define SEP_ROM_BANK_register 0x80008420UL
#define SEP_ROM_BANK_register_offset 0x8420UL

/*
 * THESE 2 definitions are specific to the board - must be
 * defined during integration
 *
 * (A stale duplicate definition of SEP_RAR_IO_MEM_REGION_START_ADDRESS
 * with value 0x82000000 was removed here: redefining a macro with a
 * different value is a constraint violation, and the board-specific
 * value below is the one that took effect anyway.)
 */
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
71 /* 2M size */
72
/*
 * sep_load_rom_code - stream the SEP ROM image into the device (debug builds)
 * @sep: device whose register window receives the image
 *
 * Writes the CRYS_SEP_ROM[] image (from SEP_ROM_image.h) into the device
 * in four banks, resets the SEP, then busy-waits on GPR3 for the boot
 * status.  Only compiled when SEP_DRIVER_ARM_DEBUG_MODE is set.
 */
static void sep_load_rom_code(struct sep_device *sep)
{
	/* Index variables */
	unsigned long i, k, j;
	u32 reg;
	u32 error;
	u32 warning;

	/* Loading ROM from SEP_ROM_image.h file */
	k = sizeof(CRYS_SEP_ROM);

	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

	edbg("SEP Driver: k is %lu\n", k);
	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	/* NOTE(review): %p applied to an integer macro constant - specifier/
	   argument mismatch; should be %lx if this debug path is revived */
	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);

	for (i = 0; i < 4; i++) {
		/* write bank */
		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

			k = k - 4;

			/* whole image consumed: force both loops to end */
			if (k == 0) {
				j = CRYS_SEP_ROM_length;
				i = 4;
			}
		}
	}

	/* reset the SEP */
	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

	/* poll for SEP ROM boot finish; spins forever if the device never
	   raises a non-zero status in GPR3 */
	do
		reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg);

	edbg("SEP Driver: ROM polling ended\n");

	switch (reg) {
	case 0x1:
		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver: ROM polling case 1\n");
		break;
	case 0x4:
		/* Cold boot ended successfully */
	case 0x8:
		/* Warmboot ended successfully */
	case 0x10:
		/* ColdWarm boot ended successfully */
		error = 0;
		/* NOTE(review): no break here - the success cases fall
		   through into the first-phase handling below and read GPR0
		   as a "warning"; confirm intent before adding breaks */
	case 0x2:
		/* Boot First Phase ended */
		warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		/* fallthrough */
	case 0x20:
		edbg("SEP Driver: ROM polling case %d\n", reg);
		break;
	}
	/* NOTE(review): 'error' and 'warning' are computed but never used or
	   reported to the caller */

}
138
139 #else
/* Non-debug builds: firmware is loaded via sep_load_firmware() instead */
static void sep_load_rom_code(struct sep_device *sep) { }
141 #endif /* SEP_DRIVER_ARM_DEBUG_MODE */
142
143
144
145 /*----------------------------------------
146 DEFINES
147 -----------------------------------------*/
148
149 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
150 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
151
152 /*--------------------------------------------
153 GLOBAL variables
154 --------------------------------------------*/
155
156 /* debug messages level */
157 static int debug;
158 module_param(debug, int , 0);
159 MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
160
161 /* Keep this a single static object for now to keep the conversion easy */
162
163 static struct sep_device sep_instance;
164 static struct sep_device *sep_dev = &sep_instance;
165
166 /*
167 mutex for the access to the internals of the sep driver
168 */
169 static DEFINE_MUTEX(sep_mutex);
170
171
172 /* wait queue head (event) of the driver */
173 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
174
175 /**
176 * sep_load_firmware - copy firmware cache/resident
177 * @sep: device we are loading
178 *
179 * This functions copies the cache and resident from their source
180 * location into destination shared memory.
181 */
182
183 static int sep_load_firmware(struct sep_device *sep)
184 {
185 const struct firmware *fw;
186 char *cache_name = "cache.image.bin";
187 char *res_name = "resident.image.bin";
188 int error;
189
190 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
191 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
192
193 /* load cache */
194 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
195 if (error) {
196 edbg("SEP Driver:cant request cache fw\n");
197 return error;
198 }
199 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
200
201 memcpy(sep->rar_addr, (void *)fw->data, fw->size);
202 sep->cache_size = fw->size;
203 release_firmware(fw);
204
205 sep->resident_bus = sep->rar_bus + sep->cache_size;
206 sep->resident_addr = sep->rar_addr + sep->cache_size;
207
208 /* load resident */
209 error = request_firmware(&fw, res_name, &sep->pdev->dev);
210 if (error) {
211 edbg("SEP Driver:cant request res fw\n");
212 return error;
213 }
214 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
215
216 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
217 sep->resident_size = fw->size;
218 release_firmware(fw);
219
220 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
221 sep->resident_addr, (unsigned long long)sep->resident_bus,
222 sep->rar_addr, (unsigned long long)sep->rar_bus);
223 return 0;
224 }
225
226 /**
227 * sep_map_and_alloc_shared_area - allocate shared block
228 * @sep: security processor
229 * @size: size of shared area
230 *
231 * Allocate a shared buffer in host memory that can be used by both the
232 * kernel and also the hardware interface via DMA.
233 */
234
235 static int sep_map_and_alloc_shared_area(struct sep_device *sep,
236 unsigned long size)
237 {
238 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
239 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
240 &sep->shared_bus, GFP_KERNEL);
241
242 if (!sep->shared_addr) {
243 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
244 return -ENOMEM;
245 }
246 /* set the bus address of the shared area */
247 edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
248 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
249 return 0;
250 }
251
252 /**
253 * sep_unmap_and_free_shared_area - free shared block
254 * @sep: security processor
255 *
256 * Free the shared area allocated to the security processor. The
257 * processor must have finished with this and any final posted
258 * writes cleared before we do so.
259 */
260 static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
261 {
262 dma_free_coherent(&sep->pdev->dev, size,
263 sep->shared_addr, sep->shared_bus);
264 }
265
266 /**
267 * sep_shared_virt_to_bus - convert bus/virt addresses
268 *
269 * Returns the bus address inside the shared area according
270 * to the virtual address.
271 */
272
273 static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
274 void *virt_address)
275 {
276 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
277 edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
278 virt_address);
279 return pa;
280 }
281
282 /**
283 * sep_shared_bus_to_virt - convert bus/virt addresses
284 *
285 * Returns virtual address inside the shared area according
286 * to the bus address.
287 */
288
289 static void *sep_shared_bus_to_virt(struct sep_device *sep,
290 dma_addr_t bus_address)
291 {
292 return sep->shared_addr + (bus_address - sep->shared_bus);
293 }
294
295
296 /**
297 * sep_try_open - attempt to open a SEP device
298 * @sep: device to attempt to open
299 *
300 * Atomically attempt to get ownership of a SEP device.
301 * Returns 1 if the device was opened, 0 on failure.
302 */
303
304 static int sep_try_open(struct sep_device *sep)
305 {
306 if (!test_and_set_bit(0, &sep->in_use))
307 return 1;
308 return 0;
309 }
310
311 /**
312 * sep_open - device open method
313 * @inode: inode of sep device
314 * @filp: file handle to sep device
315 *
316 * Open method for the SEP device. Called when userspace opens
317 * the SEP device node. Must also release the memory data pool
318 * allocations.
319 *
320 * Returns zero on success otherwise an error code.
321 */
322
323 static int sep_open(struct inode *inode, struct file *filp)
324 {
325 if (sep_dev == NULL)
326 return -ENODEV;
327
328 /* check the blocking mode */
329 if (filp->f_flags & O_NDELAY) {
330 if (sep_try_open(sep_dev) == 0)
331 return -EAGAIN;
332 } else
333 if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
334 return -EINTR;
335
336 /* Bind to the device, we only have one which makes it easy */
337 filp->private_data = sep_dev;
338 /* release data pool allocations */
339 sep_dev->data_pool_bytes_allocated = 0;
340 return 0;
341 }
342
343
344 /**
345 * sep_release - close a SEP device
346 * @inode: inode of SEP device
347 * @filp: file handle being closed
348 *
349 * Called on the final close of a SEP device. As the open protects against
350 * multiple simultaenous opens that means this method is called when the
351 * final reference to the open handle is dropped.
352 */
353
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;
	/* NOTE(review): dead interrupt-mode teardown kept under #if 0 */
#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

#endif
	/* Drop the ownership bit taken in sep_try_open() and ensure any
	   open blocked in sep_open() progresses */
	clear_bit(0, &sep->in_use);
	wake_up(&sep_event);
	return 0;
}
369
370 /*---------------------------------------------------------------
371 map function - this functions maps the message shared area
372 -----------------------------------------------------------------*/
373 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
374 {
375 dma_addr_t bus_addr;
376 struct sep_device *sep = filp->private_data;
377
378 dbg("-------->SEP Driver: mmap start\n");
379
380 /* check that the size of the mapped range is as the size of the message
381 shared area */
382 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
383 edbg("SEP Driver mmap requested size is more than allowed\n");
384 printk(KERN_WARNING "SEP Driver mmap requested size is more \
385 than allowed\n");
386 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
387 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
388 return -EAGAIN;
389 }
390
391 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
392
393 /* get bus address */
394 bus_addr = sep->shared_bus;
395
396 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
397
398 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
399 edbg("SEP Driver remap_page_range failed\n");
400 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
401 return -EAGAIN;
402 }
403
404 dbg("SEP Driver:<-------- mmap end\n");
405
406 return 0;
407 }
408
409
410 /*-----------------------------------------------
411 poll function
412 *----------------------------------------------*/
/*
 * sep_poll - poll method for the SEP device
 * @filp: file handle of an opened SEP device
 * @wait: poll table supplied by the core
 *
 * In polling mode, busy-waits until GPR2 acknowledges the last command
 * sent; in interrupt mode, registers on the driver's wait queue.  Once
 * the send and reply counters match, the top bit of GPR2 decides whether
 * SEP raised its own request (POLLOUT) or delivered a reply (POLLIN).
 */
static unsigned int sep_poll(struct file *filp, poll_table * wait)
{
	unsigned long count;
	unsigned int mask = 0;
	unsigned long retval = 0;	/* flow id */
	struct sep_device *sep = filp->private_data;

	dbg("---------->SEP Driver poll: start\n");


#if SEP_DRIVER_POLLING_MODE

	/* spin until the low 31 bits of GPR2 match our send counter */
	while (sep->send_ct != (retval & 0x7FFFFFFF)) {
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
	}

	sep->reply_ct++;
#else
	/* add the event to the polling wait table */
	poll_wait(filp, &sep_event, wait);

#endif

	edbg("sep->send_ct is %lu\n", sep->send_ct);
	edbg("sep->reply_ct is %lu\n", sep->reply_ct);

	/* data is ready once every sent command has been replied to */
	if (sep->send_ct == sep->reply_ct) {
		for (count = 0; count < 12 * 4; count += 4)
			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));

		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		edbg("retval is %lu\n", retval);
		/* check if the this is sep reply or request */
		if (retval >> 31) {
			edbg("SEP Driver: sep request in\n");
			/* request */
			mask |= POLLOUT | POLLWRNORM;
		} else {
			edbg("SEP Driver: sep reply in\n");
			mask |= POLLIN | POLLRDNORM;
		}
	}
	dbg("SEP Driver:<-------- poll exit\n");
	return mask;
}
465
466 /**
467 * sep_time_address - address in SEP memory of time
468 * @sep: SEP device we want the address from
469 *
470 * Return the address of the two dwords in memory used for time
471 * setting.
472 */
473
474 static u32 *sep_time_address(struct sep_device *sep)
475 {
476 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
477 }
478
479 /**
480 * sep_set_time - set the SEP time
481 * @sep: the SEP we are setting the time for
482 *
483 * Calculates time and sets it at the predefined address.
484 * Called with the sep mutex held.
485 */
486 static unsigned long sep_set_time(struct sep_device *sep)
487 {
488 struct timeval time;
489 u32 *time_addr; /* address of time as seen by the kernel */
490
491
492 dbg("sep:sep_set_time start\n");
493
494 do_gettimeofday(&time);
495
496 /* set value in the SYSTEM MEMORY offset */
497 time_addr = sep_time_address(sep);
498
499 time_addr[0] = SEP_TIME_VAL_TOKEN;
500 time_addr[1] = time.tv_sec;
501
502 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
503 edbg("SEP Driver:time_addr is %p\n", time_addr);
504 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
505
506 return time.tv_sec;
507 }
508
509 /**
510 * sep_dump_message - dump the message that is pending
511 * @sep: sep device
512 *
513 * Dump out the message pending in the shared message area
514 */
515
516 static void sep_dump_message(struct sep_device *sep)
517 {
518 int count;
519 for (count = 0; count < 12 * 4; count += 4)
520 edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
521 }
522
523 /**
524 * sep_send_command_handler - kick off a command
525 * @sep: sep being signalled
526 *
527 * This function raises interrupt to SEP that signals that is has a new
528 * command from the host
529 */
530
531 static void sep_send_command_handler(struct sep_device *sep)
532 {
533 dbg("sep:sep_send_command_handler start\n");
534
535 mutex_lock(&sep_mutex);
536 sep_set_time(sep);
537
538 /* FIXME: flush cache */
539 flush_cache_all();
540
541 sep_dump_message(sep);
542 /* update counter */
543 sep->send_ct++;
544 /* send interrupt to SEP */
545 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
546 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
547 mutex_unlock(&sep_mutex);
548 return;
549 }
550
551 /**
552 * sep_send_reply_command_handler - kick off a command reply
553 * @sep: sep being signalled
554 *
555 * This function raises interrupt to SEP that signals that is has a new
556 * command from the host
557 */
558
static void sep_send_reply_command_handler(struct sep_device *sep)
{
	dbg("sep:sep_send_reply_command_handler start\n");

	/* flush cache so the device sees the reply data */
	flush_cache_all();

	sep_dump_message(sep);

	mutex_lock(&sep_mutex);
	sep->send_ct++;	/* update counter */
	/* send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	/* update both counters */
	sep->send_ct++;
	/* NOTE(review): send_ct is incremented twice in this function, once
	   before and once after the GPR2 write - confirm that is intended */
	sep->reply_ct++;
	mutex_unlock(&sep_mutex);
	dbg("sep: sep_send_reply_command_handler end\n");
}
578
579 /*
580 This function handles the allocate data pool memory request
581 This function returns calculates the bus address of the
582 allocated memory, and the offset of this area from the mapped address.
583 Therefore, the FVOs in user space can calculate the exact virtual
584 address of this allocated memory
585 */
586 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
587 unsigned long arg)
588 {
589 int error;
590 struct sep_driver_alloc_t command_args;
591
592 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
593
594 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
595 if (error)
596 goto end_function;
597
598 /* allocate memory */
599 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
600 error = -ENOMEM;
601 goto end_function;
602 }
603
604 /* set the virtual and bus address */
605 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
606 command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
607
608 /* write the memory back to the user space */
609 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
610 if (error)
611 goto end_function;
612
613 /* set the allocation */
614 sep->data_pool_bytes_allocated += command_args.num_bytes;
615
616 end_function:
617 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
618 return error;
619 }
620
621 /*
622 This function handles write into allocated data pool command
623 */
624 static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
625 {
626 int error;
627 void *virt_address;
628 unsigned long va;
629 unsigned long app_in_address;
630 unsigned long num_bytes;
631 void *data_pool_area_addr;
632
633 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
634
635 /* get the application address */
636 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
637 if (error)
638 goto end_function;
639
640 /* get the virtual kernel address address */
641 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
642 if (error)
643 goto end_function;
644 virt_address = (void *)va;
645
646 /* get the number of bytes */
647 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
648 if (error)
649 goto end_function;
650
651 /* calculate the start of the data pool */
652 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
653
654
655 /* check that the range of the virtual kernel address is correct */
656 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
657 error = -EINVAL;
658 goto end_function;
659 }
660 /* copy the application data */
661 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
662 end_function:
663 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
664 return error;
665 }
666
667 /*
668 this function handles the read from data pool command
669 */
670 static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
671 {
672 int error;
673 /* virtual address of dest application buffer */
674 unsigned long app_out_address;
675 /* virtual address of the data pool */
676 unsigned long va;
677 void *virt_address;
678 unsigned long num_bytes;
679 void *data_pool_area_addr;
680
681 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
682
683 /* get the application address */
684 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
685 if (error)
686 goto end_function;
687
688 /* get the virtual kernel address address */
689 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
690 if (error)
691 goto end_function;
692 virt_address = (void *)va;
693
694 /* get the number of bytes */
695 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
696 if (error)
697 goto end_function;
698
699 /* calculate the start of the data pool */
700 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
701
702 /* FIXME: These are incomplete all over the driver: what about + len
703 and when doing that also overflows */
704 /* check that the range of the virtual kernel address is correct */
705 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
706 error = -EINVAL;
707 goto end_function;
708 }
709
710 /* copy the application data */
711 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
712 end_function:
713 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
714 return error;
715 }
716
717 /*
718 This function releases all the application virtual buffer physical pages,
719 that were previously locked
720 */
/*
 * sep_free_dma_pages - release previously locked buffer pages
 * @page_array_ptr: array of pinned pages; freed before returning
 * @num_pages: number of entries in the array
 * @dirtyFlag: non-zero if the device may have written to the pages
 *
 * Drops the reference on every page, marking non-reserved pages dirty
 * first when the buffer was written, then frees the array itself.
 * Always returns 0.
 */
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	if (dirtyFlag) {
		for (count = 0; count < num_pages; count++) {
			/* the out array was written, therefore the data was
			   changed; reserved pages must not be dirtied */
			if (!PageReserved(page_array_ptr[count]))
				SetPageDirty(page_array_ptr[count]);
			page_cache_release(page_array_ptr[count]);
		}
	} else {
		/* in pages: the data was only read, no update was done on
		   those pages, so just drop the references */
		for (count = 0; count < num_pages; count++)
			page_cache_release(page_array_ptr[count]);
	}

	/* kfree(NULL) is a no-op, so the old NULL guard was redundant (and
	   came after the array had already been dereferenced above) */
	kfree(page_array_ptr);

	return 0;
}
745
746 /*
747 This function locks all the physical pages of the kernel virtual buffer
748 and construct a basic lli array, where each entry holds the physical
749 page address and the size that application data holds in this physical pages
750 */
751 static int sep_lock_kernel_pages(struct sep_device *sep,
752 unsigned long kernel_virt_addr,
753 unsigned long data_size,
754 unsigned long *num_pages_ptr,
755 struct sep_lli_entry_t **lli_array_ptr,
756 struct page ***page_array_ptr)
757 {
758 int error = 0;
759 /* the the page of the end address of the user space buffer */
760 unsigned long end_page;
761 /* the page of the start address of the user space buffer */
762 unsigned long start_page;
763 /* the range in pages */
764 unsigned long num_pages;
765 struct sep_lli_entry_t *lli_array;
766 /* next kernel address to map */
767 unsigned long next_kernel_address;
768 unsigned long count;
769
770 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
771
772 /* set start and end pages and num pages */
773 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
774 start_page = kernel_virt_addr >> PAGE_SHIFT;
775 num_pages = end_page - start_page + 1;
776
777 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
778 edbg("SEP Driver: data_size is %lu\n", data_size);
779 edbg("SEP Driver: start_page is %lx\n", start_page);
780 edbg("SEP Driver: end_page is %lx\n", end_page);
781 edbg("SEP Driver: num_pages is %lu\n", num_pages);
782
783 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
784 if (!lli_array) {
785 edbg("SEP Driver: kmalloc for lli_array failed\n");
786 error = -ENOMEM;
787 goto end_function;
788 }
789
790 /* set the start address of the first page - app data may start not at
791 the beginning of the page */
792 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
793
794 /* check that not all the data is in the first page only */
795 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
796 lli_array[0].block_size = data_size;
797 else
798 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
799
800 /* debug print */
801 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
802
803 /* advance the address to the start of the next page */
804 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
805
806 /* go from the second page to the prev before last */
807 for (count = 1; count < (num_pages - 1); count++) {
808 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
809 lli_array[count].block_size = PAGE_SIZE;
810
811 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
812 next_kernel_address += PAGE_SIZE;
813 }
814
815 /* if more then 1 pages locked - then update for the last page size needed */
816 if (num_pages > 1) {
817 /* update the address of the last page */
818 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
819
820 /* set the size of the last page */
821 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
822
823 if (lli_array[count].block_size == 0) {
824 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
825 dbg("data_size is %lu\n", data_size);
826 while (1);
827 }
828
829 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
830 }
831 /* set output params */
832 *lli_array_ptr = lli_array;
833 *num_pages_ptr = num_pages;
834 *page_array_ptr = 0;
835 end_function:
836 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
837 return 0;
838 }
839
840 /*
841 This function locks all the physical pages of the application virtual buffer
842 and construct a basic lli array, where each entry holds the physical page
843 address and the size that application data holds in this physical pages
844 */
845 static int sep_lock_user_pages(struct sep_device *sep,
846 unsigned long app_virt_addr,
847 unsigned long data_size,
848 unsigned long *num_pages_ptr,
849 struct sep_lli_entry_t **lli_array_ptr,
850 struct page ***page_array_ptr)
851 {
852 int error = 0;
853 /* the the page of the end address of the user space buffer */
854 unsigned long end_page;
855 /* the page of the start address of the user space buffer */
856 unsigned long start_page;
857 /* the range in pages */
858 unsigned long num_pages;
859 struct page **page_array;
860 struct sep_lli_entry_t *lli_array;
861 unsigned long count;
862 int result;
863
864 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
865
866 /* set start and end pages and num pages */
867 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
868 start_page = app_virt_addr >> PAGE_SHIFT;
869 num_pages = end_page - start_page + 1;
870
871 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
872 edbg("SEP Driver: data_size is %lu\n", data_size);
873 edbg("SEP Driver: start_page is %lu\n", start_page);
874 edbg("SEP Driver: end_page is %lu\n", end_page);
875 edbg("SEP Driver: num_pages is %lu\n", num_pages);
876
877 /* allocate array of pages structure pointers */
878 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
879 if (!page_array) {
880 edbg("SEP Driver: kmalloc for page_array failed\n");
881
882 error = -ENOMEM;
883 goto end_function;
884 }
885
886 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
887 if (!lli_array) {
888 edbg("SEP Driver: kmalloc for lli_array failed\n");
889
890 error = -ENOMEM;
891 goto end_function_with_error1;
892 }
893
894 /* convert the application virtual address into a set of physical */
895 down_read(&current->mm->mmap_sem);
896 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
897 up_read(&current->mm->mmap_sem);
898
899 /* check the number of pages locked - if not all then exit with error */
900 if (result != num_pages) {
901 dbg("SEP Driver: not all pages locked by get_user_pages\n");
902
903 error = -ENOMEM;
904 goto end_function_with_error2;
905 }
906
907 /* flush the cache */
908 for (count = 0; count < num_pages; count++)
909 flush_dcache_page(page_array[count]);
910
911 /* set the start address of the first page - app data may start not at
912 the beginning of the page */
913 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
914
915 /* check that not all the data is in the first page only */
916 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
917 lli_array[0].block_size = data_size;
918 else
919 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
920
921 /* debug print */
922 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
923
924 /* go from the second page to the prev before last */
925 for (count = 1; count < (num_pages - 1); count++) {
926 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
927 lli_array[count].block_size = PAGE_SIZE;
928
929 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
930 }
931
932 /* if more then 1 pages locked - then update for the last page size needed */
933 if (num_pages > 1) {
934 /* update the address of the last page */
935 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
936
937 /* set the size of the last page */
938 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
939
940 if (lli_array[count].block_size == 0) {
941 dbg("app_virt_addr is %08lx\n", app_virt_addr);
942 dbg("data_size is %lu\n", data_size);
943 while (1);
944 }
945 edbg("lli_array[%lu].physical_address is %08lx, \
946 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
947 }
948
949 /* set output params */
950 *lli_array_ptr = lli_array;
951 *num_pages_ptr = num_pages;
952 *page_array_ptr = page_array;
953 goto end_function;
954
955 end_function_with_error2:
956 /* release the cache */
957 for (count = 0; count < num_pages; count++)
958 page_cache_release(page_array[count]);
959 kfree(lli_array);
960 end_function_with_error1:
961 kfree(page_array);
962 end_function:
963 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
964 return 0;
965 }
966
967
/*
  this function calculates the size of data that can be inserted into the lli
  table from this array; the condition is that either the table is full
  (all entries are filled), or there are no more entries in the lli array
*/
973 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
974 {
975 unsigned long table_data_size = 0;
976 unsigned long counter;
977
978 /* calculate the data in the out lli table if till we fill the whole
979 table or till the data has ended */
980 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
981 table_data_size += lli_in_array_ptr[counter].block_size;
982 return table_data_size;
983 }
984
/*
  this function builds one lli table from the lli_array according to
  the given size of data
*/
989 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
990 {
991 unsigned long curr_table_data_size;
992 /* counter of lli array entry */
993 unsigned long array_counter;
994
995 dbg("SEP Driver:--------> sep_build_lli_table start\n");
996
997 /* init currrent table data size and lli array entry counter */
998 curr_table_data_size = 0;
999 array_counter = 0;
1000 *num_table_entries_ptr = 1;
1001
1002 edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1003
1004 /* fill the table till table size reaches the needed amount */
1005 while (curr_table_data_size < table_data_size) {
1006 /* update the number of entries in table */
1007 (*num_table_entries_ptr)++;
1008
1009 lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1010 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1011 curr_table_data_size += lli_table_ptr->block_size;
1012
1013 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1014 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1015 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1016
1017 /* check for overflow of the table data */
1018 if (curr_table_data_size > table_data_size) {
1019 edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1020
1021 /* update the size of block in the table */
1022 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1023
1024 /* update the physical address in the lli array */
1025 lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1026
1027 /* update the block size left in the lli array */
1028 lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1029 } else
1030 /* advance to the next entry in the lli_array */
1031 array_counter++;
1032
1033 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1034 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1035
1036 /* move to the next entry in table */
1037 lli_table_ptr++;
1038 }
1039
1040 /* set the info entry to default */
1041 lli_table_ptr->physical_address = 0xffffffff;
1042 lli_table_ptr->block_size = 0;
1043
1044 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1045 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1046 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1047
1048 /* set the output parameter */
1049 *num_processed_entries_ptr += array_counter;
1050
1051 edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1052 dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1053 return;
1054 }
1055
1056 /*
1057 this function goes over the list of the print created tables and
1058 prints all the data
1059 */
1060 static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1061 {
1062 unsigned long table_count;
1063 unsigned long entries_count;
1064
1065 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1066
1067 table_count = 1;
1068 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1069 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1070 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1071
1072 /* print entries of the table (without info entry) */
1073 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1074 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1075 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1076 }
1077
1078 /* point to the info entry */
1079 lli_table_ptr--;
1080
1081 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1082 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1083
1084
1085 table_data_size = lli_table_ptr->block_size & 0xffffff;
1086 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1087 lli_table_ptr = (struct sep_lli_entry_t *)
1088 (lli_table_ptr->physical_address);
1089
1090 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1091
1092 if ((unsigned long) lli_table_ptr != 0xffffffff)
1093 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
1094
1095 table_count++;
1096 }
1097 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1098 }
1099
1100
/*
  This function prepares only an input DMA table for synchronous symmetric
  operations (HASH)
*/
1105 static int sep_prepare_input_dma_table(struct sep_device *sep,
1106 unsigned long app_virt_addr,
1107 unsigned long data_size,
1108 unsigned long block_size,
1109 unsigned long *lli_table_ptr,
1110 unsigned long *num_entries_ptr,
1111 unsigned long *table_data_size_ptr,
1112 bool isKernelVirtualAddress)
1113 {
1114 /* pointer to the info entry of the table - the last entry */
1115 struct sep_lli_entry_t *info_entry_ptr;
1116 /* array of pointers ot page */
1117 struct sep_lli_entry_t *lli_array_ptr;
1118 /* points to the first entry to be processed in the lli_in_array */
1119 unsigned long current_entry;
1120 /* num entries in the virtual buffer */
1121 unsigned long sep_lli_entries;
1122 /* lli table pointer */
1123 struct sep_lli_entry_t *in_lli_table_ptr;
1124 /* the total data in one table */
1125 unsigned long table_data_size;
1126 /* number of entries in lli table */
1127 unsigned long num_entries_in_table;
1128 /* next table address */
1129 void *lli_table_alloc_addr;
1130 unsigned long result;
1131
1132 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1133
1134 edbg("SEP Driver:data_size is %lu\n", data_size);
1135 edbg("SEP Driver:block_size is %lu\n", block_size);
1136
1137 /* initialize the pages pointers */
1138 sep->in_page_array = 0;
1139 sep->in_num_pages = 0;
1140
1141 if (data_size == 0) {
1142 /* special case - created 2 entries table with zero data */
1143 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1144 /* FIXME: Should the entry below not be for _bus */
1145 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1146 in_lli_table_ptr->block_size = 0;
1147
1148 in_lli_table_ptr++;
1149 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1150 in_lli_table_ptr->block_size = 0;
1151
1152 *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1153 *num_entries_ptr = 2;
1154 *table_data_size_ptr = 0;
1155
1156 goto end_function;
1157 }
1158
1159 /* check if the pages are in Kernel Virtual Address layout */
1160 if (isKernelVirtualAddress == true)
1161 /* lock the pages of the kernel buffer and translate them to pages */
1162 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1163 else
1164 /* lock the pages of the user buffer and translate them to pages */
1165 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1166
1167 if (result)
1168 return result;
1169
1170 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1171
1172 current_entry = 0;
1173 info_entry_ptr = 0;
1174 sep_lli_entries = sep->in_num_pages;
1175
1176 /* initiate to point after the message area */
1177 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1178
1179 /* loop till all the entries in in array are not processed */
1180 while (current_entry < sep_lli_entries) {
1181 /* set the new input and output tables */
1182 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1183
1184 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1185
1186 /* calculate the maximum size of data for input table */
1187 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1188
1189 /* now calculate the table size so that it will be module block size */
1190 table_data_size = (table_data_size / block_size) * block_size;
1191
1192 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1193
1194 /* construct input lli table */
1195 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1196
1197 if (info_entry_ptr == 0) {
1198 /* set the output parameters to physical addresses */
1199 *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1200 *num_entries_ptr = num_entries_in_table;
1201 *table_data_size_ptr = table_data_size;
1202
1203 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1204 } else {
1205 /* update the info entry of the previous in table */
1206 info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1207 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1208 }
1209
1210 /* save the pointer to the info entry of the current tables */
1211 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1212 }
1213
1214 /* print input tables */
1215 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1216 sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1217
1218 /* the array of the pages */
1219 kfree(lli_array_ptr);
1220 end_function:
1221 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1222 return 0;
1223
1224 }
1225
1226 /*
1227 This function creates the input and output dma tables for
1228 symmetric operations (AES/DES) according to the block size from LLI arays
1229 */
static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
					     struct sep_lli_entry_t *lli_in_array,
					     unsigned long sep_in_lli_entries,
					     struct sep_lli_entry_t *lli_out_array,
					     unsigned long sep_out_lli_entries,
					     unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where next lli table can be allocated: keep void *
	   as there is pointer scaling to fix otherwise */
	void *lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the input table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the output table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if these are the first tables built from the
	   arrays; NOTE(review): set but never read below - the first-table
	   case is detected via info_in_entry_ptr == 0 instead */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop till all the entries in the in array are processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* carve the next input table out of the shared area */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* carve the matching output table right after it */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* in and out tables must carry the same amount of data, so
		   take the smaller of the two */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* round the table size down to a multiple of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* first pair: report their bus addresses to the caller */
			*lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* chain: update the info entry of the previous in
			   table; block_size packs entry count (high 8 bits)
			   and data size (low 24 bits) */
			info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* and of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}
1352
1353
/*
  This function builds input and output DMA tables for synchronous
  symmetric operations (AES, DES). It also checks that each table
  is of the modular block size
*/
1359 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1360 unsigned long app_virt_in_addr,
1361 unsigned long app_virt_out_addr,
1362 unsigned long data_size,
1363 unsigned long block_size,
1364 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1365 {
1366 /* array of pointers of page */
1367 struct sep_lli_entry_t *lli_in_array;
1368 /* array of pointers of page */
1369 struct sep_lli_entry_t *lli_out_array;
1370 int result = 0;
1371
1372 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1373
1374 /* initialize the pages pointers */
1375 sep->in_page_array = 0;
1376 sep->out_page_array = 0;
1377
1378 /* check if the pages are in Kernel Virtual Address layout */
1379 if (isKernelVirtualAddress == true) {
1380 /* lock the pages of the kernel buffer and translate them to pages */
1381 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1382 if (result) {
1383 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1384 goto end_function;
1385 }
1386 } else {
1387 /* lock the pages of the user buffer and translate them to pages */
1388 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1389 if (result) {
1390 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
1391 goto end_function;
1392 }
1393 }
1394
1395 if (isKernelVirtualAddress == true) {
1396 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1397 if (result) {
1398 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1399 goto end_function_with_error1;
1400 }
1401 } else {
1402 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1403 if (result) {
1404 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1405 goto end_function_with_error1;
1406 }
1407 }
1408 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1409 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1410 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1411
1412
1413 /* call the fucntion that creates table from the lli arrays */
1414 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1415 if (result) {
1416 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1417 goto end_function_with_error2;
1418 }
1419
1420 /* fall through - free the lli entry arrays */
1421 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1422 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1423 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1424 end_function_with_error2:
1425 kfree(lli_out_array);
1426 end_function_with_error1:
1427 kfree(lli_in_array);
1428 end_function:
1429 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1430 return result;
1431
1432 }
1433
/*
  this function handles the request for creation of the DMA tables
  for the synchronous symmetric operations (AES, DES)
*/
1438 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1439 unsigned long arg)
1440 {
1441 int error;
1442 /* command arguments */
1443 struct sep_driver_build_sync_table_t command_args;
1444
1445 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1446
1447 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1448 if (error)
1449 goto end_function;
1450
1451 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1452 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1453 edbg("data_size is %lu\n", command_args.data_in_size);
1454 edbg("block_size is %lu\n", command_args.block_size);
1455
1456 /* check if we need to build only input table or input/output */
1457 if (command_args.app_out_address)
1458 /* prepare input and output tables */
1459 error = sep_prepare_input_output_dma_table(sep,
1460 command_args.app_in_address,
1461 command_args.app_out_address,
1462 command_args.data_in_size,
1463 command_args.block_size,
1464 &command_args.in_table_address,
1465 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1466 else
1467 /* prepare input tables */
1468 error = sep_prepare_input_dma_table(sep,
1469 command_args.app_in_address,
1470 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1471
1472 if (error)
1473 goto end_function;
1474 /* copy to user */
1475 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1476 error = -EFAULT;
1477 end_function:
1478 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1479 return error;
1480 }
1481
1482 /*
1483 this function handles the request for freeing dma table for synhronic actions
1484 */
1485 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1486 {
1487 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1488
1489 /* free input pages array */
1490 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1491
1492 /* free output pages array if needed */
1493 if (sep->out_page_array)
1494 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1495
1496 /* reset all the values */
1497 sep->in_page_array = 0;
1498 sep->out_page_array = 0;
1499 sep->in_num_pages = 0;
1500 sep->out_num_pages = 0;
1501 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1502 return 0;
1503 }
1504
/*
  this function finds a free slot for a new flow dma table
*/
1508 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1509 unsigned long **table_address_ptr)
1510 {
1511 int error = 0;
1512 /* pointer to the id field of the flow dma table */
1513 unsigned long *start_table_ptr;
1514 /* Do not make start_addr unsigned long * unless fixing the offset
1515 computations ! */
1516 void *flow_dma_area_start_addr;
1517 unsigned long *flow_dma_area_end_addr;
1518 /* maximum table size in words */
1519 unsigned long table_size_in_words;
1520
1521 /* find the start address of the flow DMA table area */
1522 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1523
1524 /* set end address of the flow table area */
1525 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1526
1527 /* set table size in words */
1528 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1529
1530 /* set the pointer to the start address of DMA area */
1531 start_table_ptr = flow_dma_area_start_addr;
1532
1533 /* find the space for the next table */
1534 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1535 start_table_ptr += table_size_in_words;
1536
1537 /* check if we reached the end of floa tables area */
1538 if (start_table_ptr >= flow_dma_area_end_addr)
1539 error = -1;
1540 else
1541 *table_address_ptr = start_table_ptr;
1542
1543 return error;
1544 }
1545
1546 /*
1547 This function creates one DMA table for flow and returns its data,
1548 and pointer to its info entry
1549 */
static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
						unsigned long virt_buff_addr,
						unsigned long virt_buff_size,
						struct sep_lli_entry_t *table_data,
						struct sep_lli_entry_t **info_entry_ptr,
						struct sep_flow_context_t *flow_data_ptr,
						bool isKernelVirtualAddress)
{
	int error;
	/* number of pages (= number of lli entries) covering the buffer */
	unsigned long lli_array_size;
	struct sep_lli_entry_t *lli_array;
	struct sep_lli_entry_t *flow_dma_table_entry_ptr;
	/* pointer to the id word at the start of the chosen table slot */
	unsigned long *start_dma_table_ptr;
	/* total table data counter */
	unsigned long dma_table_data_count;
	/* pointer that will keep the pointer to the pages of the virtual buffer */
	struct page **page_array_ptr;
	unsigned long entry_count;

	/* NOTE(review): flow_data_ptr is not used anywhere in this function */

	/* find the space for the new table */
	error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
	if (error)
		goto end_function;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock kernel buffer in the memory */
		error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
	else
		/* lock user buffer in the memory */
		error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);

	if (error)
		goto end_function;

	/* slot layout: word[0] = number of entries/pages (non-zero marks the
	   slot taken), word[1] = pointer to the page array, word[2..] = the
	   lli entries; sep_deallocated_flow_tables() walks this layout back
	   via the *(table_ptr - 2) / *(table_ptr - 1) accesses */
	*start_dma_table_ptr = lli_array_size;

	/* point to the place of the pages pointers of the table */
	start_dma_table_ptr++;

	/* set the pages pointer */
	*start_dma_table_ptr = (unsigned long) page_array_ptr;

	/* set the pointer to the first entry */
	flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);

	/* now create the entries for table */
	for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
		flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;

		flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;

		/* set the total data of a table */
		dma_table_data_count += lli_array[entry_count].block_size;

		flow_dma_table_entry_ptr++;
	}

	/* set the physical address of the first entry */
	/* NOTE(review): the synchronous-table code paths translate shared-area
	   addresses with sep_shared_virt_to_bus(), while this uses
	   virt_to_phys() - confirm which translation the flow tables need */
	table_data->physical_address = virt_to_phys(start_dma_table_ptr);

	/* set the num_entries (entries + info entry) and total data size */
	table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);

	/* set the info entry (end-of-chain marker) */
	flow_dma_table_entry_ptr->physical_address = 0xffffffff;
	flow_dma_table_entry_ptr->block_size = 0;

	/* set the pointer to info entry */
	*info_entry_ptr = flow_dma_table_entry_ptr;

	/* free the temporary array of the lli entries */
	kfree(lli_array);
end_function:
	return error;
}
1629
1630
1631
1632 /*
1633 This function creates a list of tables for flow and returns the data for
1634 the first and last tables of the list
1635 */
static int sep_prepare_flow_dma_tables(struct sep_device *sep,
					unsigned long num_virtual_buffers,
					unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
{
	int error;
	/* virtual address of the current buffer */
	unsigned long virt_buff_addr;
	/* size in bytes of the current buffer */
	unsigned long virt_buff_size;
	/* table data (address + packed size/entries) of the table built last */
	struct sep_lli_entry_t table_data;
	struct sep_lli_entry_t *info_entry_ptr;
	struct sep_lli_entry_t *prev_info_entry_ptr;
	unsigned long i;

	/* init vars */
	error = 0;
	prev_info_entry_ptr = 0;

	/* init the first table to default */
	table_data.physical_address = 0xffffffff;
	first_table_data_ptr->physical_address = 0xffffffff;
	table_data.block_size = 0;

	for (i = 0; i < num_virtual_buffers; i++) {
		/* get the virtual buffer address */
		/* NOTE(review): get_user() is passed &first_buff_addr - the
		   address of a local kernel variable - rather than the
		   user-space pointer value held in first_buff_addr, and
		   first_buff_addr++ advances by 1, not by sizeof(unsigned
		   long).  This looks like a bug; confirm against the
		   intended userspace ABI before changing it. */
		error = get_user(virt_buff_addr, &first_buff_addr);
		if (error)
			goto end_function;

		/* get the virtual buffer size */
		first_buff_addr++;
		error = get_user(virt_buff_size, &first_buff_addr);
		if (error)
			goto end_function;

		/* advance the address to point to the next pair of address|size */
		first_buff_addr++;

		/* now prepare the one flow LLI table from the data */
		error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
		if (error)
			goto end_function;

		if (i == 0) {
			/* if this is the first table - save it to return to the user
			   application */
			*first_table_data_ptr = table_data;

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		} else {
			/* not first table - chain it: the previous table's
			   info entry gets this table's data plus the
			   interrupt flag bit */
			prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		}
	}

	/* set the last table data */
	*last_table_data_ptr = table_data;
end_function:
	return error;
}
1699
1700 /*
1701 this function goes over all the flow tables connected to the given
1702 table and deallocate them
1703 */
static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
{
	/* pointer to the first lli entry of the current table; the two words
	   preceding it hold the page count and the page-array pointer, as
	   laid out by sep_prepare_one_flow_dma_table() */
	unsigned long *table_ptr;
	/* number of entries (including info entry) in the current table */
	unsigned long num_entries;
	unsigned long num_pages;
	struct page **pages_ptr;
	/* info (last) entry of the current table */
	struct sep_lli_entry_t *info_entry_ptr;

	/* set the pointer to the first table */
	table_ptr = (unsigned long *) first_table_ptr->physical_address;

	/* set the num of entries */
	num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
	    & SEP_NUM_ENTRIES_MASK;

	/* go over all the connected tables; 0xffffffff in the chain pointer
	   marks the end of the list */
	while (*table_ptr != 0xffffffff) {
		/* get number of pages (stored two words before the entries) */
		num_pages = *(table_ptr - 2);

		/* get the pointer to the pages (stored one word before) */
		pages_ptr = (struct page **) (*(table_ptr - 1));

		/* free the pages */
		sep_free_dma_pages(pages_ptr, num_pages, 1);

		/* goto to the info entry (last entry of the table) */
		info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);

		/* follow the info entry to the next table in the chain */
		table_ptr = (unsigned long *) info_entry_ptr->physical_address;
		num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
	}

	return;
}
1742
1743 /**
1744 * sep_find_flow_context - find a flow
1745 * @sep: the SEP we are working with
1746 * @flow_id: flow identifier
1747 *
1748 * Returns a pointer the matching flow, or NULL if the flow does not
1749 * exist.
1750 */
1751
1752 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1753 unsigned long flow_id)
1754 {
1755 int count;
1756 /*
1757 * always search for flow with id default first - in case we
1758 * already started working on the flow there can be no situation
1759 * when 2 flows are with default flag
1760 */
1761 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1762 if (sep->flows[count].flow_id == flow_id)
1763 return &sep->flows[count];
1764 }
1765 return NULL;
1766 }
1767
1768
1769 /*
1770 this function handles the request to create the DMA tables for flow
1771 */
1772 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1773 unsigned long arg)
1774 {
1775 int error = -ENOENT;
1776 struct sep_driver_build_flow_table_t command_args;
1777 /* first table - output */
1778 struct sep_lli_entry_t first_table_data;
1779 /* dma table data */
1780 struct sep_lli_entry_t last_table_data;
1781 /* pointer to the info entry of the previuos DMA table */
1782 struct sep_lli_entry_t *prev_info_entry_ptr;
1783 /* pointer to the flow data strucutre */
1784 struct sep_flow_context_t *flow_context_ptr;
1785
1786 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1787
1788 /* init variables */
1789 prev_info_entry_ptr = 0;
1790 first_table_data.physical_address = 0xffffffff;
1791
1792 /* find the free structure for flow data */
1793 error = -EINVAL;
1794 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1795 if (flow_context_ptr == NULL)
1796 goto end_function;
1797
1798 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1799 if (error)
1800 goto end_function;
1801
1802 /* create flow tables */
1803 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1804 if (error)
1805 goto end_function_with_error;
1806
1807 /* check if flow is static */
1808 if (!command_args.flow_type)
1809 /* point the info entry of the last to the info entry of the first */
1810 last_table_data = first_table_data;
1811
1812 /* set output params */
1813 command_args.first_table_addr = first_table_data.physical_address;
1814 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1815 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1816
1817 /* send the parameters to user application */
1818 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1819 if (error)
1820 goto end_function_with_error;
1821
1822 /* all the flow created - update the flow entry with temp id */
1823 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1824
1825 /* set the processing tables data in the context */
1826 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1827 flow_context_ptr->input_tables_in_process = first_table_data;
1828 else
1829 flow_context_ptr->output_tables_in_process = first_table_data;
1830
1831 goto end_function;
1832
1833 end_function_with_error:
1834 /* free the allocated tables */
1835 sep_deallocated_flow_tables(&first_table_data);
1836 end_function:
1837 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1838 return error;
1839 }
1840
1841 /*
1842 this function handles add tables to flow
1843 */
1844 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1845 {
1846 int error;
1847 unsigned long num_entries;
1848 struct sep_driver_add_flow_table_t command_args;
1849 struct sep_flow_context_t *flow_context_ptr;
1850 /* first dma table data */
1851 struct sep_lli_entry_t first_table_data;
1852 /* last dma table data */
1853 struct sep_lli_entry_t last_table_data;
1854 /* pointer to the info entry of the current DMA table */
1855 struct sep_lli_entry_t *info_entry_ptr;
1856
1857 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1858
1859 /* get input parameters */
1860 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1861 if (error)
1862 goto end_function;
1863
1864 /* find the flow structure for the flow id */
1865 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1866 if (flow_context_ptr == NULL)
1867 goto end_function;
1868
1869 /* prepare the flow dma tables */
1870 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1871 if (error)
1872 goto end_function_with_error;
1873
1874 /* now check if there is already an existing add table for this flow */
1875 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1876 /* this buffer was for input buffers */
1877 if (flow_context_ptr->input_tables_flag) {
1878 /* add table already exists - add the new tables to the end
1879 of the previous */
1880 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1881
1882 info_entry_ptr = (struct sep_lli_entry_t *)
1883 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1884
1885 /* connect to list of tables */
1886 *info_entry_ptr = first_table_data;
1887
1888 /* set the first table data */
1889 first_table_data = flow_context_ptr->first_input_table;
1890 } else {
1891 /* set the input flag */
1892 flow_context_ptr->input_tables_flag = 1;
1893
1894 /* set the first table data */
1895 flow_context_ptr->first_input_table = first_table_data;
1896 }
1897 /* set the last table data */
1898 flow_context_ptr->last_input_table = last_table_data;
1899 } else { /* this is output tables */
1900
1901 /* this buffer was for input buffers */
1902 if (flow_context_ptr->output_tables_flag) {
1903 /* add table already exists - add the new tables to
1904 the end of the previous */
1905 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1906
1907 info_entry_ptr = (struct sep_lli_entry_t *)
1908 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1909
1910 /* connect to list of tables */
1911 *info_entry_ptr = first_table_data;
1912
1913 /* set the first table data */
1914 first_table_data = flow_context_ptr->first_output_table;
1915 } else {
1916 /* set the input flag */
1917 flow_context_ptr->output_tables_flag = 1;
1918
1919 /* set the first table data */
1920 flow_context_ptr->first_output_table = first_table_data;
1921 }
1922 /* set the last table data */
1923 flow_context_ptr->last_output_table = last_table_data;
1924 }
1925
1926 /* set output params */
1927 command_args.first_table_addr = first_table_data.physical_address;
1928 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1929 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1930
1931 /* send the parameters to user application */
1932 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1933 end_function_with_error:
1934 /* free the allocated tables */
1935 sep_deallocated_flow_tables(&first_table_data);
1936 end_function:
1937 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1938 return error;
1939 }
1940
1941 /*
1942 this function add the flow add message to the specific flow
1943 */
1944 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1945 {
1946 int error;
1947 struct sep_driver_add_message_t command_args;
1948 struct sep_flow_context_t *flow_context_ptr;
1949
1950 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1951
1952 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1953 if (error)
1954 goto end_function;
1955
1956 /* check input */
1957 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1958 error = -ENOMEM;
1959 goto end_function;
1960 }
1961
1962 /* find the flow context */
1963 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1964 if (flow_context_ptr == NULL)
1965 goto end_function;
1966
1967 /* copy the message into context */
1968 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1969 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1970 end_function:
1971 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1972 return error;
1973 }
1974
1975
1976 /*
1977 this function returns the bus and virtual addresses of the static pool
1978 */
1979 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1980 {
1981 int error;
1982 struct sep_driver_static_pool_addr_t command_args;
1983
1984 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1985
1986 /*prepare the output parameters in the struct */
1987 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1988 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1989
1990 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1991
1992 /* send the parameters to user application */
1993 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1994 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1995 return error;
1996 }
1997
1998 /*
1999 this address gets the offset of the physical address from the start
2000 of the mapped area
2001 */
2002 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2003 {
2004 int error;
2005 struct sep_driver_get_mapped_offset_t command_args;
2006
2007 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2008
2009 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2010 if (error)
2011 goto end_function;
2012
2013 if (command_args.physical_address < sep->shared_bus) {
2014 error = -EINVAL;
2015 goto end_function;
2016 }
2017
2018 /*prepare the output parameters in the struct */
2019 command_args.offset = command_args.physical_address - sep->shared_bus;
2020
2021 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2022
2023 /* send the parameters to user application */
2024 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2025 end_function:
2026 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2027 return error;
2028 }
2029
2030
/*
  Waits for SEP to post a start-up status word in GPR3 and returns 0 on
  success, or the detailed SEP error status (read from GPR0) when the
  posted value is the fatal-error marker 0x1.
*/
static int sep_start_handler(struct sep_device *sep)
{
	unsigned long reg_val;
	unsigned long error = 0;

	dbg("SEP Driver:--------> sep_start_handler start\n");

	/* wait in polling for message from SEP
	   NOTE(review): busy-wait with no timeout and no cpu_relax() -
	   a hung SEP will spin this CPU forever */
	do
		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg_val);

	/* check the value: 0x1 signals a fatal error */
	if (reg_val == 0x1)
		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	dbg("SEP Driver:<-------- sep_start_handler end\n");
	return error;
}
2053
/*
  this function handles the request for SEP initialization: it copies a
  user-supplied boot message word-by-word into the SEP's SRAM, signals
  SEP via GPR0, then polls GPR3 for the init status.
*/
static int sep_init_handler(struct sep_device *sep, unsigned long arg)
{
	unsigned long message_word;
	unsigned long *message_ptr;
	struct sep_driver_init_t command_args;
	unsigned long counter;
	unsigned long error;
	unsigned long reg_val;

	dbg("SEP Driver:--------> sep_init_handler start\n");
	error = 0;

	/* NOTE(review): a copy failure returns the positive leftover byte
	   count, not -EFAULT */
	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));

	dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");

	if (error)
		goto end_function;

	/* PATCH - configure the DMA to single -burst instead of multi-burst */
	/*sep_configure_dma_burst(); */

	dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");

	message_ptr = (unsigned long *) command_args.message_addr;

	/* set the base address of the SRAM */
	sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);

	/* stream the message into SRAM one word at a time
	   NOTE(review): get_user() result is ignored and
	   message_size_in_words comes unchecked from user space - both
	   should be validated */
	for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
		get_user(message_word, message_ptr);
		/* write data to SRAM */
		sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
		edbg("SEP Driver:message_word is %lu\n", message_word);
		/* wait for write complete */
		sep_wait_sram_write(sep);
	}
	dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
	/* signal SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);

	/* busy-poll GPR3 until any bit other than bit 1 is set */
	do
		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!(reg_val & 0xFFFFFFFD));

	dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");

	/* check the value: 0x1 marks a failed init */
	if (reg_val == 0x1) {
		edbg("SEP Driver:init failed\n");

		/* 0x8060 - presumably the SEP software monitor register;
		   confirm against the hardware spec */
		error = sep_read_reg(sep, 0x8060);
		edbg("SEP Driver:sw monitor is %lu\n", error);

		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver:error is %lu\n", error);
	}
end_function:
	dbg("SEP Driver:<-------- sep_init_handler end\n");
	return error;

}
2120
/*
  this function handles the request cache and resident reallocation:
  it (re)loads the SEP firmware and reports the resulting bus
  addresses (cache, resident, shared area, and the lowest of the
  three as the new base) back to user space.
*/
static int sep_realloc_cache_resident_handler(struct sep_device *sep,
						unsigned long arg)
{
	struct sep_driver_realloc_cache_resident_t command_args;
	int error;

	/* copy cache and resident to the their intended locations */
	error = sep_load_firmware(sep);
	if (error)
		return error;

	command_args.new_base_addr = sep->shared_bus;

	/* find the new base address according to the lowest address between
	   cache, resident and shared area */
	if (sep->resident_bus < command_args.new_base_addr)
		command_args.new_base_addr = sep->resident_bus;
	if (sep->rar_bus < command_args.new_base_addr)
		command_args.new_base_addr = sep->rar_bus;

	/* set the return parameters */
	command_args.new_cache_addr = sep->rar_bus;
	command_args.new_resident_addr = sep->resident_bus;

	/* set the new shared area */
	command_args.new_shared_area_addr = sep->shared_bus;

	edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
	edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
	edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
	edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);

	/* return to user */
	if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
		return -EFAULT;
	return 0;
}
2161
2162 /**
2163 * sep_get_time_handler - time request from user space
2164 * @sep: sep we are to set the time for
2165 * @arg: pointer to user space arg buffer
2166 *
2167 * This function reports back the time and the address in the SEP
2168 * shared buffer at which it has been placed. (Do we really need this!!!)
2169 */
2170
2171 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2172 {
2173 struct sep_driver_get_time_t command_args;
2174
2175 mutex_lock(&sep_mutex);
2176 command_args.time_value = sep_set_time(sep);
2177 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2178 mutex_unlock(&sep_mutex);
2179 if (copy_to_user((void __user *)arg,
2180 &command_args, sizeof(struct sep_driver_get_time_t)))
2181 return -EFAULT;
2182 return 0;
2183
2184 }
2185
/*
  This API handles the end transaction request.
  NOTE(review): the whole body is compiled out (#if 0), so this
  currently only logs and returns 0.
*/
static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
{
	dbg("SEP Driver:--------> sep_end_transaction_handler start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);

	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

	/* lock the sep mutex */
	mutex_unlock(&sep_mutex);
#endif

	dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");

	return 0;
}
2208
2209
2210 /**
2211 * sep_set_flow_id_handler - handle flow setting
2212 * @sep: the SEP we are configuring
2213 * @flow_id: the flow we are setting
2214 *
2215 * This function handler the set flow id command
2216 */
2217 static int sep_set_flow_id_handler(struct sep_device *sep,
2218 unsigned long flow_id)
2219 {
2220 int error = 0;
2221 struct sep_flow_context_t *flow_data_ptr;
2222
2223 /* find the flow data structure that was just used for creating new flow
2224 - its id should be default */
2225
2226 mutex_lock(&sep_mutex);
2227 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2228 if (flow_data_ptr)
2229 flow_data_ptr->flow_id = flow_id; /* set flow id */
2230 else
2231 error = -EINVAL;
2232 mutex_unlock(&sep_mutex);
2233 return error;
2234 }
2235
/*
 * sep_ioctl - dispatch user-space ioctl requests to the SEP handlers.
 * Returns 0 or the handler's error code.
 * NOTE(review): several handlers return the positive leftover byte
 * count from copy_*_user instead of a negative errno - confirm/clean up.
 */
static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = 0;
	struct sep_device *sep = filp->private_data;

	dbg("------------>SEP Driver: ioctl start\n");

	edbg("SEP Driver: cmd is %x\n", cmd);

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		/* send command to SEP */
		sep_send_command_handler(sep);
		edbg("SEP Driver: after sep_send_command_handler\n");
		break;
	case SEP_IOCSENDSEPRPLYCOMMAND:
		/* send reply command to SEP */
		sep_send_reply_command_handler(sep);
		break;
	case SEP_IOCALLOCDATAPOLL:
		/* allocate data pool */
		error = sep_allocate_data_pool_memory_handler(sep, arg);
		break;
	case SEP_IOCWRITEDATAPOLL:
		/* write data into memory pool */
		error = sep_write_into_data_pool_handler(sep, arg);
		break;
	case SEP_IOCREADDATAPOLL:
		/* read data from data pool into application memory */
		error = sep_read_from_data_pool_handler(sep, arg);
		break;
	case SEP_IOCCREATESYMDMATABLE:
		/* create dma table for synhronic operation */
		error = sep_create_sync_dma_tables_handler(sep, arg);
		break;
	case SEP_IOCCREATEFLOWDMATABLE:
		/* create flow dma tables */
		error = sep_create_flow_dma_tables_handler(sep, arg);
		break;
	case SEP_IOCFREEDMATABLEDATA:
		/* free the pages */
		error = sep_free_dma_table_data_handler(sep);
		break;
	case SEP_IOCSETFLOWID:
		/* set flow id */
		error = sep_set_flow_id_handler(sep, (unsigned long)arg);
		break;
	case SEP_IOCADDFLOWTABLE:
		/* add tables to the dynamic flow */
		error = sep_add_flow_tables_handler(sep, arg);
		break;
	case SEP_IOCADDFLOWMESSAGE:
		/* add message of add tables to flow */
		error = sep_add_flow_tables_message_handler(sep, arg);
		break;
	case SEP_IOCSEPSTART:
		/* start command to sep */
		error = sep_start_handler(sep);
		break;
	case SEP_IOCSEPINIT:
		/* init command to sep */
		error = sep_init_handler(sep, arg);
		break;
	case SEP_IOCGETSTATICPOOLADDR:
		/* get the physical and virtual addresses of the static pool */
		error = sep_get_static_pool_addr_handler(sep, arg);
		break;
	case SEP_IOCENDTRANSACTION:
		error = sep_end_transaction_handler(sep, arg);
		break;
	case SEP_IOCREALLOCCACHERES:
		error = sep_realloc_cache_resident_handler(sep, arg);
		break;
	case SEP_IOCGETMAPPEDADDROFFSET:
		error = sep_get_physical_mapped_offset_handler(sep, arg);
		break;
	case SEP_IOCGETIME:
		error = sep_get_time_handler(sep, arg);
		break;
	default:
		error = -ENOTTY;
		break;
	}
	dbg("SEP Driver:<-------- ioctl end\n");
	return error;
}
2322
2323
2324
2325 #if !SEP_DRIVER_POLLING_MODE
2326
/* handler for flow done interrupt: frees the tables of the completed
   flow and, if more input tables are queued, kicks SEP again */

static void sep_flow_done_handler(struct work_struct *work)
{
	struct sep_flow_context_t *flow_data_ptr;

	/* obtain the mutex */
	mutex_lock(&sep_mutex);

	/* get the pointer to context
	   NOTE(review): this cast is only valid if flow_wq is the first
	   member of struct sep_flow_context_t - container_of() would be
	   safer; confirm the struct layout */
	flow_data_ptr = (struct sep_flow_context_t *) work;

	/* free all the current input tables in sep */
	sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);

	/* free all the current tables output tables in SEP (if needed) */
	if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
		sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);

	/* check if we have additional tables to be sent to SEP only input
	   flag may be checked */
	if (flow_data_ptr->input_tables_flag) {
		/* copy the message to the shared RAM and signal SEP
		   NOTE(review): the arguments copy FROM sep->shared_addr INTO
		   the flow message - the opposite of what this comment says;
		   also 'sep' is not a parameter of this function, so it must
		   resolve to a file-scope variable - verify both */
		memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);

		sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
	}
	mutex_unlock(&sep_mutex);
}
/*
  interrupt handler function: acknowledges SEP reply interrupts (GPR2,
  bit 13 of IRR) and wakes the waiting process. The flow-done path is
  currently disabled (see note below).
*/
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	irqreturn_t int_error;
	unsigned long reg_val;
	unsigned long flow_id;
	struct sep_flow_context_t *flow_context_ptr;
	struct sep_device *sep = dev_id;

	int_error = IRQ_HANDLED;

	/* read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
	edbg("SEP Interrupt - reg is %08lx\n", reg_val);

	/* check if this is the flow interrupt
	   NOTE(review): deliberately disabled - the condition is a hard 0
	   and the intended test (IRR bit 11) is commented out */
	if (0 /*reg_val & (0x1 << 11) */ ) {
		/* read GPRO to find out the which flow is done */
		flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

		/* find the contex of the flow */
		flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
		if (flow_context_ptr == NULL)
			goto end_function_with_error;

		/* queue the work */
		INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
		queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);

	} else {
		/* check if this is reply interrupt from SEP */
		if (reg_val & (0x1 << 13)) {
			/* update the counter of reply messages */
			sep->reply_ct++;
			/* wake up the waiting process */
			wake_up(&sep_event);
		} else {
			/* not ours - leave without touching ICR */
			int_error = IRQ_NONE;
			goto end_function;
		}
	}
end_function_with_error:
	/* clear the interrupt by writing the IRR value back to ICR */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
end_function:
	return int_error;
}
2405
2406 #endif
2407
2408
2409
2410 #if 0
2411
/* spin until the SEP busy register reads zero (SEP has released the
   bus); no timeout - only used by the disabled DMA-burst patch below */
static void sep_wait_busy(struct sep_device *sep)
{
	u32 reg;

	do {
		reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
	} while (reg);
}
2420
/*
  PATCH for configuring the DMA to single burst instead of multi-burst:
  requests register access from SEP via GPR0, writes the AHB burst
  register, then releases the bus. Currently compiled out.
*/
static void sep_configure_dma_burst(struct sep_device *sep)
{
#define HW_AHB_RD_WR_BURSTS_REG_ADDR 		0x0E10UL

	dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");

	/* request access to registers from SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");

	/* wait until SEP is no longer busy before touching its registers */
	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");

	/* set the DMA burst register to single burst */
	sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);

	/* release the sep busy */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst done  \n");

}
2449
2450 #endif
2451
2452 /*
2453 Function that is activated on the successful probe of the SEP device
2454 */
2455 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2456 {
2457 int error = 0;
2458 struct sep_device *sep;
2459 int counter;
2460 int size; /* size of memory for allocation */
2461
2462 edbg("Sep pci probe starting\n");
2463 if (sep_dev != NULL) {
2464 dev_warn(&pdev->dev, "only one SEP supported.\n");
2465 return -EBUSY;
2466 }
2467
2468 /* enable the device */
2469 error = pci_enable_device(pdev);
2470 if (error) {
2471 edbg("error enabling pci device\n");
2472 goto end_function;
2473 }
2474
2475 /* set the pci dev pointer */
2476 sep_dev = &sep_instance;
2477 sep = &sep_instance;
2478
2479 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2480 /* transaction counter that coordinates the transactions between SEP
2481 and HOST */
2482 sep->send_ct = 0;
2483 /* counter for the messages from sep */
2484 sep->reply_ct = 0;
2485 /* counter for the number of bytes allocated in the pool
2486 for the current transaction */
2487 sep->data_pool_bytes_allocated = 0;
2488
2489 /* calculate the total size for allocation */
2490 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2491 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2492
2493 /* allocate the shared area */
2494 if (sep_map_and_alloc_shared_area(sep, size)) {
2495 error = -ENOMEM;
2496 /* allocation failed */
2497 goto end_function_error;
2498 }
2499 /* now set the memory regions */
2500 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2501 /* Note: this test section will need moving before it could ever
2502 work as the registers are not yet mapped ! */
2503 /* send the new SHARED MESSAGE AREA to the SEP */
2504 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2505
2506 /* poll for SEP response */
2507 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2508 while (retval != 0xffffffff && retval != sep->shared_bus)
2509 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2510
2511 /* check the return value (register) */
2512 if (retval != sep->shared_bus) {
2513 error = -ENOMEM;
2514 goto end_function_deallocate_sep_shared_area;
2515 }
2516 #endif
2517 /* init the flow contextes */
2518 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2519 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2520
2521 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2522 if (sep->flow_wq == NULL) {
2523 error = -ENOMEM;
2524 edbg("sep_driver:flow queue creation failed\n");
2525 goto end_function_deallocate_sep_shared_area;
2526 }
2527 edbg("SEP Driver: create flow workqueue \n");
2528 sep->pdev = pci_dev_get(pdev);
2529
2530 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2531 if (!sep->reg_addr) {
2532 edbg("sep: ioremap of registers failed.\n");
2533 goto end_function_deallocate_sep_shared_area;
2534 }
2535 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2536
2537 /* load the rom code */
2538 sep_load_rom_code(sep);
2539
2540 /* set up system base address and shared memory location */
2541 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2542 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2543 &sep->rar_bus, GFP_KERNEL);
2544
2545 if (!sep->rar_addr) {
2546 edbg("SEP Driver:can't allocate rar\n");
2547 goto end_function_uniomap;
2548 }
2549
2550
2551 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2552 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2553
2554 #if !SEP_DRIVER_POLLING_MODE
2555
2556 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2557
2558 /* clear ICR register */
2559 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2560
2561 /* set the IMR register - open only GPR 2 */
2562 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2563
2564 edbg("SEP Driver: about to call request_irq\n");
2565 /* get the interrupt line */
2566 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2567 if (error)
2568 goto end_function_free_res;
2569 return 0;
2570 edbg("SEP Driver: about to write IMR REG_ADDR");
2571
2572 /* set the IMR register - open only GPR 2 */
2573 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2574
2575 end_function_free_res:
2576 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2577 sep->rar_addr, sep->rar_bus);
2578 #endif /* SEP_DRIVER_POLLING_MODE */
2579 end_function_uniomap:
2580 iounmap(sep->reg_addr);
2581 end_function_deallocate_sep_shared_area:
2582 /* de-allocate shared area */
2583 sep_unmap_and_free_shared_area(sep, size);
2584 end_function_error:
2585 sep_dev = NULL;
2586 end_function:
2587 return error;
2588 }
2589
/* PCI ids this driver binds to: Intel SEP security processor (0x080c) */
static const struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
	{0}
};

/* export the id table so module autoload can match the device */
MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2596
/* field for registering driver to PCI device */
static struct pci_driver sep_pci_driver = {
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe
	/* FIXME: remove handler */
};
2604
/* major and minor device numbers (allocated dynamically at init) */
static dev_t sep_devno;
2607
2608 /* the files operations structure of the driver */
2609 static struct file_operations sep_file_operations = {
2610 .owner = THIS_MODULE,
2611 .unlocked_ioctl = sep_ioctl,
2612 .poll = sep_poll,
2613 .open = sep_open,
2614 .release = sep_release,
2615 .mmap = sep_mmap,
2616 };
2617
2618
/* cdev struct of the driver (registered in sep_register_driver_to_fs) */
static struct cdev sep_cdev;
2621
2622 /*
2623 this function registers the driver to the file system
2624 */
2625 static int sep_register_driver_to_fs(void)
2626 {
2627 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2628 if (ret_val) {
2629 edbg("sep: major number allocation failed, retval is %d\n",
2630 ret_val);
2631 return ret_val;
2632 }
2633 /* init cdev */
2634 cdev_init(&sep_cdev, &sep_file_operations);
2635 sep_cdev.owner = THIS_MODULE;
2636
2637 /* register the driver with the kernel */
2638 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2639 if (ret_val) {
2640 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2641 /* unregister dev numbers */
2642 unregister_chrdev_region(sep_devno, 1);
2643 }
2644 return ret_val;
2645 }
2646
2647
2648 /*--------------------------------------------------------------
2649 init function
2650 ----------------------------------------------------------------*/
2651 static int __init sep_init(void)
2652 {
2653 int ret_val = 0;
2654 dbg("SEP Driver:-------->Init start\n");
2655 /* FIXME: Probe can occur before we are ready to survive a probe */
2656 ret_val = pci_register_driver(&sep_pci_driver);
2657 if (ret_val) {
2658 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2659 goto end_function_unregister_from_fs;
2660 }
2661 /* register driver to fs */
2662 ret_val = sep_register_driver_to_fs();
2663 if (ret_val)
2664 goto end_function_unregister_pci;
2665 goto end_function;
2666 end_function_unregister_pci:
2667 pci_unregister_driver(&sep_pci_driver);
2668 end_function_unregister_from_fs:
2669 /* unregister from fs */
2670 cdev_del(&sep_cdev);
2671 /* unregister dev numbers */
2672 unregister_chrdev_region(sep_devno, 1);
2673 end_function:
2674 dbg("SEP Driver:<-------- Init end\n");
2675 return ret_val;
2676 }
2677
2678
2679 /*-------------------------------------------------------------
2680 exit function
2681 --------------------------------------------------------------*/
2682 static void __exit sep_exit(void)
2683 {
2684 int size;
2685
2686 dbg("SEP Driver:--------> Exit start\n");
2687
2688 /* unregister from fs */
2689 cdev_del(&sep_cdev);
2690 /* unregister dev numbers */
2691 unregister_chrdev_region(sep_devno, 1);
2692 /* calculate the total size for de-allocation */
2693 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2694 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2695 /* FIXME: We need to do this in the unload for the device */
2696 /* free shared area */
2697 if (sep_dev) {
2698 sep_unmap_and_free_shared_area(sep_dev, size);
2699 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2700 iounmap((void *) sep_dev->reg_addr);
2701 edbg("SEP Driver: iounmap \n");
2702 }
2703 edbg("SEP Driver: release_mem_region \n");
2704 dbg("SEP Driver:<-------- Exit end\n");
2705 }
2706
2707
/* module entry / exit points */
module_init(sep_init);
module_exit(sep_exit);

MODULE_LICENSE("GPL");