/*
 *  sep_main.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli	jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 *  2011.01.21	Move to sep_main.c to allow for sep_crypto.c
 *  2011.02.22	Enable kernel crypto operation
 *
 *  Please note that this driver is based on information in the Discretix
 *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 *  Overview and Integration Guide.
 */
/* #define SEP_PERF_DEBUG */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_crypto.h"
#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"
/* Let's not spend cycles iterating over message
 * area contents if debugging not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep)	_sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif
/*
 * Currently, there is only one SEP device per platform;
 * In event platforms in the future have more than one SEP
 * device, this will be a linked list
 */
struct sep_device *sep_dev;
/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to status queue element to remove
 *
 * This function will remove information about transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
			     struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
			current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
}
/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a started transaction to the status
 * queue.
 */
struct sep_queue_info *sep_queue_status_add(
						struct sep_device *sep,
						u32 opcode,
						u32 size,
						u32 pid,
						u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;

	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}
/**
 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
 * @sep: SEP device
 * @dmatables_region: Destination pointer for the buffer
 * @dma_ctx: DMA context for the transaction
 * @table_count: Number of MLLI/DMA tables to create
 *
 * The buffer created will not work as-is for DMA operations,
 * it needs to be copied over to the appropriate place in the
 * shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
		current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
		current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] dma context/region uninitialized\n",
			 current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
		current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
		dma_ctx->dmatables_len);

	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region)
		return -ENOMEM;

	/* Were there any previous tables that need to be preserved ? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}
/**
 * sep_wait_transaction - Used for synchronizing transactions
 * @sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				  &sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
			current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait, TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
				current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
			current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
				current->pid);
			error = -EINTR;
			goto end_function;
		}
	}

end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
	    (current->pid != sep->pid_doing_transaction))
		return -EACCES;

	/* We own the transaction */
	return 0;
}
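
/*
 * Illustrative usage sketch (not a function in this driver): a caller that
 * wants exclusive use of the SEP typically pairs the helpers above roughly
 * as follows, using the struct sep_device it obtained at open time:
 *
 *	error = sep_wait_transaction(sep);
 *	if (error)
 *		return error;	(interrupted by a signal while waiting)
 *	... build the message, send the command, poll for the reply ...
 *	sep_end_transaction_handler(sep, &dma_ctx, &call_status,
 *				    &my_queue_elem);
 *
 * Between those two calls only the process whose pid matches
 * sep->pid_doing_transaction gets past sep_check_transaction_owner().
 */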
/**
 * _sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This will only print a dump if DEBUG is set; it follows
 * kernel debug print enabling
 */
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
			current->pid, count/4, *p++);
}
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 * @size: size of shared area (taken from sep->shared_size)
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
			current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
		current->pid,
		sep->shared_size, sep->shared_addr,
		(unsigned long long)sep->shared_bus);
	return 0;
}
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
			  sep->shared_addr, sep->shared_bus);
}
/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
				    dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
		current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
				    struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
		current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
			current->pid);
		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled differently. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (((*dma_ctx)->secure_dma == false) &&
		    (dma->out_map_array)) {
			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for output */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (((*dma_ctx)->secure_dma == false) &&
		    (dma->out_page_array)) {
			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->
						out_page_array[count]);
				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter gather)
		 * is used for exclusively.
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
		current->pid);

	return 0;
}
/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: status queue element for this transaction
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
					struct sep_dma_context **dma_ctx,
					struct sep_call_status *call_status,
					struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
			current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
		current->pid);
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}
/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);

	kfree(filp->private_data);

	return 0;
}
/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause deadlock for ongoing
	 * transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range is as the size of the
	 * message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
		current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
			current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
		my_queue_elem);

end_function:
	return error;
}
/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
			current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
			 current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
		current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
			current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2) %x\n",
			current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
				current->pid);
			goto end_function;
		}

		/* Check if this is a SEP reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
				current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
				current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
				current->pid);
			mask |= POLLIN | POLLRDNORM;
			set_bit(SEP_LEGACY_POLL_DONE_OFFSET,
				&call_status->status);
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
			current->pid);
		mask = 0;
	}

end_function:
	return mask;
}
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * insertion.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
		current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
		current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
		current->pid, sep->shared_addr);

	return time.tv_sec;
}
/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises interrupt to SEP that signals that it has a new
 * command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
	    (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
		current->pid,
		sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */
	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}
/**
 * sep_crypto_dma - DMA map a scatterlist for SEP
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @direction: DMA data direction
 * @returns number of dma maps on success; negative on error
 *
 * This creates the dma table from the scatterlist
 * It is used only for kernel crypto as it works with scatterlists
 * representation of data buffers
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;
	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	/* Count the segments */
	count_segment = 0;
	temp_sg = sg;
	while (temp_sg) {
		count_segment += 1;
		temp_sg = scatterwalk_sg_next(temp_sg);
	}
	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x segments in sg\n", count_segment);

	/* DMA map segments */
	count_mapped = dma_map_sg(&sep->pdev->dev, sg,
		count_segment, direction);

	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x maps in sg\n", count_mapped);

	if (count_mapped == 0) {
		dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
		return -ENOMEM;
	}

	sep_dma = kmalloc(sizeof(struct sep_dma_map) *
		count_mapped, GFP_ATOMIC);

	if (sep_dma == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
		return -ENOMEM;
	}

	for_each_sg(sg, temp_sg, count_mapped, ct1) {
		sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
		sep_dma[ct1].size = sg_dma_len(temp_sg);
		dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
			ct1, (unsigned long)sep_dma[ct1].dma_addr,
			(unsigned long)sep_dma[ct1].size);
	}

	*dma_maps = sep_dma;
	return count_mapped;
}
/**
 * sep_crypto_lli - build LLI array from a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @data_size: total data size
 * @maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @llis: pointer to place a pointer to array of lli maps
 *	This is filled in; anything previous there will be lost
 *	The structure for lli maps is sep_lli_entry
 * @direction: DMA data direction
 * @returns number of dma maps on success; negative on error
 *
 * This creates the LLI table from the scatterlist
 * It is only used for kernel crypto as it works exclusively
 * with scatterlists (struct scatterlist) representation of
 * data buffers
 */
static int sep_crypto_lli(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **maps,
	struct sep_lli_entry **llis,
	u32 data_size,
	enum dma_data_direction direction)
{
	int ct1;
	int nbr_ents;
	struct sep_lli_entry *sep_lli;
	struct sep_dma_map *sep_map;

	nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
	if (nbr_ents <= 0) {
		dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
			nbr_ents);
		return nbr_ents;
	}

	sep_map = *maps;

	sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

	if (sep_lli == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
		kfree(*maps);
		*maps = NULL;
		return -ENOMEM;
	}

	for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
		sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

		/* Maximum for page is total data size */
		if (sep_map[ct1].size > data_size)
			sep_map[ct1].size = data_size;

		sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
	}

	*llis = sep_lli;
	return nbr_ents;
}
/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 * @dma_ctx: DMA context for this transaction
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that application data holds in this page.
 * This function is used only during kernel crypto mod calls from within
 * the kernel (when ioctl is not used)
 *
 * This is used only for kernel crypto. Kernel pages
 * are handled differently as they are done via
 * scatter gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	unsigned long kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int num_pages;
	struct scatterlist *sg;

	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	enum dma_data_direction direction;

	lli_array = NULL;
	map_array = NULL;

	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		direction = DMA_TO_DEVICE;
		sg = dma_ctx->src_sg;
	} else {
		direction = DMA_FROM_DEVICE;
		sg = dma_ctx->dst_sg;
	}

	num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
		data_size, direction);

	if (num_pages <= 0) {
		dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
			num_pages);
		return -ENOMEM;
	}

	/* Put mapped kernel sg into kernel resource array */

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
			dma_ctx->src_sg;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
			dma_ctx->dst_sg;
	}

	return 0;
}
/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 * @dma_ctx: DMA context for this transaction
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc_array(num_pages, sizeof(struct page *),
				   GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}

	map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
				  GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
		num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
		0, page_array, NULL);
	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] not all pages locked by get_user_pages, "
			"result 0x%X, num_pages 0x%X\n",
			current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
		current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n", current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}
/**
 * sep_lli_table_secure_dma - get lli array for IMR addresses
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: not used
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function creates lli tables for outputting data to
 * IMR memory, which is memory that cannot be accessed by the
 * x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array)
		return -ENOMEM;

	/*
	 * Fill the lli_array
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}
/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the lli array
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set to 1 when this is the last table to build
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array.
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
	     (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries left or if data size is more than the DMA
	 * restriction.
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA.
	 */
	if (next_table_data_size &&
	    next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

end_function:
	return table_data_size;
}
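
/*
 * Worked example, illustrative only (the real constants live in
 * sep_driver_config.h): assume SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is 3
 * and SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE is 0x10. For an lli array with
 * block sizes {0x30, 0x08} the first loop sums 0x38 and consumes every
 * entry, so *last_table_flag is set and 0x38 is returned. For
 * {0x30, 0x08, 0x04} the first loop again gives 0x38, the remaining entry
 * yields a next-table size of 0x04 < 0x10, so the current table is trimmed
 * to 0x38 - (0x10 - 0x04) = 0x2c, leaving 0x10 bytes of data for the
 * next table.
 */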
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
		current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
			current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
				current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;
}
/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM.
 * This implementation is for the external RAM.
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
		current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM.
 * This implementation is for the external RAM.
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of the created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
		current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, "
			"table_data_size is (hex) %lx\n",
			current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
			current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx "
				"block size is (hex) %x\n", current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size "
			"is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address "
			"is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is "
			"(hex) %lx num_table_entries is"
			" %lx bus_address is %lx\n",
			current->pid,
			table_data_size,
			num_table_entries,
			(unsigned long)lli_table_ptr->bus_address);

		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
		current->pid);
}
/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 * @dmatables_region: Optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
		dma_addr_t *lli_table_addr_ptr,
		u32 *num_entries_ptr,
		u32 *table_data_size_ptr,
		void **dmatables_region,
		struct sep_dma_context *dma_ctx)
{
	struct sep_lli_entry *lli_table_ptr;

	/* Find the area for new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (dmatables_region && *dmatables_region)
		lli_table_ptr = *dmatables_region;

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* Set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* Update the number of created tables */
	dma_ctx->num_lli_tables_created++;
}
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user/kernel virtual address of the data buffer
 * @data_size: size of the data
 * @block_size: cipher block size for alignment
 * @lli_table_ptr: returned bus address of the first input lli table
 * @num_entries_ptr: returned number of entries in the first table
 * @table_data_size_ptr: returned data size covered by the first table
 * @is_kva: set for kernel data (kernel crypt io call)
 * @dmatables_region: Optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	void *lli_table_alloc_addr = NULL;
	void *dma_lli_table_alloc_addr = NULL;
	void *dma_in_lli_table_ptr = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prepare input dma tbl data size: (hex) %x\n",
		current->pid, data_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
		current->pid, block_size);

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(sep,
						dmatables_region,
						dma_ctx,
						1);
			if (error)
				return error;
		}
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr,
				dmatables_region, dma_ctx);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] output sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries =
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

	dma_lli_table_alloc_addr = lli_table_alloc_addr;
	if (dmatables_region) {
		error = sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					sep_lli_entries);
		if (error)
			goto end_function_error;
		lli_table_alloc_addr = *dmatables_region;
	}

	/* Loop till all the entries in in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (dma_lli_table_alloc_addr >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* Update the number of created tables */
		dma_ctx->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output table_data_size is (hex) %x\n",
			current->pid,
			table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
					dma_in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
			*num_entries_ptr, *table_data_size_ptr);
	}

	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(lli_array_ptr);
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
	return error;
}
/**
 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
 * @sep: pointer to struct sep_device
 * @lli_in_array:
 * @sep_in_lli_entries:
 * @lli_out_array:
 * @sep_out_lli_entries:
 * @block_size:
 * @lli_table_in_ptr:
 * @lli_table_out_ptr:
 * @in_num_entries_ptr:
 * @out_num_entries_ptr:
 * @table_data_size_ptr:
 *
 * This function creates the input and output DMA tables for
 * symmetric operations (AES/DES) according to the block
 * size from LLI arrays
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_construct_dma_tables_from_lli(
	struct sep_device *sep,
	struct sep_lli_entry *lli_in_array,
	u32 sep_in_lli_entries,
	struct sep_lli_entry *lli_out_array,
	u32 sep_out_lli_entries,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	/* Points to the area where next lli table can be allocated */
	void *lli_table_alloc_addr = NULL;
	/*
	 * Points to the area in shared region where next lli table
	 * can be allocated
	 */
	void *dma_lli_table_alloc_addr = NULL;
	/* Input lli table in dmatables_region or shared region */
	struct sep_lli_entry *in_lli_table_ptr = NULL;
	/* Input lli table location in the shared region */
	struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
	/* Output lli table in dmatables_region or shared region */
	struct sep_lli_entry *out_lli_table_ptr = NULL;
	/* Output lli table location in the shared region */
	struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_in_entry_ptr = NULL;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_out_entry_ptr = NULL;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_in_entry = 0;
	/* Points to the first entry to be processed in the lli_out_array */
	u32 current_out_entry = 0;
	/* Max size of the input table */
	u32 in_table_data_size = 0;
	/* Max size of the output table */
	u32 out_table_data_size = 0;
	/* Flag that signifies if this is the last tables build */
	u32 last_table_flag = 0;
	/* The data size that should be in table */
	u32 table_data_size = 0;
	/* Number of entries in the input table */
	u32 num_entries_in_table = 0;
	/* Number of entries in the output table */
	u32 num_entries_out_table = 0;

	if (!dma_ctx) {
		dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
		return -EINVAL;
	}
2173 /* Initiate to point after the message area */
2174 lli_table_alloc_addr
= (void *)(sep
->shared_addr
+
2175 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2176 (dma_ctx
->num_lli_tables_created
*
2177 (sizeof(struct sep_lli_entry
) *
2178 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
)));
2179 dma_lli_table_alloc_addr
= lli_table_alloc_addr
;
2181 if (dmatables_region
) {
2182 /* 2 for both in+out table */
2183 if (sep_allocate_dmatables_region(sep
,
2186 2*sep_in_lli_entries
))
2188 lli_table_alloc_addr
= *dmatables_region
;
2191 /* Loop till all the entries in in array are not processed */
2192 while (current_in_entry
< sep_in_lli_entries
) {
2193 /* Set the new input and output tables */
2195 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2196 dma_in_lli_table_ptr
=
2197 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2199 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2200 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2201 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2202 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2204 /* Set the first output tables */
2206 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2207 dma_out_lli_table_ptr
=
2208 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2210 /* Check if the DMA table area limit was overrun */
2211 if ((dma_lli_table_alloc_addr
+ sizeof(struct sep_lli_entry
) *
2212 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
) >
2213 ((void *)sep
->shared_addr
+
2214 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2215 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
)) {
2217 dev_warn(&sep
->pdev
->dev
, "dma table limit overrun\n");
2221 /* Update the number of the lli tables created */
2222 dma_ctx
->num_lli_tables_created
+= 2;
2224 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2225 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2226 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2227 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2229 /* Calculate the maximum size of data for input table */
2230 in_table_data_size
=
2231 sep_calculate_lli_table_max_size(sep
,
2232 &lli_in_array
[current_in_entry
],
2233 (sep_in_lli_entries
- current_in_entry
),
2236 /* Calculate the maximum size of data for output table */
2237 out_table_data_size
=
2238 sep_calculate_lli_table_max_size(sep
,
2239 &lli_out_array
[current_out_entry
],
2240 (sep_out_lli_entries
- current_out_entry
),
2243 if (!last_table_flag
) {
2244 in_table_data_size
= (in_table_data_size
/
2245 block_size
) * block_size
;
2246 out_table_data_size
= (out_table_data_size
/
2247 block_size
) * block_size
;
2250 table_data_size
= in_table_data_size
;
2251 if (table_data_size
> out_table_data_size
)
2252 table_data_size
= out_table_data_size
;
2254 dev_dbg(&sep
->pdev
->dev
,
2255 "[PID%d] construct tables from lli"
2256 " in_table_data_size is (hex) %x\n", current
->pid
,
2257 in_table_data_size
);
2259 dev_dbg(&sep
->pdev
->dev
,
2260 "[PID%d] construct tables from lli"
2261 "out_table_data_size is (hex) %x\n", current
->pid
,
2262 out_table_data_size
);
2264 /* Construct input lli table */
2265 sep_build_lli_table(sep
, &lli_in_array
[current_in_entry
],
2268 &num_entries_in_table
,
2271 /* Construct output lli table */
2272 sep_build_lli_table(sep
, &lli_out_array
[current_out_entry
],
2275 &num_entries_out_table
,
2278 /* If info entry is null - this is the first table built */
2279 if (info_in_entry_ptr
== NULL
|| info_out_entry_ptr
== NULL
) {
2280 /* Set the output parameters to physical addresses */
2282 sep_shared_area_virt_to_bus(sep
, dma_in_lli_table_ptr
);
2284 *in_num_entries_ptr
= num_entries_in_table
;
2286 *lli_table_out_ptr
=
2287 sep_shared_area_virt_to_bus(sep
,
2288 dma_out_lli_table_ptr
);
2290 *out_num_entries_ptr
= num_entries_out_table
;
2291 *table_data_size_ptr
= table_data_size
;
2293 dev_dbg(&sep
->pdev
->dev
,
2294 "[PID%d] output lli_table_in_ptr is %08lx\n",
2296 (unsigned long)*lli_table_in_ptr
);
2297 dev_dbg(&sep
->pdev
->dev
,
2298 "[PID%d] output lli_table_out_ptr is %08lx\n",
2300 (unsigned long)*lli_table_out_ptr
);
2302 /* Update the info entry of the previous in table */
2303 info_in_entry_ptr
->bus_address
=
2304 sep_shared_area_virt_to_bus(sep
,
2305 dma_in_lli_table_ptr
);
2307 info_in_entry_ptr
->block_size
=
2308 ((num_entries_in_table
) << 24) |
2311 /* Update the info entry of the previous in table */
2312 info_out_entry_ptr
->bus_address
=
2313 sep_shared_area_virt_to_bus(sep
,
2314 dma_out_lli_table_ptr
);
2316 info_out_entry_ptr
->block_size
=
2317 ((num_entries_out_table
) << 24) |
2320 dev_dbg(&sep
->pdev
->dev
,
2321 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2323 (unsigned long)info_in_entry_ptr
->bus_address
,
2324 info_in_entry_ptr
->block_size
);
2326 dev_dbg(&sep
->pdev
->dev
,
2327 "[PID%d] output lli_table_out_ptr:"
2330 (unsigned long)info_out_entry_ptr
->bus_address
,
2331 info_out_entry_ptr
->block_size
);
2334 /* Save the pointer to the info entry of the current tables */
2335 info_in_entry_ptr
= in_lli_table_ptr
+
2336 num_entries_in_table
- 1;
2337 info_out_entry_ptr
= out_lli_table_ptr
+
2338 num_entries_out_table
- 1;
2340 dev_dbg(&sep
->pdev
->dev
,
2341 "[PID%d] output num_entries_out_table is %x\n",
2343 (u32
)num_entries_out_table
);
2344 dev_dbg(&sep
->pdev
->dev
,
2345 "[PID%d] output info_in_entry_ptr is %lx\n",
2347 (unsigned long)info_in_entry_ptr
);
2348 dev_dbg(&sep
->pdev
->dev
,
2349 "[PID%d] output info_out_entry_ptr is %lx\n",
2351 (unsigned long)info_out_entry_ptr
);
2354 /* Print input tables */
2355 if (!dmatables_region
) {
2356 sep_debug_print_lli_tables(
2358 (struct sep_lli_entry
*)
2359 sep_shared_area_bus_to_virt(sep
, *lli_table_in_ptr
),
2360 *in_num_entries_ptr
,
2361 *table_data_size_ptr
);
2364 /* Print output tables */
2365 if (!dmatables_region
) {
2366 sep_debug_print_lli_tables(
2368 (struct sep_lli_entry
*)
2369 sep_shared_area_bus_to_virt(sep
, *lli_table_out_ptr
),
2370 *out_num_entries_ptr
,
2371 *table_data_size_ptr
);
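
/*
 * Illustrative sketch (not part of the driver): in the in/out case above,
 * each input/output table pair must describe the same number of bytes, so
 * the common table_data_size is the smaller of the two block-aligned
 * candidates. A minimal restatement of that choice:
 *
 *	u32 common_size(u32 in_size, u32 out_size)
 *	{
 *		return (in_size > out_size) ? out_size : in_size;
 *	}
 *
 * Both LLI walks (current_in_entry and current_out_entry) then advance by
 * this common size, keeping the input and output tables in lockstep.
 */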
/**
 * sep_prepare_input_output_dma_table - prepare DMA I/O table
 * @app_virt_in_addr:
 * @app_virt_out_addr:
 * @data_size:
 * @block_size:
 * @lli_table_in_ptr:
 * @lli_table_out_ptr:
 * @in_num_entries_ptr:
 * @out_num_entries_ptr:
 * @table_data_size_ptr:
 * @is_kva: set for kernel data; used only for kernel crypto module
 *
 * This function builds input and output DMA tables for synchronic
 * symmetric operations (AES, DES, HASH). It also checks that each table
 * is of the modular block size
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
	unsigned long app_virt_in_addr,
	unsigned long app_virt_out_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_in_array;
	/* Array of pointers of page */
	struct sep_lli_entry *lli_out_array;
2422 if (data_size
== 0) {
2423 /* Prepare empty table for input and output */
2424 if (dmatables_region
) {
2425 error
= sep_allocate_dmatables_region(
2433 sep_prepare_empty_lli_table(sep
, lli_table_in_ptr
,
2434 in_num_entries_ptr
, table_data_size_ptr
,
2435 dmatables_region
, dma_ctx
);
2437 sep_prepare_empty_lli_table(sep
, lli_table_out_ptr
,
2438 out_num_entries_ptr
, table_data_size_ptr
,
2439 dmatables_region
, dma_ctx
);
2441 goto update_dcb_counter
;
2444 /* Initialize the pages pointers */
2445 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2446 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
2448 /* Lock the pages of the buffer and translate them to pages */
2449 if (is_kva
== true) {
2450 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking kernel input pages\n",
2452 error
= sep_lock_kernel_pages(sep
, app_virt_in_addr
,
2453 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
,
2456 dev_warn(&sep
->pdev
->dev
,
2457 "[PID%d] sep_lock_kernel_pages for input "
2458 "virtual buffer failed\n", current
->pid
);
2463 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking kernel output pages\n",
2465 error
= sep_lock_kernel_pages(sep
, app_virt_out_addr
,
2466 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
,
2470 dev_warn(&sep
->pdev
->dev
,
2471 "[PID%d] sep_lock_kernel_pages for output "
2472 "virtual buffer failed\n", current
->pid
);
2474 goto end_function_free_lli_in
;
2480 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking user input pages\n",
2482 error
= sep_lock_user_pages(sep
, app_virt_in_addr
,
2483 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
,
2486 dev_warn(&sep
->pdev
->dev
,
2487 "[PID%d] sep_lock_user_pages for input "
2488 "virtual buffer failed\n", current
->pid
);
2493 if (dma_ctx
->secure_dma
== true) {
2494 /* secure_dma requires use of non accessible memory */
2495 dev_dbg(&sep
->pdev
->dev
, "[PID%d] in secure_dma\n",
2497 error
= sep_lli_table_secure_dma(sep
,
2498 app_virt_out_addr
, data_size
, &lli_out_array
,
2499 SEP_DRIVER_OUT_FLAG
, dma_ctx
);
2501 dev_warn(&sep
->pdev
->dev
,
2502 "[PID%d] secure dma table setup "
2503 " for output virtual buffer failed\n",
2506 goto end_function_free_lli_in
;
2509 /* For normal, non-secure dma */
2510 dev_dbg(&sep
->pdev
->dev
, "[PID%d] not in secure_dma\n",
2513 dev_dbg(&sep
->pdev
->dev
,
2514 "[PID%d] Locking user output pages\n",
2517 error
= sep_lock_user_pages(sep
, app_virt_out_addr
,
2518 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
,
2522 dev_warn(&sep
->pdev
->dev
,
2523 "[PID%d] sep_lock_user_pages"
2524 " for output virtual buffer failed\n",
2527 goto end_function_free_lli_in
;
2532 dev_dbg(&sep
->pdev
->dev
,
2533 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2535 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
);
2537 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_out_num_pages is (hex) %x\n",
2539 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
);
2541 dev_dbg(&sep
->pdev
->dev
,
2542 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2543 current
->pid
, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
2545 /* Call the function that creates table from the lli arrays */
2546 dev_dbg(&sep
->pdev
->dev
, "[PID%d] calling create table from lli\n",
2548 error
= sep_construct_dma_tables_from_lli(
2550 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
2553 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
2555 block_size
, lli_table_in_ptr
, lli_table_out_ptr
,
2556 in_num_entries_ptr
, out_num_entries_ptr
,
2557 table_data_size_ptr
, dmatables_region
, dma_ctx
);
2560 dev_warn(&sep
->pdev
->dev
,
2561 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2563 goto end_function_with_error
;
2566 kfree(lli_out_array
);
2567 kfree(lli_in_array
);
2570 /* Update DCB counter */
2571 dma_ctx
->nr_dcb_creat
++;
2575 end_function_with_error
:
2576 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
);
2577 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
= NULL
;
2578 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
);
2579 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
2580 kfree(lli_out_array
);
2583 end_function_free_lli_in
:
2584 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
);
2585 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
= NULL
;
2586 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
);
2587 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2588 kfree(lli_in_array
);
/**
 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
 * @app_in_address: unsigned long; for data buffer in (user space)
 * @app_out_address: unsigned long; for data buffer out (user space)
 * @data_in_size: u32; for size of data
 * @block_size: u32; for block size
 * @tail_block_size: u32; for size of tail block
 * @isapplet: bool; to indicate external app
 * @is_kva: bool; kernel buffer; only used for kernel crypto module
 * @secure_dma: indicates whether this is secure_dma using IMR
 *
 * This function prepares the linked DMA tables and puts the
 * address for the linked list of tables into a DCB (data control
 * block) the address of which is known by the SEP hardware
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
	unsigned long app_in_address,
	unsigned long app_out_address,
	u32 data_in_size,
	u32 block_size,
	u32 tail_block_size,
	bool isapplet,
	bool is_kva,
	bool secure_dma,
	struct sep_dcblock *dcb_region,
	void **dmatables_region,
	struct sep_dma_context **dma_ctx,
	struct scatterlist *src_sg,
	struct scatterlist *dst_sg)
{
2631 /* Address of the created DCB table */
2632 struct sep_dcblock
*dcb_table_ptr
= NULL
;
2633 /* The physical address of the first input DMA table */
2634 dma_addr_t in_first_mlli_address
= 0;
2635 /* Number of entries in the first input DMA table */
2636 u32 in_first_num_entries
= 0;
2637 /* The physical address of the first output DMA table */
2638 dma_addr_t out_first_mlli_address
= 0;
2639 /* Number of entries in the first output DMA table */
2640 u32 out_first_num_entries
= 0;
2641 /* Data in the first input/output table */
2642 u32 first_data_size
= 0;
2644 dev_dbg(&sep
->pdev
->dev
, "[PID%d] app_in_address %lx\n",
2645 current
->pid
, app_in_address
);
2647 dev_dbg(&sep
->pdev
->dev
, "[PID%d] app_out_address %lx\n",
2648 current
->pid
, app_out_address
);
2650 dev_dbg(&sep
->pdev
->dev
, "[PID%d] data_in_size %x\n",
2651 current
->pid
, data_in_size
);
2653 dev_dbg(&sep
->pdev
->dev
, "[PID%d] block_size %x\n",
2654 current
->pid
, block_size
);
2656 dev_dbg(&sep
->pdev
->dev
, "[PID%d] tail_block_size %x\n",
2657 current
->pid
, tail_block_size
);
2659 dev_dbg(&sep
->pdev
->dev
, "[PID%d] isapplet %x\n",
2660 current
->pid
, isapplet
);
2662 dev_dbg(&sep
->pdev
->dev
, "[PID%d] is_kva %x\n",
2663 current
->pid
, is_kva
);
2665 dev_dbg(&sep
->pdev
->dev
, "[PID%d] src_sg %p\n",
2666 current
->pid
, src_sg
);
2668 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dst_sg %p\n",
2669 current
->pid
, dst_sg
);
2672 dev_warn(&sep
->pdev
->dev
, "[PID%d] no DMA context pointer\n",
2679 /* In case there are multiple DCBs for this transaction */
2680 dev_dbg(&sep
->pdev
->dev
, "[PID%d] DMA context already set\n",
2683 *dma_ctx
= kzalloc(sizeof(**dma_ctx
), GFP_KERNEL
);
2685 dev_dbg(&sep
->pdev
->dev
,
2686 "[PID%d] Not enough memory for DMA context\n",
2691 dev_dbg(&sep
->pdev
->dev
,
2692 "[PID%d] Created DMA context addr at 0x%p\n",
2693 current
->pid
, *dma_ctx
);
2696 (*dma_ctx
)->secure_dma
= secure_dma
;
2698 /* these are for kernel crypto only */
2699 (*dma_ctx
)->src_sg
= src_sg
;
2700 (*dma_ctx
)->dst_sg
= dst_sg
;
2702 if ((*dma_ctx
)->nr_dcb_creat
== SEP_MAX_NUM_SYNC_DMA_OPS
) {
2703 /* No more DCBs to allocate */
2704 dev_dbg(&sep
->pdev
->dev
, "[PID%d] no more DCBs available\n",
2707 goto end_function_error
;
2710 /* Allocate new DCB */
2712 dcb_table_ptr
= dcb_region
;
2714 dcb_table_ptr
= (struct sep_dcblock
*)(sep
->shared_addr
+
2715 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
+
2716 ((*dma_ctx
)->nr_dcb_creat
*
2717 sizeof(struct sep_dcblock
)));
2720 /* Set the default values in the DCB */
2721 dcb_table_ptr
->input_mlli_address
= 0;
2722 dcb_table_ptr
->input_mlli_num_entries
= 0;
2723 dcb_table_ptr
->input_mlli_data_size
= 0;
2724 dcb_table_ptr
->output_mlli_address
= 0;
2725 dcb_table_ptr
->output_mlli_num_entries
= 0;
2726 dcb_table_ptr
->output_mlli_data_size
= 0;
2727 dcb_table_ptr
->tail_data_size
= 0;
2728 dcb_table_ptr
->out_vr_tail_pt
= 0;
2730 if (isapplet
== true) {
2732 /* Check if there is enough data for DMA operation */
2733 if (data_in_size
< SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
) {
2734 if (is_kva
== true) {
2736 goto end_function_error
;
2738 if (copy_from_user(dcb_table_ptr
->tail_data
,
2739 (void __user
*)app_in_address
,
2742 goto end_function_error
;
2746 dcb_table_ptr
->tail_data_size
= data_in_size
;
2748 /* Set the output user-space address for mem2mem op */
2749 if (app_out_address
)
2750 dcb_table_ptr
->out_vr_tail_pt
=
2751 (aligned_u64
)app_out_address
;
2754 * Update both data length parameters in order to avoid
2755 * second data copy and allow building of empty mlli
2762 if (!app_out_address
) {
2763 tail_size
= data_in_size
% block_size
;
2765 if (tail_block_size
== block_size
)
2766 tail_size
= block_size
;
2773 if (tail_size
> sizeof(dcb_table_ptr
->tail_data
))
2775 if (is_kva
== true) {
2777 goto end_function_error
;
2779 /* We have tail data - copy it to DCB */
2780 if (copy_from_user(dcb_table_ptr
->tail_data
,
2781 (void __user
*)(app_in_address
+
2782 data_in_size
- tail_size
), tail_size
)) {
2784 goto end_function_error
;
2787 if (app_out_address
)
2789 * Calculate the output address
2790 * according to tail data size
2792 dcb_table_ptr
->out_vr_tail_pt
=
2793 (aligned_u64
)app_out_address
+
2794 data_in_size
- tail_size
;
2796 /* Save the real tail data size */
2797 dcb_table_ptr
->tail_data_size
= tail_size
;
2799 * Update the data size without the tail
2800 * data size AKA data for the dma
2802 data_in_size
= (data_in_size
- tail_size
);
2805 /* Check if we need to build only input table or input/output */
2806 if (app_out_address
) {
2807 /* Prepare input/output tables */
2808 error
= sep_prepare_input_output_dma_table(sep
,
2813 &in_first_mlli_address
,
2814 &out_first_mlli_address
,
2815 &in_first_num_entries
,
2816 &out_first_num_entries
,
2822 /* Prepare input tables */
2823 error
= sep_prepare_input_dma_table(sep
,
2827 &in_first_mlli_address
,
2828 &in_first_num_entries
,
2836 dev_warn(&sep
->pdev
->dev
,
2837 "prepare DMA table call failed "
2838 "from prepare DCB call\n");
2839 goto end_function_error
;
2842 /* Set the DCB values */
2843 dcb_table_ptr
->input_mlli_address
= in_first_mlli_address
;
2844 dcb_table_ptr
->input_mlli_num_entries
= in_first_num_entries
;
2845 dcb_table_ptr
->input_mlli_data_size
= first_data_size
;
2846 dcb_table_ptr
->output_mlli_address
= out_first_mlli_address
;
2847 dcb_table_ptr
->output_mlli_num_entries
= out_first_num_entries
;
2848 dcb_table_ptr
->output_mlli_data_size
= first_data_size
;
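
/*
 * Illustrative sketch (not part of the driver): how the applet tail split
 * above behaves for a hypothetical 100-byte request with a 16-byte block
 * size and no separate output buffer:
 *
 *	u32 data_in_size = 100, block_size = 16;
 *	u32 tail_size = data_in_size % block_size;	// 4 bytes
 *	// tail_size bytes are copied into dcb->tail_data via copy_from_user()
 *	data_in_size -= tail_size;			// 96 bytes go through DMA
 *
 * so only whole blocks are described by the MLLI tables, and the remainder
 * travels inside the DCB itself.
 */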
/**
 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
 * @sep: pointer to struct sep_device
 * @isapplet: indicates external application (used for kernel access)
 * @is_kva: indicates kernel addresses (only used for kernel crypto)
 *
 * This function frees the DMA tables and DCB
 */
static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
	bool is_kva, struct sep_dma_context **dma_ctx)
{
	struct sep_dcblock *dcb_table_ptr;
	unsigned long pt_hold;
	void *tail_pt;

	int i = 0;
	int error = 0;
	int error_temp = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
		current->pid);

	if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
		return 0;

	if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
			current->pid);

		/* Tail stuff is only for non secure_dma */
		/* Set pointer to first DCB table */
		dcb_table_ptr = (struct sep_dcblock *)
			(sep->shared_addr +
			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);

		/*
		 * Go over each DCB and see if
		 * tail pointer must be updated
		 */
		for (i = 0; i < (*dma_ctx)->nr_dcb_creat;
		     i++, dcb_table_ptr++) {
			if (dcb_table_ptr->out_vr_tail_pt) {
				pt_hold = (unsigned long)dcb_table_ptr->
					out_vr_tail_pt;
				tail_pt = (void *)pt_hold;
				if (is_kva == true) {
					error = -ENODEV;
					break;
				} else {
					error_temp = copy_to_user(
						(void __user *)tail_pt,
						dcb_table_ptr->tail_data,
						dcb_table_ptr->tail_data_size);
				}
				if (error_temp) {
					/* Release the DMA resource */
					error = -EFAULT;
					break;
				}
			}
		}
	}

	/* Free the output pages, if any */
	sep_free_dma_table_data_handler(sep, dma_ctx);

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
		current->pid);

	return error;
}
/**
 * sep_prepare_dcb_handler - prepare a control block
 * @sep: pointer to struct sep_device
 * @arg: pointer to user parameters
 * @secure_dma: indicate whether we are using secure_dma on IMR
 *
 * This function will retrieve the RAR buffer physical addresses, type
 * & size corresponding to the RAR handles provided in the buffers vector.
 */
static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
				   bool secure_dma,
				   struct sep_dma_context **dma_ctx)
{
	int error;
	/* Command arguments */
	static struct build_dcb_struct command_args;

	/* Get the command arguments */
	if (copy_from_user(&command_args, (void __user *)arg,
					sizeof(struct build_dcb_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prep dcb handler app_in_address is %08llx\n",
		current->pid, command_args.app_in_address);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] app_out_address is %08llx\n",
		current->pid, command_args.app_out_address);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] data_size is %x\n",
		current->pid, command_args.data_in_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] block_size is %x\n",
		current->pid, command_args.block_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] tail block_size is %x\n",
		current->pid, command_args.tail_block_size);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] is_applet is %x\n",
		current->pid, command_args.is_applet);

	if (!command_args.app_in_address) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] null app_in_address\n", current->pid);
		error = -EINVAL;
		goto end_function;
	}

	error = sep_prepare_input_output_dma_table_in_dcb(sep,
			(unsigned long)command_args.app_in_address,
			(unsigned long)command_args.app_out_address,
			command_args.data_in_size, command_args.block_size,
			command_args.tail_block_size,
			command_args.is_applet, false,
			secure_dma, NULL, NULL, dma_ctx, NULL, NULL);

end_function:
	return error;
}
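
/*
 * Illustrative user-space sketch (not part of the driver): preparing a DCB
 * through the legacy ioctl path. Field names follow struct build_dcb_struct
 * as used above; the buffer names and values are hypothetical.
 *
 *	struct build_dcb_struct args = {0};
 *	args.app_in_address  = (unsigned long)in_buf;
 *	args.app_out_address = (unsigned long)out_buf;
 *	args.data_in_size    = len;
 *	args.block_size      = 16;	// AES block size
 *	args.tail_block_size = 16;
 *	args.is_applet       = 0;
 *	if (ioctl(fd, SEP_IOCPREPAREDCB, &args) < 0)
 *		perror("SEP_IOCPREPAREDCB");
 */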
/**
 * sep_free_dcb_handler - free control block resources
 * @sep: pointer to struct sep_device
 *
 * This function frees the DCB resources and updates the needed
 * user-space buffers.
 */
static int sep_free_dcb_handler(struct sep_device *sep,
				struct sep_dma_context **dma_ctx)
{
	if (!dma_ctx || !(*dma_ctx)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no dma context defined, nothing to free\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
}
/**
 * sep_ioctl - ioctl handler for sep device
 * @filp: pointer to struct file
 * @cmd: command
 * @arg: pointer to argument structure
 *
 * Implement the ioctl methods available on the SEP device.
 */
3027 static long sep_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
3029 struct sep_private_data
* const private_data
= filp
->private_data
;
3030 struct sep_call_status
*call_status
= &private_data
->call_status
;
3031 struct sep_device
*sep
= private_data
->device
;
3032 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
3033 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
3036 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl cmd 0x%x\n",
3038 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dma context addr 0x%p\n",
3039 current
->pid
, *dma_ctx
);
3041 /* Make sure we own this device */
3042 error
= sep_check_transaction_owner(sep
);
3044 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl pid is not owner\n",
3049 /* Check that sep_mmap has been called before */
3050 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET
,
3051 &call_status
->status
)) {
3052 dev_dbg(&sep
->pdev
->dev
,
3053 "[PID%d] mmap not called\n", current
->pid
);
3058 /* Check that the command is for SEP device */
3059 if (_IOC_TYPE(cmd
) != SEP_IOC_MAGIC_NUMBER
) {
3065 case SEP_IOCSENDSEPCOMMAND
:
3066 dev_dbg(&sep
->pdev
->dev
,
3067 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3069 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3070 &call_status
->status
)) {
3071 dev_warn(&sep
->pdev
->dev
,
3072 "[PID%d] send msg already done\n",
3077 /* Send command to SEP */
3078 error
= sep_send_command_handler(sep
);
3080 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3081 &call_status
->status
);
3082 dev_dbg(&sep
->pdev
->dev
,
3083 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3086 case SEP_IOCENDTRANSACTION
:
3087 dev_dbg(&sep
->pdev
->dev
,
3088 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3090 error
= sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
3092 dev_dbg(&sep
->pdev
->dev
,
3093 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3096 case SEP_IOCPREPAREDCB
:
3097 dev_dbg(&sep
->pdev
->dev
,
3098 "[PID%d] SEP_IOCPREPAREDCB start\n",
3100 case SEP_IOCPREPAREDCB_SECURE_DMA
:
3101 dev_dbg(&sep
->pdev
->dev
,
3102 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3104 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3105 &call_status
->status
)) {
3106 dev_dbg(&sep
->pdev
->dev
,
3107 "[PID%d] dcb prep needed before send msg\n",
3114 dev_dbg(&sep
->pdev
->dev
,
3115 "[PID%d] dcb null arg\n", current
->pid
);
3120 if (cmd
== SEP_IOCPREPAREDCB
) {
3122 dev_dbg(&sep
->pdev
->dev
,
3123 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3126 error
= sep_prepare_dcb_handler(sep
, arg
, false,
3130 dev_dbg(&sep
->pdev
->dev
,
3131 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3134 error
= sep_prepare_dcb_handler(sep
, arg
, true,
3137 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dcb's end\n",
3140 case SEP_IOCFREEDCB
:
3141 dev_dbg(&sep
->pdev
->dev
, "[PID%d] SEP_IOCFREEDCB start\n",
3143 case SEP_IOCFREEDCB_SECURE_DMA
:
3144 dev_dbg(&sep
->pdev
->dev
,
3145 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3147 error
= sep_free_dcb_handler(sep
, dma_ctx
);
3148 dev_dbg(&sep
->pdev
->dev
, "[PID%d] SEP_IOCFREEDCB end\n",
3153 dev_dbg(&sep
->pdev
->dev
, "[PID%d] default end\n",
3159 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl end\n", current
->pid
);
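
/*
 * Illustrative ordering sketch (not part of the driver): the ownership,
 * mmap and send-message checks in sep_ioctl() above imply the legacy call
 * sequence below for one transaction. This is an assumption drawn from
 * those checks, not a documented ABI; error handling is omitted.
 *
 *	mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb_args);    // must precede send msg
 *	// place the request message in the mapped shared area
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);
 *	// wait for completion, collect results
 *	ioctl(fd, SEP_IOCFREEDCB, 0);
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
 */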
/**
 * sep_inthandler - interrupt handler for sep device
 * @irq: interrupt
 * @dev_id: device id
 */
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	unsigned long lock_irq_flag;
	u32 reg_val, reg_val2 = 0;
	struct sep_device *sep = dev_id;
	irqreturn_t int_error = IRQ_HANDLED;

	/* Are we in power save? */
#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
		dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
		return IRQ_NONE;
	}
#endif

	if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
		dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
		return IRQ_NONE;
	}

	/* Read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

	dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);

	if (reg_val & (0x1 << 13)) {

		/* Lock and update the counter of reply messages */
		spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
		sep->reply_ct++;
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
			sep->send_ct, sep->reply_ct);

		/* Is this a kernel client request */
		if (sep->in_kernel) {
			tasklet_schedule(&sep->finish_tasklet);
			goto finished_interrupt;
		}

		/* Is this printf or daemon request? */
		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"SEP Interrupt - GPR2 is %08x\n", reg_val2);

		clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);

		if ((reg_val2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "int: printf request\n");
		} else if (reg_val2 >> 31) {
			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
		} else {
			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
			wake_up(&sep->event_interrupt);
		}
	} else {
		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
		int_error = IRQ_NONE;
	}

finished_interrupt:

	if (int_error == IRQ_HANDLED)
		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);

	return int_error;
}
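
/*
 * Illustrative note (not part of the driver): the handler above only treats
 * the interrupt as ours when IRR bit 13 is set, which is the single source
 * left unmasked when the probe/resume paths program the IMR:
 *
 *	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, ~(0x1 << 13)); // unmask GPR2
 *	...
 *	if (reg_val & (0x1 << 13))	// IRR: reply from SEP pending
 *		handle_reply();
 *
 * Everything else is reported as IRQ_NONE, so sharing the IRQ line
 * (the probe path requests it with IRQF_SHARED) keeps working.
 */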
/**
 * sep_reconfig_shared_area - reconfigure shared area
 * @sep: pointer to struct sep_device
 *
 * Reconfig the shared area between HOST and SEP - needed in case
 * the DX_CC_Init function was called before OS loading.
 */
static int sep_reconfig_shared_area(struct sep_device *sep)
{
	int ret_val;

	/* use to limit waiting for SEP */
	unsigned long end_time;

	/* Send the new SHARED MESSAGE AREA to the SEP */
	dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
		(unsigned long long)sep->shared_bus);

	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);

	/* Poll for SEP response */
	ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	end_time = jiffies + (WAIT_TIME * HZ);

	while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
		(ret_val != sep->shared_bus))
		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	/* Check the return value (register) */
	if (ret_val != sep->shared_bus) {
		dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
		dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
		ret_val = -ENOMEM;
	} else
		ret_val = 0;

	dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");

	return ret_val;
}
/**
 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables contexts into use
 * @sep: SEP device
 * @dcb_region: DCB region copy
 * @dmatables_region: MLLI/DMA tables copy
 * @dma_ctx: DMA context for current transaction
 */
3288 ssize_t
sep_activate_dcb_dmatables_context(struct sep_device
*sep
,
3289 struct sep_dcblock
**dcb_region
,
3290 void **dmatables_region
,
3291 struct sep_dma_context
*dma_ctx
)
3293 void *dmaregion_free_start
= NULL
;
3294 void *dmaregion_free_end
= NULL
;
3295 void *dcbregion_free_start
= NULL
;
3296 void *dcbregion_free_end
= NULL
;
3299 dev_dbg(&sep
->pdev
->dev
, "[PID%d] activating dcb/dma region\n",
3302 if (1 > dma_ctx
->nr_dcb_creat
) {
3303 dev_warn(&sep
->pdev
->dev
,
3304 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3305 current
->pid
, dma_ctx
->nr_dcb_creat
);
3310 dmaregion_free_start
= sep
->shared_addr
3311 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
;
3312 dmaregion_free_end
= dmaregion_free_start
3313 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
- 1;
3315 if (dmaregion_free_start
3316 + dma_ctx
->dmatables_len
> dmaregion_free_end
) {
3320 memcpy(dmaregion_free_start
,
3322 dma_ctx
->dmatables_len
);
3323 /* Free MLLI table copy */
3324 kfree(*dmatables_region
);
3325 *dmatables_region
= NULL
;
3327 /* Copy thread's DCB table copy to DCB table region */
3328 dcbregion_free_start
= sep
->shared_addr
+
3329 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
;
3330 dcbregion_free_end
= dcbregion_free_start
+
3331 (SEP_MAX_NUM_SYNC_DMA_OPS
*
3332 sizeof(struct sep_dcblock
)) - 1;
3334 if (dcbregion_free_start
3335 + (dma_ctx
->nr_dcb_creat
* sizeof(struct sep_dcblock
))
3336 > dcbregion_free_end
) {
3341 memcpy(dcbregion_free_start
,
3343 dma_ctx
->nr_dcb_creat
* sizeof(struct sep_dcblock
));
3345 /* Print the tables */
3346 dev_dbg(&sep
->pdev
->dev
, "activate: input table\n");
3347 sep_debug_print_lli_tables(sep
,
3348 (struct sep_lli_entry
*)sep_shared_area_bus_to_virt(sep
,
3349 (*dcb_region
)->input_mlli_address
),
3350 (*dcb_region
)->input_mlli_num_entries
,
3351 (*dcb_region
)->input_mlli_data_size
);
3353 dev_dbg(&sep
->pdev
->dev
, "activate: output table\n");
3354 sep_debug_print_lli_tables(sep
,
3355 (struct sep_lli_entry
*)sep_shared_area_bus_to_virt(sep
,
3356 (*dcb_region
)->output_mlli_address
),
3357 (*dcb_region
)->output_mlli_num_entries
,
3358 (*dcb_region
)->output_mlli_data_size
);
3360 dev_dbg(&sep
->pdev
->dev
,
3361 "[PID%d] printing activated tables\n", current
->pid
);
3364 kfree(*dmatables_region
);
3365 *dmatables_region
= NULL
;
/**
 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @user_dcb_args: User arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 * @secure_dma: Indicate use of IMR restricted memory secure dma
 */
3383 static ssize_t
sep_create_dcb_dmatables_context(struct sep_device
*sep
,
3384 struct sep_dcblock
**dcb_region
,
3385 void **dmatables_region
,
3386 struct sep_dma_context
**dma_ctx
,
3387 const struct build_dcb_struct __user
*user_dcb_args
,
3388 const u32 num_dcbs
, bool secure_dma
)
3392 struct build_dcb_struct
*dcb_args
= NULL
;
3394 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating dcb/dma region\n",
3397 if (!dcb_region
|| !dma_ctx
|| !dmatables_region
|| !user_dcb_args
) {
3402 if (SEP_MAX_NUM_SYNC_DMA_OPS
< num_dcbs
) {
3403 dev_warn(&sep
->pdev
->dev
,
3404 "[PID%d] invalid number of dcbs 0x%08X\n",
3405 current
->pid
, num_dcbs
);
3410 dcb_args
= kcalloc(num_dcbs
, sizeof(struct build_dcb_struct
),
3417 if (copy_from_user(dcb_args
,
3419 num_dcbs
* sizeof(struct build_dcb_struct
))) {
3424 /* Allocate thread-specific memory for DCB */
3425 *dcb_region
= kzalloc(num_dcbs
* sizeof(struct sep_dcblock
),
3427 if (!(*dcb_region
)) {
3432 /* Prepare DCB and MLLI table into the allocated regions */
3433 for (i
= 0; i
< num_dcbs
; i
++) {
3434 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
3435 (unsigned long)dcb_args
[i
].app_in_address
,
3436 (unsigned long)dcb_args
[i
].app_out_address
,
3437 dcb_args
[i
].data_in_size
,
3438 dcb_args
[i
].block_size
,
3439 dcb_args
[i
].tail_block_size
,
3440 dcb_args
[i
].is_applet
,
3442 *dcb_region
, dmatables_region
,
3447 dev_warn(&sep
->pdev
->dev
,
3448 "[PID%d] dma table creation failed\n",
3453 if (dcb_args
[i
].app_in_address
!= 0)
3454 (*dma_ctx
)->input_data_len
+= dcb_args
[i
].data_in_size
;
/**
 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @dcb_data: Kernel arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 *
 * This does the same thing as sep_create_dcb_dmatables_context
 * except that it is used only for the kernel crypto operation. It is
 * separate because there is no user data involved; the dcb data structure
 * is specific for kernel crypto (build_dcb_struct_kernel)
 */
3477 int sep_create_dcb_dmatables_context_kernel(struct sep_device
*sep
,
3478 struct sep_dcblock
**dcb_region
,
3479 void **dmatables_region
,
3480 struct sep_dma_context
**dma_ctx
,
3481 const struct build_dcb_struct_kernel
*dcb_data
,
3487 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating dcb/dma region\n",
3490 if (!dcb_region
|| !dma_ctx
|| !dmatables_region
|| !dcb_data
) {
3495 if (SEP_MAX_NUM_SYNC_DMA_OPS
< num_dcbs
) {
3496 dev_warn(&sep
->pdev
->dev
,
3497 "[PID%d] invalid number of dcbs 0x%08X\n",
3498 current
->pid
, num_dcbs
);
3503 dev_dbg(&sep
->pdev
->dev
, "[PID%d] num_dcbs is %d\n",
3504 current
->pid
, num_dcbs
);
3506 /* Allocate thread-specific memory for DCB */
3507 *dcb_region
= kzalloc(num_dcbs
* sizeof(struct sep_dcblock
),
3509 if (!(*dcb_region
)) {
3514 /* Prepare DCB and MLLI table into the allocated regions */
3515 for (i
= 0; i
< num_dcbs
; i
++) {
3516 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
3517 (unsigned long)dcb_data
->app_in_address
,
3518 (unsigned long)dcb_data
->app_out_address
,
3519 dcb_data
->data_in_size
,
3520 dcb_data
->block_size
,
3521 dcb_data
->tail_block_size
,
3522 dcb_data
->is_applet
,
3525 *dcb_region
, dmatables_region
,
3530 dev_warn(&sep
->pdev
->dev
,
3531 "[PID%d] dma table creation failed\n",
/**
 * sep_activate_msgarea_context - Takes the message area context into use
 * @sep: SEP device
 * @msg_region: Message area context buf
 * @msg_len: Message area context buffer size
 */
static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
					    void **msg_region,
					    const size_t msg_len)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
		current->pid);

	if (!msg_region || !(*msg_region) ||
	    SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid act msgarea len 0x%08zX\n",
			 current->pid, msg_len);
		return -EINVAL;
	}

	memcpy(sep->shared_addr, *msg_region, msg_len);

	return 0;
}

/**
 * sep_create_msgarea_context - Creates message area context
 * @sep: SEP device
 * @msg_region: Msg area region buf to create for current transaction
 * @msg_user: Content for msg area region from user
 * @msg_len: Message area size
 */
static ssize_t sep_create_msgarea_context(struct sep_device *sep,
					  void **msg_region,
					  const void __user *msg_user,
					  const size_t msg_len)
{
	int error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
		current->pid);

	if (!msg_region ||
	    !msg_user ||
	    SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid creat msgarea len 0x%08zX\n",
			 current->pid, msg_len);
		error = -EINVAL;
		goto end_function;
	}

	/* Allocate thread-specific memory for message buffer */
	*msg_region = kzalloc(msg_len, GFP_KERNEL);
	if (!(*msg_region)) {
		error = -ENOMEM;
		goto end_function;
	}

	/* Copy input data to write() to allocated message buffer */
	if (copy_from_user(*msg_region, msg_user, msg_len)) {
		error = -EFAULT;
		goto end_function;
	}

end_function:
	if (error && msg_region) {
		kfree(*msg_region);
		*msg_region = NULL;
	}

	return error;
}
/**
 * sep_read - Returns results of an operation for fastcall interface
 * @filp: File pointer
 * @buf_user: User buffer for storing results
 * @count_user: User buffer size
 * @offset: File offset, not supported
 *
 * The implementation does not support reading in chunks, all data must be
 * consumed during a single read system call.
 */
3629 static ssize_t
sep_read(struct file
*filp
,
3630 char __user
*buf_user
, size_t count_user
,
3633 struct sep_private_data
* const private_data
= filp
->private_data
;
3634 struct sep_call_status
*call_status
= &private_data
->call_status
;
3635 struct sep_device
*sep
= private_data
->device
;
3636 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
3637 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
3638 ssize_t error
= 0, error_tmp
= 0;
3640 /* Am I the process that owns the transaction? */
3641 error
= sep_check_transaction_owner(sep
);
3643 dev_dbg(&sep
->pdev
->dev
, "[PID%d] read pid is not owner\n",
3648 /* Checks that user has called necessary apis */
3649 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET
,
3650 &call_status
->status
)) {
3651 dev_warn(&sep
->pdev
->dev
,
3652 "[PID%d] fastcall write not called\n",
3655 goto end_function_error
;
3659 dev_warn(&sep
->pdev
->dev
,
3660 "[PID%d] null user buffer\n",
3663 goto end_function_error
;
3667 /* Wait for SEP to finish */
3668 wait_event(sep
->event_interrupt
,
3669 test_bit(SEP_WORKING_LOCK_BIT
,
3670 &sep
->in_use_flags
) == 0);
3672 sep_dump_message(sep
);
3674 dev_dbg(&sep
->pdev
->dev
, "[PID%d] count_user = 0x%08zX\n",
3675 current
->pid
, count_user
);
3677 /* In case user has allocated bigger buffer */
3678 if (count_user
> SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
)
3679 count_user
= SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
;
3681 if (copy_to_user(buf_user
, sep
->shared_addr
, count_user
)) {
3683 goto end_function_error
;
3686 dev_dbg(&sep
->pdev
->dev
, "[PID%d] read succeeded\n", current
->pid
);
3690 /* Copy possible tail data to user and free DCB and MLLIs */
3691 error_tmp
= sep_free_dcb_handler(sep
, dma_ctx
);
3693 dev_warn(&sep
->pdev
->dev
, "[PID%d] dcb free failed\n",
3696 /* End the transaction, wakeup pending ones */
3697 error_tmp
= sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
3700 dev_warn(&sep
->pdev
->dev
,
3701 "[PID%d] ending transaction failed\n",
/**
 * sep_fastcall_args_get - Gets fastcall params from user
 * @sep: SEP device
 * @args: Parameters buffer
 * @buf_user: User buffer for operation parameters
 * @count_user: User buffer size
 */
static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
					    struct sep_fastcall_hdr *args,
					    const char __user *buf_user,
					    const size_t count_user)
{
	ssize_t error = 0;
	size_t actual_count = 0;

	if (!buf_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] null user buffer\n",
			 current->pid);
		error = -EINVAL;
		goto end_function;
	}

	if (count_user < sizeof(struct sep_fastcall_hdr)) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] too small message size 0x%08zX\n",
			 current->pid, count_user);
		error = -EINVAL;
		goto end_function;
	}

	if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
		error = -EFAULT;
		goto end_function;
	}

	if (SEP_FC_MAGIC != args->magic) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid fastcall magic 0x%08X\n",
			 current->pid, args->magic);
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
		current->pid, args->num_dcbs);
	dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
		current->pid, args->msg_len);

	if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
	    SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] invalid message length\n",
			 current->pid);
		error = -EINVAL;
		goto end_function;
	}

	actual_count = sizeof(struct sep_fastcall_hdr)
			+ args->msg_len
			+ (args->num_dcbs * sizeof(struct build_dcb_struct));

	if (actual_count != count_user) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] inconsistent message "
			 "sizes 0x%08zX vs 0x%08zX\n",
			 current->pid, actual_count, count_user);
		error = -EMSGSIZE;
		goto end_function;
	}

end_function:
	return error;
}
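
/*
 * Illustrative layout sketch (not part of the driver): a fastcall write()
 * buffer is validated above as exactly
 *
 *	struct sep_fastcall_hdr hdr;			// magic, num_dcbs, msg_len, ...
 *	struct build_dcb_struct dcb[hdr.num_dcbs];	// optional DCB requests
 *	u8 msg[hdr.msg_len];				// message for the SEP
 *
 * so count_user must equal
 *	sizeof(hdr) + hdr.num_dcbs * sizeof(struct build_dcb_struct) + hdr.msg_len
 * and anything else is rejected as an inconsistent size.
 */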
/**
 * sep_write - Starts an operation for fastcall interface
 * @filp: File pointer
 * @buf_user: User buffer for operation parameters
 * @count_user: User buffer size
 * @offset: File offset, not supported
 *
 * The implementation does not support writing in chunks,
 * all data must be given during a single write system call.
 */
3794 static ssize_t
sep_write(struct file
*filp
,
3795 const char __user
*buf_user
, size_t count_user
,
3798 struct sep_private_data
* const private_data
= filp
->private_data
;
3799 struct sep_call_status
*call_status
= &private_data
->call_status
;
3800 struct sep_device
*sep
= private_data
->device
;
3801 struct sep_dma_context
*dma_ctx
= NULL
;
3802 struct sep_fastcall_hdr call_hdr
= {0};
3803 void *msg_region
= NULL
;
3804 void *dmatables_region
= NULL
;
3805 struct sep_dcblock
*dcb_region
= NULL
;
3807 struct sep_queue_info
*my_queue_elem
= NULL
;
3808 bool my_secure_dma
; /* are we using secure_dma (IMR)? */
3810 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep dev is 0x%p\n",
3812 dev_dbg(&sep
->pdev
->dev
, "[PID%d] private_data is 0x%p\n",
3813 current
->pid
, private_data
);
3815 error
= sep_fastcall_args_get(sep
, &call_hdr
, buf_user
, count_user
);
3819 buf_user
+= sizeof(struct sep_fastcall_hdr
);
3821 if (call_hdr
.secure_dma
== 0)
3822 my_secure_dma
= false;
3824 my_secure_dma
= true;
3827 * Controlling driver memory usage by limiting amount of
3828 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3829 * of threads can progress further at a time
3831 dev_dbg(&sep
->pdev
->dev
,
3832 "[PID%d] waiting for double buffering region access\n",
3834 error
= down_interruptible(&sep
->sep_doublebuf
);
3835 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region start\n",
3838 /* Signal received */
3839 goto end_function_error
;
3844 * Prepare contents of the shared area regions for
3845 * the operation into temporary buffers
3847 if (0 < call_hdr
.num_dcbs
) {
3848 error
= sep_create_dcb_dmatables_context(sep
,
3852 (const struct build_dcb_struct __user
*)
3854 call_hdr
.num_dcbs
, my_secure_dma
);
3856 goto end_function_error_doublebuf
;
3858 buf_user
+= call_hdr
.num_dcbs
* sizeof(struct build_dcb_struct
);
3861 error
= sep_create_msgarea_context(sep
,
3866 goto end_function_error_doublebuf
;
3868 dev_dbg(&sep
->pdev
->dev
, "[PID%d] updating queue status\n",
3870 my_queue_elem
= sep_queue_status_add(sep
,
3871 ((struct sep_msgarea_hdr
*)msg_region
)->opcode
,
3872 (dma_ctx
) ? dma_ctx
->input_data_len
: 0,
3874 current
->comm
, sizeof(current
->comm
));
3876 if (!my_queue_elem
) {
3877 dev_dbg(&sep
->pdev
->dev
,
3878 "[PID%d] updating queue status error\n", current
->pid
);
3880 goto end_function_error_doublebuf
;
3883 /* Wait until current process gets the transaction */
3884 error
= sep_wait_transaction(sep
);
3887 /* Interrupted by signal, don't clear transaction */
3888 dev_dbg(&sep
->pdev
->dev
, "[PID%d] interrupted by signal\n",
3890 sep_queue_status_remove(sep
, &my_queue_elem
);
3891 goto end_function_error_doublebuf
;
3894 dev_dbg(&sep
->pdev
->dev
, "[PID%d] saving queue element\n",
3896 private_data
->my_queue_elem
= my_queue_elem
;
3898 /* Activate shared area regions for the transaction */
3899 error
= sep_activate_msgarea_context(sep
, &msg_region
,
3902 goto end_function_error_clear_transact
;
3904 sep_dump_message(sep
);
3906 if (0 < call_hdr
.num_dcbs
) {
3907 error
= sep_activate_dcb_dmatables_context(sep
,
3912 goto end_function_error_clear_transact
;
3915 /* Send command to SEP */
3916 error
= sep_send_command_handler(sep
);
3918 goto end_function_error_clear_transact
;
3920 /* Store DMA context for the transaction */
3921 private_data
->dma_ctx
= dma_ctx
;
3922 /* Update call status */
3923 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET
, &call_status
->status
);
3926 up(&sep
->sep_doublebuf
);
3927 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region end\n",
3932 end_function_error_clear_transact
:
3933 sep_end_transaction_handler(sep
, &dma_ctx
, call_status
,
3934 &private_data
->my_queue_elem
);
3936 end_function_error_doublebuf
:
3937 up(&sep
->sep_doublebuf
);
3938 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region end\n",
3943 sep_free_dma_table_data_handler(sep
, &dma_ctx
);
3947 kfree(dmatables_region
);
/**
 * sep_seek - Handler for seek system call
 * @filp: File pointer
 * @offset: File offset
 * @origin: Options for offset
 *
 * Fastcall interface does not support seeking, all reads
 * and writes are from/to offset zero
 */
static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
{
	return -ENOSYS;
}

/**
 * sep_file_operations - file operation on sep device
 * @sep_ioctl: ioctl handler from user space call
 * @sep_poll: poll handler
 * @sep_open: handles sep device open request
 * @sep_release: handles sep device release request
 * @sep_mmap: handles memory mapping requests
 * @sep_read: handles read request on sep device
 * @sep_write: handles write request on sep device
 * @sep_seek: handles seek request on sep device
 */
static const struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
	.read = sep_read,
	.write = sep_write,
	.llseek = sep_seek,
};
/**
 * sep_sysfs_read - read sysfs entry per given arguments
 * @filp: file pointer
 * @kobj: kobject pointer
 * @attr: binary file attributes
 * @buf: read to this buffer
 * @pos: offset to read
 * @count: amount of data to read
 *
 * This function is to read sysfs entries for sep driver per given arguments.
 */
static ssize_t
sep_sysfs_read(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t pos, size_t count)
{
	unsigned long lck_flags;
	size_t nleft = count;
	struct sep_device *sep = sep_dev;
	struct sep_queue_info *queue_elem = NULL;
	u32 queue_num = 0;
	u32 i = 1;

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	queue_num = sep->sep_queue_num;
	if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
		queue_num = SEP_DOUBLEBUF_USERS_LIMIT;

	if (count < sizeof(queue_num)
			+ (queue_num * sizeof(struct sep_queue_data))) {
		spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
		return -EINVAL;
	}

	memcpy(buf, &queue_num, sizeof(queue_num));
	buf += sizeof(queue_num);
	nleft -= sizeof(queue_num);

	list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
		if (i++ > queue_num)
			break;

		memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
		nleft -= sizeof(queue_elem->data);
		buf += sizeof(queue_elem->data);
	}
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return count - nleft;
}

/**
 * bin_attributes - defines attributes for queue_status
 * @attr: attributes (name & permissions)
 * @read: function pointer to read this file
 * @size: maximum size of binary attribute
 */
static const struct bin_attribute queue_status = {
	.attr = {.name = "queue_status", .mode = 0444},
	.read = sep_sysfs_read,
	.size = sizeof(u32)
		+ (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
};
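
/*
 * Illustrative user-space sketch (not part of the driver): the queue_status
 * blob produced by sep_sysfs_read() above is a u32 element count followed by
 * that many struct sep_queue_data records, so a reader could unpack it as
 * shown below, assuming the sep_queue_data layout is shared with user space:
 *
 *	u32 num;
 *	memcpy(&num, buf, sizeof(num));
 *	struct sep_queue_data *q =
 *		(struct sep_queue_data *)(buf + sizeof(num));
 *	for (u32 i = 0; i < num; i++)
 *		inspect(&q[i]);		// per-transaction status record
 *
 * The attribute is exposed under the misc device's sysfs directory with the
 * bin_attribute name "queue_status".
 */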
/**
 * sep_register_driver_with_fs - register misc devices
 * @sep: pointer to struct sep_device
 *
 * This function registers the driver with the file system
 */
static int sep_register_driver_with_fs(struct sep_device *sep)
{
	int ret_val;

	sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
	sep->miscdev_sep.name = SEP_DEV_NAME;
	sep->miscdev_sep.fops = &sep_file_operations;

	ret_val = misc_register(&sep->miscdev_sep);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
			ret_val);
		return ret_val;
	}

	ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
								&queue_status);
	if (ret_val) {
		dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
			ret_val);
		return ret_val;
	}

	return ret_val;
}
/**
 * sep_probe - probe a matching PCI device
 * @pdev: pci_device
 * @ent: pci_device_id
 *
 * Attempt to set up and configure a SEP device that has been
 * discovered by the PCI layer. Allocates all required resources.
 */
4098 static int sep_probe(struct pci_dev
*pdev
,
4099 const struct pci_device_id
*ent
)
4102 struct sep_device
*sep
= NULL
;
4104 if (sep_dev
!= NULL
) {
4105 dev_dbg(&pdev
->dev
, "only one SEP supported.\n");
4109 /* Enable the device */
4110 error
= pci_enable_device(pdev
);
4112 dev_warn(&pdev
->dev
, "error enabling pci device\n");
4116 /* Allocate the sep_device structure for this device */
4117 sep_dev
= kzalloc(sizeof(struct sep_device
), GFP_ATOMIC
);
4118 if (sep_dev
== NULL
) {
4120 goto end_function_disable_device
;
4124 * We're going to use another variable for actually
4125 * working with the device; this way, if we have
4126 * multiple devices in the future, it would be easier
4127 * to make appropriate changes
4131 sep
->pdev
= pci_dev_get(pdev
);
4133 init_waitqueue_head(&sep
->event_transactions
);
4134 init_waitqueue_head(&sep
->event_interrupt
);
4135 spin_lock_init(&sep
->snd_rply_lck
);
4136 spin_lock_init(&sep
->sep_queue_lock
);
4137 sema_init(&sep
->sep_doublebuf
, SEP_DOUBLEBUF_USERS_LIMIT
);
4139 INIT_LIST_HEAD(&sep
->sep_queue_status
);
4141 dev_dbg(&sep
->pdev
->dev
,
4142 "sep probe: PCI obtained, device being prepared\n");
4144 /* Set up our register area */
4145 sep
->reg_physical_addr
= pci_resource_start(sep
->pdev
, 0);
4146 if (!sep
->reg_physical_addr
) {
4147 dev_warn(&sep
->pdev
->dev
, "Error getting register start\n");
4149 goto end_function_free_sep_dev
;
4152 sep
->reg_physical_end
= pci_resource_end(sep
->pdev
, 0);
4153 if (!sep
->reg_physical_end
) {
4154 dev_warn(&sep
->pdev
->dev
, "Error getting register end\n");
4156 goto end_function_free_sep_dev
;
4159 sep
->reg_addr
= ioremap_nocache(sep
->reg_physical_addr
,
4160 (size_t)(sep
->reg_physical_end
- sep
->reg_physical_addr
+ 1));
4161 if (!sep
->reg_addr
) {
4162 dev_warn(&sep
->pdev
->dev
, "Error getting register virtual\n");
4164 goto end_function_free_sep_dev
;
4167 dev_dbg(&sep
->pdev
->dev
,
4168 "Register area start %llx end %llx virtual %p\n",
4169 (unsigned long long)sep
->reg_physical_addr
,
4170 (unsigned long long)sep
->reg_physical_end
,
4173 /* Allocate the shared area */
4174 sep
->shared_size
= SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
+
4175 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
+
4176 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES
+
4177 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES
+
4178 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES
;
4180 if (sep_map_and_alloc_shared_area(sep
)) {
4182 /* Allocation failed */
4183 goto end_function_error
;
4186 /* Clear ICR register */
4187 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4189 /* Set the IMR register - open only GPR 2 */
4190 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
4192 /* Read send/receive counters from SEP */
4193 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
4194 sep
->reply_ct
&= 0x3FFFFFFF;
4195 sep
->send_ct
= sep
->reply_ct
;
4197 /* Get the interrupt line */
4198 error
= request_irq(pdev
->irq
, sep_inthandler
, IRQF_SHARED
,
4202 goto end_function_deallocate_sep_shared_area
;
4204 /* The new chip requires a shared area reconfigure */
4205 error
= sep_reconfig_shared_area(sep
);
4207 goto end_function_free_irq
;
4211 /* Finally magic up the device nodes */
4212 /* Register driver with the fs */
4213 error
= sep_register_driver_with_fs(sep
);
4216 dev_err(&sep
->pdev
->dev
, "error registering dev file\n");
4217 goto end_function_free_irq
;
4220 sep
->in_use
= 0; /* through touching the device */
4221 #ifdef SEP_ENABLE_RUNTIME_PM
4222 pm_runtime_put_noidle(&sep
->pdev
->dev
);
4223 pm_runtime_allow(&sep
->pdev
->dev
);
4224 pm_runtime_set_autosuspend_delay(&sep
->pdev
->dev
,
4226 pm_runtime_use_autosuspend(&sep
->pdev
->dev
);
4227 pm_runtime_mark_last_busy(&sep
->pdev
->dev
);
4228 sep
->power_save_setup
= 1;
4230 /* register kernel crypto driver */
4231 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4232 error
= sep_crypto_setup();
4234 dev_err(&sep
->pdev
->dev
, "crypto setup failed\n");
4235 goto end_function_free_irq
;
4240 end_function_free_irq
:
4241 free_irq(pdev
->irq
, sep
);
4243 end_function_deallocate_sep_shared_area
:
4244 /* De-allocate shared area */
4245 sep_unmap_and_free_shared_area(sep
);
4248 iounmap(sep
->reg_addr
);
4250 end_function_free_sep_dev
:
4251 pci_dev_put(sep_dev
->pdev
);
4255 end_function_disable_device
:
4256 pci_disable_device(pdev
);
4263 * sep_remove - handles removing device from pci subsystem
4264 * @pdev: pointer to pci device
4266 * This function will handle removing our sep device from pci subsystem on exit
4267 * or unloading this module. It should free up all used resources, and unmap if
4268 * any memory regions mapped.
4270 static void sep_remove(struct pci_dev
*pdev
)
4272 struct sep_device
*sep
= sep_dev
;
4274 /* Unregister from fs */
4275 misc_deregister(&sep
->miscdev_sep
);
4277 /* Unregister from kernel crypto */
4278 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4279 sep_crypto_takedown();
4282 free_irq(sep
->pdev
->irq
, sep
);
4284 /* Free the shared area */
4285 sep_unmap_and_free_shared_area(sep_dev
);
4286 iounmap(sep_dev
->reg_addr
);
4288 #ifdef SEP_ENABLE_RUNTIME_PM
4291 pm_runtime_forbid(&sep
->pdev
->dev
);
4292 pm_runtime_get_noresume(&sep
->pdev
->dev
);
4295 pci_dev_put(sep_dev
->pdev
);
/* Initialize struct pci_device_id for our driver */
static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
	{0}
};
/* Export our pci_device_id structure to user space */
MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
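/*
 * MODULE_DEVICE_TABLE() embeds the ID list above in the module image so that
 * userspace (udev/modprobe matching on the device's modalias) can load this
 * driver automatically when a PCI device with one of these IDs appears.
 */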
#ifdef SEP_ENABLE_RUNTIME_PM

/**
 * sep_pci_resume - resume routine while waking up from S3 state
 * @dev: pointer to sep device
 *
 * This function is to be used to wake up the sep driver while the system
 * awakes from S3 state, i.e. suspend to RAM. The RAM is intact.
 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
 */
static int sep_pci_resume(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci resume called\n");
	if (sep->power_state == SEP_DRIVER_POWERON)
		return 0;
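	/*
	 * Since register state cannot be assumed to survive suspend-to-RAM,
	 * the interrupt cause/mask registers are reprogrammed below and the
	 * host-side send/reply counters are re-read from GPR2 rather than
	 * trusting the values cached before the suspend.
	 */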
	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	sep->power_state = SEP_DRIVER_POWERON;

	return 0;
}
/**
 * sep_pci_suspend - suspend routine while going to S3 state
 * @dev: pointer to sep device
 *
 * This function is to be used to suspend the sep driver while the system goes
 * to S3 state, i.e. suspend to RAM. The RAM is intact and ON during this
 * suspend.
 * Notes - revisit with more understanding of pm, ICR/IMR
 */
static int sep_pci_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pci suspend called\n");
	if (sep->in_use == 1)
		return -EAGAIN;

	sep->power_state = SEP_DRIVER_POWEROFF;
	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR to block all */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);

	return 0;
}
/**
 * sep_pm_runtime_resume - runtime resume routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm, ICR/IMR & counters
 */
static int sep_pm_runtime_resume(struct device *dev)
{
	u32 retval2;
	u32 delay_count;
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
	/*
	 * Wait until the SCU boot is ready.
	 * This is done by iterating SCU_DELAY_ITERATION (10
	 * microseconds each) up to SCU_DELAY_MAX (50) times.
	 * This bit can be set in a random time that is less
	 * than 500 microseconds after each power resume.
	 */
	retval2 = 0;
	delay_count = 0;
	while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
		retval2 &= 0x00000008;
		if (!retval2) {
			udelay(SCU_DELAY_ITERATION);
			delay_count += 1;
		}
	}

	if (!retval2) {
		/*
		 * SCU boot bit not set; nothing more can be done here,
		 * but this is not treated as a fatal resume error.
		 */
		dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
		return 0;
	}
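	/*
	 * Bit 3 (0x00000008) of GPR3 is polled above as the SCU "boot done"
	 * indication; the loop allows it up to roughly 50 x 10 us = 500 us
	 * to appear, matching the settling time quoted in the comment.  The
	 * meaning of that bit is inferred from the surrounding comments and
	 * the warning text, not from a datasheet reference available here.
	 */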
	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);

	/* Set the IMR register - open only GPR 2 */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));

	/* Read send/receive counters from SEP */
	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
	sep->reply_ct &= 0x3FFFFFFF;
	sep->send_ct = sep->reply_ct;

	return 0;
}
/**
 * sep_pm_runtime_suspend - runtime suspend routine
 * @dev: pointer to sep device
 *
 * Notes - revisit with more understanding of pm
 */
static int sep_pm_runtime_suspend(struct device *dev)
{
	struct sep_device *sep = sep_dev;

	dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");

	/* Clear ICR register */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
	return 0;
}
/**
 * sep_pm - power management for sep driver
 * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
 * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
 * @sep_pci_suspend: suspend - main memory is still ON
 * @sep_pci_resume: resume - main memory is still ON
 */
static const struct dev_pm_ops sep_pm = {
	.runtime_resume = sep_pm_runtime_resume,
	.runtime_suspend = sep_pm_runtime_suspend,
	.resume = sep_pci_resume,
	.suspend = sep_pci_suspend,
};
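/*
 * In struct dev_pm_ops the .suspend/.resume members are the system sleep
 * (S3) callbacks invoked around suspend-to-RAM, while .runtime_suspend and
 * .runtime_resume are invoked by the runtime PM core to power the device
 * down and back up while the rest of the system keeps running.
 */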
#endif /* SEP_ENABLE_RUNTIME_PM */
/**
 * sep_pci_driver - registers this device with pci subsystem
 * @name: name identifier for this driver
 * @sep_pci_id_tbl: pointer to struct pci_device_id table
 * @sep_probe: pointer to probe function in PCI driver
 * @sep_remove: pointer to remove function in PCI driver
 */
static struct pci_driver sep_pci_driver = {
#ifdef SEP_ENABLE_RUNTIME_PM
	.driver = {
		.pm = &sep_pm,
	},
#endif
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe,
	.remove = sep_remove
};
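/*
 * module_pci_driver() below expands to the usual module_init()/module_exit()
 * pair that simply calls pci_register_driver()/pci_unregister_driver() on
 * sep_pci_driver, so no hand-written init/exit functions are needed.
 */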
module_pci_driver(sep_pci_driver);
MODULE_LICENSE("GPL");