/*
 *
 * sep_main.c - Security Processor Driver main group of functions
 *
 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 * Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * CONTACTS:
 *
 * Mark Allyn		mark.a.allyn@intel.com
 * Jayant Mangalampalli	jayant.mangalampalli@intel.com
 *
 * CHANGES:
 *
 * 2009.06.26	Initial publish
 * 2010.09.14	Upgrade to Medfield
 * 2011.01.21	Move to sep_main.c to allow for sep_crypto.c
 * 2011.02.22	Enable kernel crypto operation
 *
 * Please note that this driver is based on information in the Discretix
 * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 * Overview and Integration Guide.
 */
/* #define DEBUG */
/* #define SEP_PERF_DEBUG */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"

/*
 * Let's not spend cycles iterating over message
 * area contents if debugging not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep)	_sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif

/**
 * Currently, there is only one SEP device per platform;
 * in the event that future platforms have more than one
 * SEP device, this will become a linked list
 */

struct sep_device *sep_dev;

/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to the queue element being removed
 *
 * This function removes information about a transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
			struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
			current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
	return;
}

/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a started transaction to the status
 * queue.
 */
struct sep_queue_info *sep_queue_status_add(
			struct sep_device *sep,
			u32 opcode,
			u32 size,
			u32 pid,
			u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;

	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}
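
/*
 * Illustrative sketch (not part of the driver): a transaction typically
 * registers itself here when it starts and removes itself when it
 * completes.  The opcode and size values below are hypothetical.
 *
 *	struct sep_queue_info *elem;
 *
 *	elem = sep_queue_status_add(sep, SOME_OPCODE, data_size,
 *				    current->pid, (u8 *)current->comm,
 *				    strlen(current->comm));
 *	if (!elem)
 *		return -ENOMEM;
 *	... perform the transaction ...
 *	sep_queue_status_remove(sep, &elem);
 */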

/**
 * sep_allocate_dmatables_region - Allocates buffer for the MLLI/DMA tables
 * @sep: SEP device
 * @dmatables_region: Destination pointer for the buffer
 * @dma_ctx: DMA context for the transaction
 * @table_count: Number of MLLI/DMA tables to create
 *
 * The buffer created will not work as-is for DMA operations;
 * it needs to be copied over to the appropriate place in the
 * shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
		current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
		current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] dma context/region uninitialized\n",
			 current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
		current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
		dma_ctx->dmatables_len);
	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region)
		return -ENOMEM;

	/* Were there any previous tables that need to be preserved? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}
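
/*
 * Illustrative sketch (not part of the driver): repeated calls behave
 * like a grow-only realloc, so tables built by earlier calls survive
 * later extensions.  Error handling is elided.
 *
 *	void *region = NULL;
 *
 *	sep_allocate_dmatables_region(sep, &region, dma_ctx, 1);
 *	... build a table at the tail of region ...
 *	sep_allocate_dmatables_region(sep, &region, dma_ctx, 1);
 *	... earlier contents preserved; region extended by one slot ...
 *	kfree(region);
 */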

/**
 * sep_wait_transaction - Used for synchronizing transactions
 * @sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				  &sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
			current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies; the previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
				current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
			current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
				current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
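
/*
 * Illustrative sketch (not part of the driver): callers bracket every
 * message exchange with this pair, as sep_mmap() and sep_release()
 * below do.  The locals are hypothetical.
 *
 *	error = sep_wait_transaction(sep);
 *	if (error)
 *		return error;	(interrupted by a signal)
 *	... build message, send command, poll for the reply ...
 *	sep_end_transaction_handler(sep, &dma_ctx, call_status,
 *				    &my_queue_elem);
 */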

/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
	    (current->pid != sep->pid_doing_transaction)) {
		return -EACCES;
	}

	/* We own the transaction */
	return 0;
}

#ifdef DEBUG

/**
 * _sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This will only print the dump if DEBUG is set; it does,
 * however, follow the kernel debug print enabling.
 */
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
			current->pid, count/4, *p++);
}

#endif

/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 *
 * The size of the block is taken from sep->shared_size.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
					      sep->shared_size,
					      &sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
			current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
		current->pid,
		sep->shared_size, sep->shared_addr,
		(unsigned long long)sep->shared_bus);
	return 0;
}

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
			  sep->shared_addr, sep->shared_bus);
}

#ifdef DEBUG

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns the virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
				    dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}

#endif

/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success, otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sep = sep_dev;
	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
		current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}

/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free the DMA table for synchronic actions.
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
				    struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
		current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
			current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled differently. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (((*dma_ctx)->secure_dma == false) &&
		    (dma->out_map_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for output */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (((*dma_ctx)->secure_dma == false) &&
		    (dma->out_page_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->
						out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in lock_user_pages, which is not called for
		 * kernel crypto, which is what the scatter-gather (sg)
		 * lists are used for exclusively.
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				     dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				     dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
		current->pid);

	return 0;
}

/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: pointer to the status queue element of this transaction
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
				       struct sep_dma_context **dma_ctx,
				       struct sep_call_status *call_status,
				       struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
			current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/*
	 * Clear the message area to avoid the next transaction reading
	 * sensitive results from the previous transaction
	 */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* Start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for the next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
		current->pid);
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}


/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	sep_end_transaction_handler(sep, dma_ctx, call_status,
				    my_queue_elem);

	kfree(filp->private_data);

	return 0;
}

/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause a deadlock for an
	 * ongoing transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/*
	 * Clear the message area to avoid the next transaction reading
	 * sensitive results from the previous transaction
	 */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range does not exceed the
	 * size of the message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
		current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
			current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
				    my_queue_elem);

end_function:
	return error;
}

/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
			current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
			 current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
		current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
			current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2) %x\n",
			current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
				current->pid);
			goto end_function;
		}

		/* Check whether this is a SEP reply or a SEP request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
				current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
				current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
				current->pid);
			mask |= POLLIN | POLLRDNORM;
		}
		set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
			current->pid);
		mask = 0;
	}

end_function:
	return mask;
}

/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
		current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
		current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
		current->pid, sep->shared_addr);

	return time.tv_sec;
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises the interrupt to the SEP that signals that it
 * has a new command from the host.
 *
 * Note that this function does fall under the ioctl lock.
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
	    (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
		current->pid,
		sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */
	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}
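
/*
 * For reference, the sanity checks above imply this layout for the
 * start of the message area (32-bit words); the full format is defined
 * by the SEP firmware interface, not by the driver:
 *
 *	words 0-1: not inspected by these checks
 *	word 2:    SEP_START_MSG_TOKEN
 *	word 3:    message size in bytes
 *		   (2..SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)
 *	word 4:    opcode (>= 2)
 */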

/**
 * sep_crypto_dma - DMA map a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @direction: DMA direction of the transfer
 *
 * Returns the number of dma maps on success; negative on error.
 *
 * This creates the dma table from the scatterlist.
 * It is used only for kernel crypto as it works with scatterlist
 * representation of data buffers.
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;

	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	if (sg->length == 0)
		return 0;

	/* Count the segments */
	temp_sg = sg;
	count_segment = 0;
	while (temp_sg) {
		count_segment += 1;
		temp_sg = scatterwalk_sg_next(temp_sg);
	}
	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x segments in sg\n", count_segment);

	/* DMA map segments */
	count_mapped = dma_map_sg(&sep->pdev->dev, sg,
				  count_segment, direction);

	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x maps in sg\n", count_mapped);

	if (count_mapped == 0) {
		dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
		return -ENOMEM;
	}

	sep_dma = kmalloc(sizeof(struct sep_dma_map) *
			  count_mapped, GFP_ATOMIC);

	if (sep_dma == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
		return -ENOMEM;
	}

	for_each_sg(sg, temp_sg, count_mapped, ct1) {
		sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
		sep_dma[ct1].size = sg_dma_len(temp_sg);
		dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
			ct1, (unsigned long)sep_dma[ct1].dma_addr,
			(unsigned long)sep_dma[ct1].size);
	}

	*dma_maps = sep_dma;
	return count_mapped;

}
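
/*
 * Illustrative sketch (not part of the driver): mapping a source
 * scatterlist for a device-bound transfer.  The locals are
 * hypothetical; the matching dma_unmap_sg() is done later in
 * sep_free_dma_table_data_handler().
 *
 *	struct sep_dma_map *maps = NULL;
 *	int nents;
 *
 *	nents = sep_crypto_dma(sep, src_sg, &maps, DMA_TO_DEVICE);
 *	if (nents <= 0)
 *		return nents ? nents : -EINVAL;
 *	... maps[0..nents-1] now hold dma_addr/size pairs ...
 *	kfree(maps);
 */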

/**
 * sep_crypto_lli - build an LLI array from a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @llis: pointer to place a pointer to array of lli entries
 *	This is filled in; anything previous there will be lost
 *	The structure for lli entries is sep_lli_entry
 * @data_size: total data size
 * @direction: DMA direction of the transfer
 *
 * Returns the number of dma maps on success; negative on error.
 *
 * This creates the LLI table from the scatterlist.
 * It is only used for kernel crypto as it works exclusively
 * with scatterlist (struct scatterlist) representation of
 * data buffers.
 */
static int sep_crypto_lli(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **maps,
	struct sep_lli_entry **llis,
	u32 data_size,
	enum dma_data_direction direction)
{

	int ct1;
	struct sep_lli_entry *sep_lli;
	struct sep_dma_map *sep_map;

	int nbr_ents;

	nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
	if (nbr_ents <= 0) {
		dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
			nbr_ents);
		return nbr_ents;
	}

	sep_map = *maps;

	sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

	if (sep_lli == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

		kfree(*maps);
		*maps = NULL;
		return -ENOMEM;
	}

	for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
		sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

		/* Maximum for page is total data size */
		if (sep_map[ct1].size > data_size)
			sep_map[ct1].size = data_size;

		sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
	}

	*llis = sep_lli;
	return nbr_ents;
}

/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that the application data holds in this page.
 * This function is used only during kernel crypto module calls from within
 * the kernel (when ioctl is not used).
 *
 * This is used only for kernel crypto. Kernel pages
 * are handled differently as they are done via
 * scatter-gather lists (struct scatterlist).
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
				 unsigned long kernel_virt_addr,
				 u32 data_size,
				 struct sep_lli_entry **lli_array_ptr,
				 int in_out_flag,
				 struct sep_dma_context *dma_ctx)

{
	u32 num_pages;
	struct scatterlist *sg;

	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	enum dma_data_direction direction;

	lli_array = NULL;
	map_array = NULL;

	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		direction = DMA_TO_DEVICE;
		sg = dma_ctx->src_sg;
	} else {
		direction = DMA_FROM_DEVICE;
		sg = dma_ctx->dst_sg;
	}

	num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
				   data_size, direction);

	if (num_pages <= 0) {
		dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
			num_pages);
		return -ENOMEM;
	}

	/* Put mapped kernel sg into kernel resource array */

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
			NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
			dma_ctx->src_sg;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
			NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
			dma_ctx->dst_sg;
	}

	return 0;
}

/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that the application
 * data holds in this physical page.
 */
static int sep_lock_user_pages(struct sep_device *sep,
			       u32 app_virt_addr,
			       u32 data_size,
			       struct sep_lli_entry **lli_array_ptr,
			       int in_out_flag,
			       struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc_array(num_pages, sizeof(struct page *),
				   GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}

	map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
				  GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
				num_pages,
				((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
				0, page_array, NULL);

	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] not all pages locked by get_user_pages, "
			 "result 0x%X, num_pages 0x%X\n",
			 current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
		current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
				     0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n", current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
			page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
			page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}
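
/*
 * Worked example of the page arithmetic above (illustrative values,
 * assuming PAGE_SIZE is 4096): for app_virt_addr 0x1ff0 and data_size
 * 0x30, start_page = 1, end_page = 2 and num_pages = 2.  lli_array[0]
 * then points 0xff0 bytes into its page with block_size 0x10, and the
 * last entry gets block_size (0x1ff0 + 0x30) & ~PAGE_MASK = 0x20.
 */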

/**
 * sep_lli_table_secure_dma - get lli array for IMR addresses
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: not used
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function creates lli tables for outputting data to
 * IMR memory, which is memory that cannot be accessed by the
 * x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
				    u32 app_virt_addr,
				    u32 data_size,
				    struct sep_lli_entry **lli_array_ptr,
				    int in_out_flag,
				    struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array)
		return -ENOMEM;

	/*
	 * Fill the lli_array
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}

/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the input lli array
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set when this is the last table to build
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array.
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
					    struct sep_lli_entry *lli_in_array_ptr,
					    u32 num_array_entries,
					    u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
	     (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
	     (counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries are left or if the data size exceeds the
	 * DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
	    next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
				    next_table_data_size);

end_function:
	return table_data_size;
}
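
/*
 * Worked example (illustrative numbers): suppose the first pass
 * accumulates 0x3000 bytes but the remaining entries add up to only
 * 0x40 bytes, with SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE at a notional
 * 0x100.  The current table is then shrunk by the shortfall,
 * 0x3000 - (0x100 - 0x40) = 0x2f40, so that the next table can carry
 * the full 0x100 DMA minimum.
 */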

/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
				struct sep_lli_entry *lli_array_ptr,
				struct sep_lli_entry *lli_table_ptr,
				u32 *num_processed_entries_ptr,
				u32 *num_table_entries_ptr,
				u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
		current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
			current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
				current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;

}

/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside the shared area
 * according to the virtual address. It can be either on the external
 * RAM device (ioremapped), or on the system RAM.
 * This implementation is for the external RAM.
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
					      void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
		current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}

/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside the shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM.
 * This implementation is for the external RAM.
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
					 dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}

/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
				       struct sep_lli_entry *lli_table_ptr,
				       unsigned long num_table_entries,
				       unsigned long table_data_size)
{
#ifdef DEBUG
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
		current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, "
			"table_data_size is (hex) %lx\n",
			current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
			current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
		     entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx "
				"block size is (hex) %x\n", current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size "
			"is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address "
			"is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);


		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is "
			"(hex) %lx num_table_entries is"
			" %lx bus_address is %lx\n",
			current->pid,
			table_data_size,
			num_table_entries,
			(unsigned long)lli_table_ptr->bus_address);

		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
		current->pid);
#endif
}
1818
1819
1820 /**
1821 * sep_prepare_empty_lli_table - create a blank LLI table
1822 * @sep: pointer to struct sep_device
1823 * @lli_table_addr_ptr: pointer to lli table
1824 * @num_entries_ptr: pointer to number of entries
1825 * @table_data_size_ptr: point to table data size
1826 * @dmatables_region: Optional buffer for DMA tables
1827 * @dma_ctx: DMA context
1828 *
1829 * This function creates empty lli tables when there is no data
1830 */
1831 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1832 dma_addr_t *lli_table_addr_ptr,
1833 u32 *num_entries_ptr,
1834 u32 *table_data_size_ptr,
1835 void **dmatables_region,
1836 struct sep_dma_context *dma_ctx)
1837 {
1838 struct sep_lli_entry *lli_table_ptr;
1839
1840 /* Find the area for new table */
1841 lli_table_ptr =
1842 (struct sep_lli_entry *)(sep->shared_addr +
1843 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1844 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1845 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1846
1847 if (dmatables_region && *dmatables_region)
1848 lli_table_ptr = *dmatables_region;
1849
1850 lli_table_ptr->bus_address = 0;
1851 lli_table_ptr->block_size = 0;
1852
1853 lli_table_ptr++;
1854 lli_table_ptr->bus_address = 0xFFFFFFFF;
1855 lli_table_ptr->block_size = 0;
1856
1857 /* Set the output parameter value */
1858 *lli_table_addr_ptr = sep->shared_bus +
1859 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1860 dma_ctx->num_lli_tables_created *
1861 sizeof(struct sep_lli_entry) *
1862 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1863
1864 /* Set the num of entries and table data size for empty table */
1865 *num_entries_ptr = 2;
1866 *table_data_size_ptr = 0;
1867
1868 /* Update the number of created tables */
1869 dma_ctx->num_lli_tables_created++;
1870 }
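
/*
 * Layout of the empty table built above, as a worked example (derived
 * from the code in this file, not from external documentation):
 *
 *   entry 0: bus_address = 0x00000000, block_size = 0   (no data)
 *   entry 1: bus_address = 0xFFFFFFFF, block_size = 0   (info entry;
 *            0xFFFFFFFF marks the end of the table chain)
 *
 * which is why *num_entries_ptr is 2 and *table_data_size_ptr is 0.
 */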
1871
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: virtual address of the input buffer
 * @data_size: size of the data in bytes
 * @block_size: block size of the algorithm
 * @lli_table_ptr: returned bus address of the first input LLI table
 * @num_entries_ptr: returned number of entries in the first table
 * @table_data_size_ptr: returned data size of the first table
 * @is_kva: set for kernel data (kernel crypto call)
 * @dmatables_region: optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function prepares only the input DMA table for synchronic
 * symmetric operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
1887 static int sep_prepare_input_dma_table(struct sep_device *sep,
1888 unsigned long app_virt_addr,
1889 u32 data_size,
1890 u32 block_size,
1891 dma_addr_t *lli_table_ptr,
1892 u32 *num_entries_ptr,
1893 u32 *table_data_size_ptr,
1894 bool is_kva,
1895 void **dmatables_region,
1896 struct sep_dma_context *dma_ctx
1897 )
1898 {
1899 int error = 0;
1900 /* Pointer to the info entry of the table - the last entry */
1901 struct sep_lli_entry *info_entry_ptr;
	/* Array of lli entries for the locked pages */
1903 struct sep_lli_entry *lli_array_ptr;
1904 /* Points to the first entry to be processed in the lli_in_array */
1905 u32 current_entry = 0;
1906 /* Num entries in the virtual buffer */
1907 u32 sep_lli_entries = 0;
1908 /* Lli table pointer */
1909 struct sep_lli_entry *in_lli_table_ptr;
1910 /* The total data in one table */
1911 u32 table_data_size = 0;
1912 /* Flag for last table */
1913 u32 last_table_flag = 0;
1914 /* Number of entries in lli table */
1915 u32 num_entries_in_table = 0;
1916 /* Next table address */
1917 void *lli_table_alloc_addr = NULL;
1918 void *dma_lli_table_alloc_addr = NULL;
1919 void *dma_in_lli_table_ptr = NULL;
1920
1921 dev_dbg(&sep->pdev->dev,
1922 "[PID%d] prepare intput dma tbl data size: (hex) %x\n",
1923 current->pid, data_size);
1924
1925 dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1926 current->pid, block_size);
1927
	/* Initialize the page pointers */
1929 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1930 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1931
1932 /* Set the kernel address for first table to be allocated */
1933 lli_table_alloc_addr = (void *)(sep->shared_addr +
1934 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1935 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1936 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1937
1938 if (data_size == 0) {
1939 if (dmatables_region) {
1940 error = sep_allocate_dmatables_region(sep,
1941 dmatables_region,
1942 dma_ctx,
1943 1);
1944 if (error)
1945 return error;
1946 }
		/* Special case - create empty table - 2 entries, zero data */
1948 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1949 num_entries_ptr, table_data_size_ptr,
1950 dmatables_region, dma_ctx);
1951 goto update_dcb_counter;
1952 }
1953
1954 /* Check if the pages are in Kernel Virtual Address layout */
1955 if (is_kva == true)
1956 error = sep_lock_kernel_pages(sep, app_virt_addr,
1957 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1958 dma_ctx);
1959 else
		/*
		 * Lock the pages of the user buffer
		 * and build the lli array from them
		 */
1964 error = sep_lock_user_pages(sep, app_virt_addr,
1965 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1966 dma_ctx);
1967
1968 if (error)
1969 goto end_function;
1970
1971 dev_dbg(&sep->pdev->dev,
1972 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1973 current->pid,
1974 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1975
1976 current_entry = 0;
1977 info_entry_ptr = NULL;
1978
1979 sep_lli_entries =
1980 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1981
1982 dma_lli_table_alloc_addr = lli_table_alloc_addr;
1983 if (dmatables_region) {
1984 error = sep_allocate_dmatables_region(sep,
1985 dmatables_region,
1986 dma_ctx,
1987 sep_lli_entries);
1988 if (error)
1989 goto end_function_error;
1990 lli_table_alloc_addr = *dmatables_region;
1991 }
1992
	/* Loop until all the entries in the input array are processed */
1994 while (current_entry < sep_lli_entries) {
1995
1996 /* Set the new input and output tables */
1997 in_lli_table_ptr =
1998 (struct sep_lli_entry *)lli_table_alloc_addr;
1999 dma_in_lli_table_ptr =
2000 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2001
2002 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2003 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2004 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2005 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2006
2007 if (dma_lli_table_alloc_addr >
2008 ((void *)sep->shared_addr +
2009 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2010 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2011
2012 error = -ENOMEM;
2013 goto end_function_error;
2014
2015 }
2016
2017 /* Update the number of created tables */
2018 dma_ctx->num_lli_tables_created++;
2019
2020 /* Calculate the maximum size of data for input table */
2021 table_data_size = sep_calculate_lli_table_max_size(sep,
2022 &lli_array_ptr[current_entry],
2023 (sep_lli_entries - current_entry),
2024 &last_table_flag);
2025
2026 /*
2027 * If this is not the last table -
2028 * then align it to the block size
2029 */
2030 if (!last_table_flag)
2031 table_data_size =
2032 (table_data_size / block_size) * block_size;
2033
2034 dev_dbg(&sep->pdev->dev,
2035 "[PID%d] output table_data_size is (hex) %x\n",
2036 current->pid,
2037 table_data_size);
2038
2039 /* Construct input lli table */
2040 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2041 in_lli_table_ptr,
2042 &current_entry, &num_entries_in_table, table_data_size);
2043
2044 if (info_entry_ptr == NULL) {
2045
2046 /* Set the output parameters to physical addresses */
2047 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2048 dma_in_lli_table_ptr);
2049 *num_entries_ptr = num_entries_in_table;
2050 *table_data_size_ptr = table_data_size;
2051
2052 dev_dbg(&sep->pdev->dev,
2053 "[PID%d] output lli_table_in_ptr is %08lx\n",
2054 current->pid,
2055 (unsigned long)*lli_table_ptr);
2056
2057 } else {
2058 /* Update the info entry of the previous in table */
2059 info_entry_ptr->bus_address =
2060 sep_shared_area_virt_to_bus(sep,
2061 dma_in_lli_table_ptr);
2062 info_entry_ptr->block_size =
2063 ((num_entries_in_table) << 24) |
2064 (table_data_size);
2065 }
2066 /* Save the pointer to the info entry of the current tables */
2067 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2068 }
2069 /* Print input tables */
2070 if (!dmatables_region) {
2071 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2072 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2073 *num_entries_ptr, *table_data_size_ptr);
2074 }
2075
	/* Free the temporary lli array of the pages */
2077 kfree(lli_array_ptr);
2078
2079 update_dcb_counter:
2080 /* Update DCB counter */
2081 dma_ctx->nr_dcb_creat++;
2082 goto end_function;
2083
2084 end_function_error:
2085 /* Free all the allocated resources */
2086 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2087 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2088 kfree(lli_array_ptr);
2089 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2090 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2091
2092 end_function:
2093 return error;
2094
2095 }
2096
/**
 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
 * @sep: pointer to struct sep_device
 * @lli_in_array: array of input lli entries
 * @sep_in_lli_entries: number of entries in lli_in_array
 * @lli_out_array: array of output lli entries
 * @sep_out_lli_entries: number of entries in lli_out_array
 * @block_size: block size of the algorithm
 * @lli_table_in_ptr: returned bus address of the first input table
 * @lli_table_out_ptr: returned bus address of the first output table
 * @in_num_entries_ptr: returned number of entries in the first input table
 * @out_num_entries_ptr: returned number of entries in the first output table
 * @table_data_size_ptr: returned data size of the first table
 * @dmatables_region: optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function creates the input and output DMA tables for
 * symmetric operations (AES/DES) according to the block
 * size from LLI arrays
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
2117 static int sep_construct_dma_tables_from_lli(
2118 struct sep_device *sep,
2119 struct sep_lli_entry *lli_in_array,
2120 u32 sep_in_lli_entries,
2121 struct sep_lli_entry *lli_out_array,
2122 u32 sep_out_lli_entries,
2123 u32 block_size,
2124 dma_addr_t *lli_table_in_ptr,
2125 dma_addr_t *lli_table_out_ptr,
2126 u32 *in_num_entries_ptr,
2127 u32 *out_num_entries_ptr,
2128 u32 *table_data_size_ptr,
2129 void **dmatables_region,
2130 struct sep_dma_context *dma_ctx)
2131 {
2132 /* Points to the area where next lli table can be allocated */
2133 void *lli_table_alloc_addr = NULL;
2134 /*
2135 * Points to the area in shared region where next lli table
2136 * can be allocated
2137 */
2138 void *dma_lli_table_alloc_addr = NULL;
2139 /* Input lli table in dmatables_region or shared region */
2140 struct sep_lli_entry *in_lli_table_ptr = NULL;
2141 /* Input lli table location in the shared region */
2142 struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2143 /* Output lli table in dmatables_region or shared region */
2144 struct sep_lli_entry *out_lli_table_ptr = NULL;
2145 /* Output lli table location in the shared region */
2146 struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2147 /* Pointer to the info entry of the table - the last entry */
2148 struct sep_lli_entry *info_in_entry_ptr = NULL;
2149 /* Pointer to the info entry of the table - the last entry */
2150 struct sep_lli_entry *info_out_entry_ptr = NULL;
2151 /* Points to the first entry to be processed in the lli_in_array */
2152 u32 current_in_entry = 0;
2153 /* Points to the first entry to be processed in the lli_out_array */
2154 u32 current_out_entry = 0;
2155 /* Max size of the input table */
2156 u32 in_table_data_size = 0;
2157 /* Max size of the output table */
2158 u32 out_table_data_size = 0;
	/* Flag that signifies whether this is the last table to build */
2160 u32 last_table_flag = 0;
2161 /* The data size that should be in table */
2162 u32 table_data_size = 0;
2163 /* Number of entries in the input table */
2164 u32 num_entries_in_table = 0;
2165 /* Number of entries in the output table */
2166 u32 num_entries_out_table = 0;
2167
2168 if (!dma_ctx) {
2169 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2170 return -EINVAL;
2171 }
2172
	/* Initialize to point after the message area */
2174 lli_table_alloc_addr = (void *)(sep->shared_addr +
2175 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2176 (dma_ctx->num_lli_tables_created *
2177 (sizeof(struct sep_lli_entry) *
2178 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2179 dma_lli_table_alloc_addr = lli_table_alloc_addr;
2180
2181 if (dmatables_region) {
2182 /* 2 for both in+out table */
2183 if (sep_allocate_dmatables_region(sep,
2184 dmatables_region,
2185 dma_ctx,
2186 2*sep_in_lli_entries))
2187 return -ENOMEM;
2188 lli_table_alloc_addr = *dmatables_region;
2189 }
2190
	/* Loop until all the entries in the input array are processed */
2192 while (current_in_entry < sep_in_lli_entries) {
2193 /* Set the new input and output tables */
2194 in_lli_table_ptr =
2195 (struct sep_lli_entry *)lli_table_alloc_addr;
2196 dma_in_lli_table_ptr =
2197 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2198
2199 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2200 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2201 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2202 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2203
2204 /* Set the first output tables */
2205 out_lli_table_ptr =
2206 (struct sep_lli_entry *)lli_table_alloc_addr;
2207 dma_out_lli_table_ptr =
2208 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2209
2210 /* Check if the DMA table area limit was overrun */
2211 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2212 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2213 ((void *)sep->shared_addr +
2214 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2215 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2216
2217 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2218 return -ENOMEM;
2219 }
2220
2221 /* Update the number of the lli tables created */
2222 dma_ctx->num_lli_tables_created += 2;
2223
2224 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2225 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2226 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2227 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2228
2229 /* Calculate the maximum size of data for input table */
2230 in_table_data_size =
2231 sep_calculate_lli_table_max_size(sep,
2232 &lli_in_array[current_in_entry],
2233 (sep_in_lli_entries - current_in_entry),
2234 &last_table_flag);
2235
2236 /* Calculate the maximum size of data for output table */
2237 out_table_data_size =
2238 sep_calculate_lli_table_max_size(sep,
2239 &lli_out_array[current_out_entry],
2240 (sep_out_lli_entries - current_out_entry),
2241 &last_table_flag);
2242
2243 if (!last_table_flag) {
2244 in_table_data_size = (in_table_data_size /
2245 block_size) * block_size;
2246 out_table_data_size = (out_table_data_size /
2247 block_size) * block_size;
2248 }
2249
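		/*
		 * Use the smaller of the two sizes so the input and
		 * output tables always describe the same amount of data
		 */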
2250 table_data_size = in_table_data_size;
2251 if (table_data_size > out_table_data_size)
2252 table_data_size = out_table_data_size;
2253
2254 dev_dbg(&sep->pdev->dev,
2255 "[PID%d] construct tables from lli"
2256 " in_table_data_size is (hex) %x\n", current->pid,
2257 in_table_data_size);
2258
2259 dev_dbg(&sep->pdev->dev,
2260 "[PID%d] construct tables from lli"
2261 "out_table_data_size is (hex) %x\n", current->pid,
2262 out_table_data_size);
2263
2264 /* Construct input lli table */
2265 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2266 in_lli_table_ptr,
2267 &current_in_entry,
2268 &num_entries_in_table,
2269 table_data_size);
2270
2271 /* Construct output lli table */
2272 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2273 out_lli_table_ptr,
2274 &current_out_entry,
2275 &num_entries_out_table,
2276 table_data_size);
2277
2278 /* If info entry is null - this is the first table built */
2279 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2280 /* Set the output parameters to physical addresses */
2281 *lli_table_in_ptr =
2282 sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2283
2284 *in_num_entries_ptr = num_entries_in_table;
2285
2286 *lli_table_out_ptr =
2287 sep_shared_area_virt_to_bus(sep,
2288 dma_out_lli_table_ptr);
2289
2290 *out_num_entries_ptr = num_entries_out_table;
2291 *table_data_size_ptr = table_data_size;
2292
2293 dev_dbg(&sep->pdev->dev,
2294 "[PID%d] output lli_table_in_ptr is %08lx\n",
2295 current->pid,
2296 (unsigned long)*lli_table_in_ptr);
2297 dev_dbg(&sep->pdev->dev,
2298 "[PID%d] output lli_table_out_ptr is %08lx\n",
2299 current->pid,
2300 (unsigned long)*lli_table_out_ptr);
2301 } else {
2302 /* Update the info entry of the previous in table */
2303 info_in_entry_ptr->bus_address =
2304 sep_shared_area_virt_to_bus(sep,
2305 dma_in_lli_table_ptr);
2306
2307 info_in_entry_ptr->block_size =
2308 ((num_entries_in_table) << 24) |
2309 (table_data_size);
2310
			/* Update the info entry of the previous out table */
2312 info_out_entry_ptr->bus_address =
2313 sep_shared_area_virt_to_bus(sep,
2314 dma_out_lli_table_ptr);
2315
2316 info_out_entry_ptr->block_size =
2317 ((num_entries_out_table) << 24) |
2318 (table_data_size);
2319
2320 dev_dbg(&sep->pdev->dev,
2321 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2322 current->pid,
2323 (unsigned long)info_in_entry_ptr->bus_address,
2324 info_in_entry_ptr->block_size);
2325
2326 dev_dbg(&sep->pdev->dev,
2327 "[PID%d] output lli_table_out_ptr:"
2328 "%08lx %08x\n",
2329 current->pid,
2330 (unsigned long)info_out_entry_ptr->bus_address,
2331 info_out_entry_ptr->block_size);
2332 }
2333
2334 /* Save the pointer to the info entry of the current tables */
2335 info_in_entry_ptr = in_lli_table_ptr +
2336 num_entries_in_table - 1;
2337 info_out_entry_ptr = out_lli_table_ptr +
2338 num_entries_out_table - 1;
2339
2340 dev_dbg(&sep->pdev->dev,
2341 "[PID%d] output num_entries_out_table is %x\n",
2342 current->pid,
2343 (u32)num_entries_out_table);
2344 dev_dbg(&sep->pdev->dev,
2345 "[PID%d] output info_in_entry_ptr is %lx\n",
2346 current->pid,
2347 (unsigned long)info_in_entry_ptr);
2348 dev_dbg(&sep->pdev->dev,
2349 "[PID%d] output info_out_entry_ptr is %lx\n",
2350 current->pid,
2351 (unsigned long)info_out_entry_ptr);
2352 }
2353
2354 /* Print input tables */
2355 if (!dmatables_region) {
2356 sep_debug_print_lli_tables(
2357 sep,
2358 (struct sep_lli_entry *)
2359 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2360 *in_num_entries_ptr,
2361 *table_data_size_ptr);
2362 }
2363
2364 /* Print output tables */
2365 if (!dmatables_region) {
2366 sep_debug_print_lli_tables(
2367 sep,
2368 (struct sep_lli_entry *)
2369 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2370 *out_num_entries_ptr,
2371 *table_data_size_ptr);
2372 }
2373
2374 return 0;
2375 }
2376
/**
 * sep_prepare_input_output_dma_table - prepare DMA I/O table
 * @sep: pointer to struct sep_device
 * @app_virt_in_addr: virtual address of the input buffer
 * @app_virt_out_addr: virtual address of the output buffer
 * @data_size: size of the data in bytes
 * @block_size: block size of the algorithm
 * @lli_table_in_ptr: returned bus address of the first input table
 * @lli_table_out_ptr: returned bus address of the first output table
 * @in_num_entries_ptr: returned number of entries in the first input table
 * @out_num_entries_ptr: returned number of entries in the first output table
 * @table_data_size_ptr: returned data size of the first table
 * @is_kva: set for kernel data; used only for kernel crypto module
 * @dmatables_region: optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function builds input and output DMA tables for synchronic
 * symmetric operations (AES, DES, HASH). It also ensures each table's
 * data size is a multiple of the block size
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
2396 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2397 unsigned long app_virt_in_addr,
2398 unsigned long app_virt_out_addr,
2399 u32 data_size,
2400 u32 block_size,
2401 dma_addr_t *lli_table_in_ptr,
2402 dma_addr_t *lli_table_out_ptr,
2403 u32 *in_num_entries_ptr,
2404 u32 *out_num_entries_ptr,
2405 u32 *table_data_size_ptr,
2406 bool is_kva,
2407 void **dmatables_region,
2408 struct sep_dma_context *dma_ctx)
2409
2410 {
2411 int error = 0;
	/* Lli array for the input pages */
	struct sep_lli_entry *lli_in_array;
	/* Lli array for the output pages */
	struct sep_lli_entry *lli_out_array;
2416
2417 if (!dma_ctx) {
2418 error = -EINVAL;
2419 goto end_function;
2420 }
2421
2422 if (data_size == 0) {
2423 /* Prepare empty table for input and output */
2424 if (dmatables_region) {
2425 error = sep_allocate_dmatables_region(
2426 sep,
2427 dmatables_region,
2428 dma_ctx,
2429 2);
2430 if (error)
2431 goto end_function;
2432 }
2433 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2434 in_num_entries_ptr, table_data_size_ptr,
2435 dmatables_region, dma_ctx);
2436
2437 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2438 out_num_entries_ptr, table_data_size_ptr,
2439 dmatables_region, dma_ctx);
2440
2441 goto update_dcb_counter;
2442 }
2443
	/* Initialize the page pointers */
2445 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2446 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2447
	/* Lock the pages of the buffer and build lli arrays from them */
2449 if (is_kva == true) {
2450 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2451 current->pid);
2452 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2453 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2454 dma_ctx);
2455 if (error) {
2456 dev_warn(&sep->pdev->dev,
2457 "[PID%d] sep_lock_kernel_pages for input "
2458 "virtual buffer failed\n", current->pid);
2459
2460 goto end_function;
2461 }
2462
2463 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2464 current->pid);
2465 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2466 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2467 dma_ctx);
2468
2469 if (error) {
2470 dev_warn(&sep->pdev->dev,
2471 "[PID%d] sep_lock_kernel_pages for output "
2472 "virtual buffer failed\n", current->pid);
2473
2474 goto end_function_free_lli_in;
2475 }
2476
	} else {
2480 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2481 current->pid);
2482 error = sep_lock_user_pages(sep, app_virt_in_addr,
2483 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2484 dma_ctx);
2485 if (error) {
2486 dev_warn(&sep->pdev->dev,
2487 "[PID%d] sep_lock_user_pages for input "
2488 "virtual buffer failed\n", current->pid);
2489
2490 goto end_function;
2491 }
2492
2493 if (dma_ctx->secure_dma == true) {
			/* secure_dma requires use of non-accessible memory */
2495 dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2496 current->pid);
2497 error = sep_lli_table_secure_dma(sep,
2498 app_virt_out_addr, data_size, &lli_out_array,
2499 SEP_DRIVER_OUT_FLAG, dma_ctx);
2500 if (error) {
2501 dev_warn(&sep->pdev->dev,
2502 "[PID%d] secure dma table setup "
2503 " for output virtual buffer failed\n",
2504 current->pid);
2505
2506 goto end_function_free_lli_in;
2507 }
2508 } else {
2509 /* For normal, non-secure dma */
2510 dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2511 current->pid);
2512
2513 dev_dbg(&sep->pdev->dev,
2514 "[PID%d] Locking user output pages\n",
2515 current->pid);
2516
2517 error = sep_lock_user_pages(sep, app_virt_out_addr,
2518 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2519 dma_ctx);
2520
2521 if (error) {
2522 dev_warn(&sep->pdev->dev,
2523 "[PID%d] sep_lock_user_pages"
2524 " for output virtual buffer failed\n",
2525 current->pid);
2526
2527 goto end_function_free_lli_in;
2528 }
2529 }
2530 }
2531
2532 dev_dbg(&sep->pdev->dev,
2533 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2534 current->pid,
2535 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2536
2537 dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2538 current->pid,
2539 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2540
2541 dev_dbg(&sep->pdev->dev,
2542 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2543 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2544
2545 /* Call the function that creates table from the lli arrays */
2546 dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2547 current->pid);
2548 error = sep_construct_dma_tables_from_lli(
2549 sep, lli_in_array,
2550 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2551 in_num_pages,
2552 lli_out_array,
2553 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2554 out_num_pages,
2555 block_size, lli_table_in_ptr, lli_table_out_ptr,
2556 in_num_entries_ptr, out_num_entries_ptr,
2557 table_data_size_ptr, dmatables_region, dma_ctx);
2558
2559 if (error) {
2560 dev_warn(&sep->pdev->dev,
2561 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2562 current->pid);
2563 goto end_function_with_error;
2564 }
2565
2566 kfree(lli_out_array);
2567 kfree(lli_in_array);
2568
2569 update_dcb_counter:
2570 /* Update DCB counter */
2571 dma_ctx->nr_dcb_creat++;
2572
2573 goto end_function;
2574
2575 end_function_with_error:
2576 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2577 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2578 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2579 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2580 kfree(lli_out_array);
2581
2582
2583 end_function_free_lli_in:
2584 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2585 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2586 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2587 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2588 kfree(lli_in_array);
2589
2590 end_function:
2591
2592 return error;
2593
2594 }
2595
/**
 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
 * @sep: pointer to struct sep_device
 * @app_in_address: unsigned long; for data buffer in (user space)
 * @app_out_address: unsigned long; for data buffer out (user space)
 * @data_in_size: u32; for size of data
 * @block_size: u32; for block size
 * @tail_block_size: u32; for size of tail block
 * @isapplet: bool; to indicate external app
 * @is_kva: bool; kernel buffer; only used for kernel crypto module
 * @secure_dma: indicates whether this is secure_dma using IMR
 * @dcb_region: DCB to fill, or NULL to use the next one in the shared area
 * @dmatables_region: optional buffer for DMA tables
 * @dma_ctx: DMA context for the transaction
 * @src_sg: source scatterlist; used only by the kernel crypto module
 * @dst_sg: destination scatterlist; used only by the kernel crypto module
 *
 * This function prepares the linked DMA tables and puts the
 * address for the linked list of tables into a DCB (data control
 * block), the address of which is known by the SEP hardware
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
2613 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2614 unsigned long app_in_address,
2615 unsigned long app_out_address,
2616 u32 data_in_size,
2617 u32 block_size,
2618 u32 tail_block_size,
2619 bool isapplet,
2620 bool is_kva,
2621 bool secure_dma,
2622 struct sep_dcblock *dcb_region,
2623 void **dmatables_region,
2624 struct sep_dma_context **dma_ctx,
2625 struct scatterlist *src_sg,
2626 struct scatterlist *dst_sg)
2627 {
2628 int error = 0;
2629 /* Size of tail */
2630 u32 tail_size = 0;
2631 /* Address of the created DCB table */
2632 struct sep_dcblock *dcb_table_ptr = NULL;
2633 /* The physical address of the first input DMA table */
2634 dma_addr_t in_first_mlli_address = 0;
2635 /* Number of entries in the first input DMA table */
2636 u32 in_first_num_entries = 0;
2637 /* The physical address of the first output DMA table */
2638 dma_addr_t out_first_mlli_address = 0;
2639 /* Number of entries in the first output DMA table */
2640 u32 out_first_num_entries = 0;
2641 /* Data in the first input/output table */
2642 u32 first_data_size = 0;
2643
2644 dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2645 current->pid, app_in_address);
2646
2647 dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2648 current->pid, app_out_address);
2649
2650 dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2651 current->pid, data_in_size);
2652
2653 dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2654 current->pid, block_size);
2655
2656 dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2657 current->pid, tail_block_size);
2658
2659 dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2660 current->pid, isapplet);
2661
2662 dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2663 current->pid, is_kva);
2664
2665 dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2666 current->pid, src_sg);
2667
2668 dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2669 current->pid, dst_sg);
2670
2671 if (!dma_ctx) {
2672 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2673 current->pid);
2674 error = -EINVAL;
2675 goto end_function;
2676 }
2677
2678 if (*dma_ctx) {
2679 /* In case there are multiple DCBs for this transaction */
2680 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2681 current->pid);
2682 } else {
2683 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2684 if (!(*dma_ctx)) {
2685 dev_dbg(&sep->pdev->dev,
2686 "[PID%d] Not enough memory for DMA context\n",
2687 current->pid);
2688 error = -ENOMEM;
2689 goto end_function;
2690 }
2691 dev_dbg(&sep->pdev->dev,
2692 "[PID%d] Created DMA context addr at 0x%p\n",
2693 current->pid, *dma_ctx);
2694 }
2695
2696 (*dma_ctx)->secure_dma = secure_dma;
2697
2698 /* these are for kernel crypto only */
2699 (*dma_ctx)->src_sg = src_sg;
2700 (*dma_ctx)->dst_sg = dst_sg;
2701
2702 if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2703 /* No more DCBs to allocate */
2704 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2705 current->pid);
2706 error = -ENOSPC;
2707 goto end_function_error;
2708 }
2709
2710 /* Allocate new DCB */
2711 if (dcb_region) {
2712 dcb_table_ptr = dcb_region;
2713 } else {
2714 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2715 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2716 ((*dma_ctx)->nr_dcb_creat *
2717 sizeof(struct sep_dcblock)));
2718 }
2719
2720 /* Set the default values in the DCB */
2721 dcb_table_ptr->input_mlli_address = 0;
2722 dcb_table_ptr->input_mlli_num_entries = 0;
2723 dcb_table_ptr->input_mlli_data_size = 0;
2724 dcb_table_ptr->output_mlli_address = 0;
2725 dcb_table_ptr->output_mlli_num_entries = 0;
2726 dcb_table_ptr->output_mlli_data_size = 0;
2727 dcb_table_ptr->tail_data_size = 0;
2728 dcb_table_ptr->out_vr_tail_pt = 0;
2729
2730 if (isapplet == true) {
2731
2732 /* Check if there is enough data for DMA operation */
2733 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2734 if (is_kva == true) {
2735 error = -ENODEV;
2736 goto end_function_error;
2737 } else {
2738 if (copy_from_user(dcb_table_ptr->tail_data,
2739 (void __user *)app_in_address,
2740 data_in_size)) {
2741 error = -EFAULT;
2742 goto end_function_error;
2743 }
2744 }
2745
2746 dcb_table_ptr->tail_data_size = data_in_size;
2747
2748 /* Set the output user-space address for mem2mem op */
2749 if (app_out_address)
2750 dcb_table_ptr->out_vr_tail_pt =
2751 (aligned_u64)app_out_address;
2752
2753 /*
2754 * Update both data length parameters in order to avoid
2755 * second data copy and allow building of empty mlli
2756 * tables
2757 */
2758 tail_size = 0x0;
2759 data_in_size = 0x0;
2760
2761 } else {
2762 if (!app_out_address) {
2763 tail_size = data_in_size % block_size;
2764 if (!tail_size) {
2765 if (tail_block_size == block_size)
2766 tail_size = block_size;
2767 }
2768 } else {
2769 tail_size = 0;
2770 }
2771 }
		if (tail_size) {
			if (tail_size > sizeof(dcb_table_ptr->tail_data)) {
				/* Take the error path so *dma_ctx is freed */
				error = -EINVAL;
				goto end_function_error;
			}
2775 if (is_kva == true) {
2776 error = -ENODEV;
2777 goto end_function_error;
2778 } else {
2779 /* We have tail data - copy it to DCB */
2780 if (copy_from_user(dcb_table_ptr->tail_data,
2781 (void __user *)(app_in_address +
2782 data_in_size - tail_size), tail_size)) {
2783 error = -EFAULT;
2784 goto end_function_error;
2785 }
2786 }
2787 if (app_out_address)
2788 /*
2789 * Calculate the output address
2790 * according to tail data size
2791 */
2792 dcb_table_ptr->out_vr_tail_pt =
2793 (aligned_u64)app_out_address +
2794 data_in_size - tail_size;
2795
2796 /* Save the real tail data size */
2797 dcb_table_ptr->tail_data_size = tail_size;
			/*
			 * Update the data size to exclude the tail;
			 * this is the amount that goes through DMA
			 */
2802 data_in_size = (data_in_size - tail_size);
2803 }
2804 }
2805 /* Check if we need to build only input table or input/output */
2806 if (app_out_address) {
2807 /* Prepare input/output tables */
2808 error = sep_prepare_input_output_dma_table(sep,
2809 app_in_address,
2810 app_out_address,
2811 data_in_size,
2812 block_size,
2813 &in_first_mlli_address,
2814 &out_first_mlli_address,
2815 &in_first_num_entries,
2816 &out_first_num_entries,
2817 &first_data_size,
2818 is_kva,
2819 dmatables_region,
2820 *dma_ctx);
2821 } else {
2822 /* Prepare input tables */
2823 error = sep_prepare_input_dma_table(sep,
2824 app_in_address,
2825 data_in_size,
2826 block_size,
2827 &in_first_mlli_address,
2828 &in_first_num_entries,
2829 &first_data_size,
2830 is_kva,
2831 dmatables_region,
2832 *dma_ctx);
2833 }
2834
2835 if (error) {
2836 dev_warn(&sep->pdev->dev,
2837 "prepare DMA table call failed "
2838 "from prepare DCB call\n");
2839 goto end_function_error;
2840 }
2841
2842 /* Set the DCB values */
2843 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2844 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2845 dcb_table_ptr->input_mlli_data_size = first_data_size;
2846 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2847 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2848 dcb_table_ptr->output_mlli_data_size = first_data_size;
2849
2850 goto end_function;
2851
2852 end_function_error:
2853 kfree(*dma_ctx);
2854 *dma_ctx = NULL;
2855
2856 end_function:
2857 return error;
2858
2859 }
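
/*
 * Worked example of the applet tail handling above (illustrative numbers
 * only): with data_in_size = 100, block_size = 16 and no app_out_address,
 * tail_size = 100 % 16 = 4; the last 4 bytes are copied into
 * dcb_table_ptr->tail_data and the DMA tables are built for the
 * remaining 96 bytes. If data_in_size were below
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE, the whole buffer would travel as
 * tail data and empty MLLI tables would be built instead.
 */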
2860
2861
2862 /**
2863 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2864 * @sep: pointer to struct sep_device
2865 * @isapplet: indicates external application (used for kernel access)
2866 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2867 *
2868 * This function frees the DMA tables and DCB
2869 */
2870 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2871 bool is_kva, struct sep_dma_context **dma_ctx)
2872 {
2873 struct sep_dcblock *dcb_table_ptr;
2874 unsigned long pt_hold;
2875 void *tail_pt;
2876
2877 int i = 0;
2878 int error = 0;
2879 int error_temp = 0;
2880
2881 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2882 current->pid);
	if (!dma_ctx || !*dma_ctx) /* nothing to be done here */
2884 return 0;
2885
2886 if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
2887 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2888 current->pid);
2889
		/* Tail handling applies only to non-secure_dma */
2891 /* Set pointer to first DCB table */
2892 dcb_table_ptr = (struct sep_dcblock *)
2893 (sep->shared_addr +
2894 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2895
		/*
		 * Go over each DCB and see if
		 * tail pointer must be updated
		 */
2900 for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2901 if (dcb_table_ptr->out_vr_tail_pt) {
2902 pt_hold = (unsigned long)dcb_table_ptr->
2903 out_vr_tail_pt;
2904 tail_pt = (void *)pt_hold;
2905 if (is_kva == true) {
2906 error = -ENODEV;
2907 break;
2908 } else {
2909 error_temp = copy_to_user(
2910 (void __user *)tail_pt,
2911 dcb_table_ptr->tail_data,
2912 dcb_table_ptr->tail_data_size);
2913 }
2914 if (error_temp) {
2915 /* Release the DMA resource */
2916 error = -EFAULT;
2917 break;
2918 }
2919 }
2920 }
2921 }
2922
	/* Free the DMA table resources, if any */
2924 sep_free_dma_table_data_handler(sep, dma_ctx);
2925
2926 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2927 current->pid);
2928
2929 return error;
2930 }
2931
/**
 * sep_prepare_dcb_handler - prepare a control block
 * @sep: pointer to struct sep_device
 * @arg: pointer to user parameters
 * @secure_dma: indicate whether we are using secure_dma on IMR
 * @dma_ctx: DMA context for the transaction
 *
 * This function copies the build_dcb_struct parameters from user space
 * and prepares the DCB and its DMA tables accordingly.
 */
2941 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2942 bool secure_dma,
2943 struct sep_dma_context **dma_ctx)
2944 {
2945 int error;
2946 /* Command arguments */
	struct build_dcb_struct command_args;
2948
2949 /* Get the command arguments */
2950 if (copy_from_user(&command_args, (void __user *)arg,
2951 sizeof(struct build_dcb_struct))) {
2952 error = -EFAULT;
2953 goto end_function;
2954 }
2955
2956 dev_dbg(&sep->pdev->dev,
2957 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2958 current->pid, command_args.app_in_address);
2959 dev_dbg(&sep->pdev->dev,
2960 "[PID%d] app_out_address is %08llx\n",
2961 current->pid, command_args.app_out_address);
2962 dev_dbg(&sep->pdev->dev,
2963 "[PID%d] data_size is %x\n",
2964 current->pid, command_args.data_in_size);
2965 dev_dbg(&sep->pdev->dev,
2966 "[PID%d] block_size is %x\n",
2967 current->pid, command_args.block_size);
2968 dev_dbg(&sep->pdev->dev,
2969 "[PID%d] tail block_size is %x\n",
2970 current->pid, command_args.tail_block_size);
2971 dev_dbg(&sep->pdev->dev,
2972 "[PID%d] is_applet is %x\n",
2973 current->pid, command_args.is_applet);
2974
2975 if (!command_args.app_in_address) {
2976 dev_warn(&sep->pdev->dev,
2977 "[PID%d] null app_in_address\n", current->pid);
2978 error = -EINVAL;
2979 goto end_function;
2980 }
2981
2982 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2983 (unsigned long)command_args.app_in_address,
2984 (unsigned long)command_args.app_out_address,
2985 command_args.data_in_size, command_args.block_size,
2986 command_args.tail_block_size,
2987 command_args.is_applet, false,
2988 secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2989
2990 end_function:
2991 return error;
2992
2993 }
2994
/**
 * sep_free_dcb_handler - free control block resources
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context for the transaction
 *
 * This function frees the DCB resources and updates the needed
 * user-space buffers.
 */
3002 static int sep_free_dcb_handler(struct sep_device *sep,
3003 struct sep_dma_context **dma_ctx)
3004 {
3005 if (!dma_ctx || !(*dma_ctx)) {
3006 dev_dbg(&sep->pdev->dev,
3007 "[PID%d] no dma context defined, nothing to free\n",
3008 current->pid);
3009 return -EINVAL;
3010 }
3011
3012 dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3013 current->pid,
3014 (*dma_ctx)->nr_dcb_creat);
3015
3016 return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3017 }
3018
3019 /**
3020 * sep_ioctl - ioctl handler for sep device
3021 * @filp: pointer to struct file
3022 * @cmd: command
3023 * @arg: pointer to argument structure
3024 *
3025 * Implement the ioctl methods available on the SEP device.
3026 */
3027 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3028 {
3029 struct sep_private_data * const private_data = filp->private_data;
3030 struct sep_call_status *call_status = &private_data->call_status;
3031 struct sep_device *sep = private_data->device;
3032 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3033 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3034 int error = 0;
3035
3036 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3037 current->pid, cmd);
3038 dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3039 current->pid, *dma_ctx);
3040
3041 /* Make sure we own this device */
3042 error = sep_check_transaction_owner(sep);
3043 if (error) {
3044 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3045 current->pid);
3046 goto end_function;
3047 }
3048
3049 /* Check that sep_mmap has been called before */
3050 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3051 &call_status->status)) {
3052 dev_dbg(&sep->pdev->dev,
3053 "[PID%d] mmap not called\n", current->pid);
3054 error = -EPROTO;
3055 goto end_function;
3056 }
3057
3058 /* Check that the command is for SEP device */
3059 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3060 error = -ENOTTY;
3061 goto end_function;
3062 }
3063
3064 switch (cmd) {
3065 case SEP_IOCSENDSEPCOMMAND:
3066 dev_dbg(&sep->pdev->dev,
3067 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3068 current->pid);
3069 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3070 &call_status->status)) {
3071 dev_warn(&sep->pdev->dev,
3072 "[PID%d] send msg already done\n",
3073 current->pid);
3074 error = -EPROTO;
3075 goto end_function;
3076 }
3077 /* Send command to SEP */
3078 error = sep_send_command_handler(sep);
3079 if (!error)
3080 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3081 &call_status->status);
3082 dev_dbg(&sep->pdev->dev,
3083 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3084 current->pid);
3085 break;
3086 case SEP_IOCENDTRANSACTION:
3087 dev_dbg(&sep->pdev->dev,
3088 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3089 current->pid);
3090 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3091 my_queue_elem);
3092 dev_dbg(&sep->pdev->dev,
3093 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3094 current->pid);
3095 break;
3096 case SEP_IOCPREPAREDCB:
3097 dev_dbg(&sep->pdev->dev,
3098 "[PID%d] SEP_IOCPREPAREDCB start\n",
3099 current->pid);
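		/* fall through - shares handling with the secure_dma case */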
3100 case SEP_IOCPREPAREDCB_SECURE_DMA:
3101 dev_dbg(&sep->pdev->dev,
3102 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3103 current->pid);
3104 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3105 &call_status->status)) {
3106 dev_dbg(&sep->pdev->dev,
3107 "[PID%d] dcb prep needed before send msg\n",
3108 current->pid);
3109 error = -EPROTO;
3110 goto end_function;
3111 }
3112
3113 if (!arg) {
3114 dev_dbg(&sep->pdev->dev,
3115 "[PID%d] dcb null arg\n", current->pid);
3116 error = -EINVAL;
3117 goto end_function;
3118 }
3119
3120 if (cmd == SEP_IOCPREPAREDCB) {
3121 /* No secure dma */
3122 dev_dbg(&sep->pdev->dev,
3123 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3124 current->pid);
3125
3126 error = sep_prepare_dcb_handler(sep, arg, false,
3127 dma_ctx);
3128 } else {
3129 /* Secure dma */
3130 dev_dbg(&sep->pdev->dev,
3131 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3132 current->pid);
3133
3134 error = sep_prepare_dcb_handler(sep, arg, true,
3135 dma_ctx);
3136 }
3137 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3138 current->pid);
3139 break;
3140 case SEP_IOCFREEDCB:
3141 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3142 current->pid);
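		/* fall through - shares handling with the secure_dma case */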
3143 case SEP_IOCFREEDCB_SECURE_DMA:
3144 dev_dbg(&sep->pdev->dev,
3145 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3146 current->pid);
3147 error = sep_free_dcb_handler(sep, dma_ctx);
3148 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3149 current->pid);
3150 break;
3151 default:
3152 error = -ENOTTY;
3153 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3154 current->pid);
3155 break;
3156 }
3157
3158 end_function:
3159 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3160
3161 return error;
3162 }
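
/*
 * Sketch of the legacy ioctl protocol enforced above, inferred from the
 * -EPROTO checks in this handler rather than from any published API doc
 * (the device node name is illustrative):
 *
 *   fd = open("/dev/sep", O_RDWR);          start/join a transaction
 *   mmap(..., fd, 0);                       map the shared message area
 *   ioctl(fd, SEP_IOCPREPAREDCB, &args);    optional, before the message
 *   ioctl(fd, SEP_IOCSENDSEPCOMMAND);       kick the SEP, at most once
 *   ... wait for the SEP reply ...
 *   ioctl(fd, SEP_IOCFREEDCB);              free DCBs, copy tail data out
 *   ioctl(fd, SEP_IOCENDTRANSACTION);       release ownership
 */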
3163
3164 /**
3165 * sep_inthandler - interrupt handler for sep device
3166 * @irq: interrupt
3167 * @dev_id: device id
3168 */
3169 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3170 {
3171 unsigned long lock_irq_flag;
3172 u32 reg_val, reg_val2 = 0;
3173 struct sep_device *sep = dev_id;
3174 irqreturn_t int_error = IRQ_HANDLED;
3175
3176 /* Are we in power save? */
3177 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3178 if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3179 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3180 return IRQ_NONE;
3181 }
3182 #endif
3183
3184 if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3185 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3186 return IRQ_NONE;
3187 }
3188
	/* Read the IRR register to check if this is a SEP interrupt */
3190 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3191
3192 dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3193
3194 if (reg_val & (0x1 << 13)) {
3195
3196 /* Lock and update the counter of reply messages */
3197 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3198 sep->reply_ct++;
3199 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3200
3201 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3202 sep->send_ct, sep->reply_ct);
3203
3204 /* Is this a kernel client request */
3205 if (sep->in_kernel) {
3206 tasklet_schedule(&sep->finish_tasklet);
3207 goto finished_interrupt;
3208 }
3209
3210 /* Is this printf or daemon request? */
3211 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3212 dev_dbg(&sep->pdev->dev,
3213 "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3214
3215 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3216
3217 if ((reg_val2 >> 30) & 0x1) {
3218 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3219 } else if (reg_val2 >> 31) {
3220 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3221 } else {
3222 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3223 wake_up(&sep->event_interrupt);
3224 }
3225 } else {
3226 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3227 int_error = IRQ_NONE;
3228 }
3229
3230 finished_interrupt:
3231
3232 if (int_error == IRQ_HANDLED)
3233 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3234
3235 return int_error;
3236 }
3237
3238 /**
3239 * sep_reconfig_shared_area - reconfigure shared area
3240 * @sep: pointer to struct sep_device
3241 *
3242 * Reconfig the shared area between HOST and SEP - needed in case
3243 * the DX_CC_Init function was called before OS loading.
3244 */
3245 static int sep_reconfig_shared_area(struct sep_device *sep)
3246 {
3247 int ret_val;
3248
	/* used to limit waiting for SEP */
3250 unsigned long end_time;
3251
3252 /* Send the new SHARED MESSAGE AREA to the SEP */
3253 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3254 (unsigned long long)sep->shared_bus);
3255
3256 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3257
3258 /* Poll for SEP response */
3259 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3260
3261 end_time = jiffies + (WAIT_TIME * HZ);
3262
3263 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3264 (ret_val != sep->shared_bus))
3265 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3266
3267 /* Check the return value (register) */
3268 if (ret_val != sep->shared_bus) {
3269 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3270 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3271 ret_val = -ENOMEM;
3272 } else
3273 ret_val = 0;
3274
3275 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3276
3277 return ret_val;
3278 }
3279
3280 /**
 * sep_activate_dcb_dmatables_context - takes DCB & DMA table contexts into use
3283 * @sep: SEP device
3284 * @dcb_region: DCB region copy
3285 * @dmatables_region: MLLI/DMA tables copy
3286 * @dma_ctx: DMA context for current transaction
3287 */
3288 ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3289 struct sep_dcblock **dcb_region,
3290 void **dmatables_region,
3291 struct sep_dma_context *dma_ctx)
3292 {
3293 void *dmaregion_free_start = NULL;
3294 void *dmaregion_free_end = NULL;
3295 void *dcbregion_free_start = NULL;
3296 void *dcbregion_free_end = NULL;
3297 ssize_t error = 0;
3298
3299 dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3300 current->pid);
3301
3302 if (1 > dma_ctx->nr_dcb_creat) {
3303 dev_warn(&sep->pdev->dev,
3304 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3305 current->pid, dma_ctx->nr_dcb_creat);
3306 error = -EINVAL;
3307 goto end_function;
3308 }
3309
3310 dmaregion_free_start = sep->shared_addr
3311 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3312 dmaregion_free_end = dmaregion_free_start
3313 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3314
3315 if (dmaregion_free_start
3316 + dma_ctx->dmatables_len > dmaregion_free_end) {
3317 error = -ENOMEM;
3318 goto end_function;
3319 }
3320 memcpy(dmaregion_free_start,
3321 *dmatables_region,
3322 dma_ctx->dmatables_len);
3323 /* Free MLLI table copy */
3324 kfree(*dmatables_region);
3325 *dmatables_region = NULL;
3326
3327 /* Copy thread's DCB table copy to DCB table region */
3328 dcbregion_free_start = sep->shared_addr +
3329 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3330 dcbregion_free_end = dcbregion_free_start +
3331 (SEP_MAX_NUM_SYNC_DMA_OPS *
3332 sizeof(struct sep_dcblock)) - 1;
3333
3334 if (dcbregion_free_start
3335 + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3336 > dcbregion_free_end) {
3337 error = -ENOMEM;
3338 goto end_function;
3339 }
3340
3341 memcpy(dcbregion_free_start,
3342 *dcb_region,
3343 dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3344
3345 /* Print the tables */
3346 dev_dbg(&sep->pdev->dev, "activate: input table\n");
3347 sep_debug_print_lli_tables(sep,
3348 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3349 (*dcb_region)->input_mlli_address),
3350 (*dcb_region)->input_mlli_num_entries,
3351 (*dcb_region)->input_mlli_data_size);
3352
3353 dev_dbg(&sep->pdev->dev, "activate: output table\n");
3354 sep_debug_print_lli_tables(sep,
3355 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3356 (*dcb_region)->output_mlli_address),
3357 (*dcb_region)->output_mlli_num_entries,
3358 (*dcb_region)->output_mlli_data_size);
3359
3360 dev_dbg(&sep->pdev->dev,
3361 "[PID%d] printing activated tables\n", current->pid);
3362
3363 end_function:
3364 kfree(*dmatables_region);
3365 *dmatables_region = NULL;
3366
3367 kfree(*dcb_region);
3368 *dcb_region = NULL;
3369
3370 return error;
3371 }
3372
3373 /**
3374 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3375 * @sep: SEP device
3376 * @dcb_region: DCB region buf to create for current transaction
3377 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3378 * @dma_ctx: DMA context buf to create for current transaction
3379 * @user_dcb_args: User arguments for DCB/MLLI creation
3380 * @num_dcbs: Number of DCBs to create
3381 * @secure_dma: Indicate use of IMR restricted memory secure dma
3382 */
3383 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3384 struct sep_dcblock **dcb_region,
3385 void **dmatables_region,
3386 struct sep_dma_context **dma_ctx,
3387 const struct build_dcb_struct __user *user_dcb_args,
3388 const u32 num_dcbs, bool secure_dma)
3389 {
3390 int error = 0;
3391 int i = 0;
3392 struct build_dcb_struct *dcb_args = NULL;
3393
3394 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3395 current->pid);
3396
3397 if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3398 error = -EINVAL;
3399 goto end_function;
3400 }
3401
3402 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3403 dev_warn(&sep->pdev->dev,
3404 "[PID%d] invalid number of dcbs 0x%08X\n",
3405 current->pid, num_dcbs);
3406 error = -EINVAL;
3407 goto end_function;
3408 }
3409
3410 dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3411 GFP_KERNEL);
3412 if (!dcb_args) {
3413 error = -ENOMEM;
3414 goto end_function;
3415 }
3416
3417 if (copy_from_user(dcb_args,
3418 user_dcb_args,
3419 num_dcbs * sizeof(struct build_dcb_struct))) {
3420 error = -EFAULT;
3421 goto end_function;
3422 }
3423
3424 /* Allocate thread-specific memory for DCB */
	*dcb_region = kcalloc(num_dcbs, sizeof(struct sep_dcblock),
			GFP_KERNEL);
3427 if (!(*dcb_region)) {
3428 error = -ENOMEM;
3429 goto end_function;
3430 }
3431
3432 /* Prepare DCB and MLLI table into the allocated regions */
3433 for (i = 0; i < num_dcbs; i++) {
3434 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3435 (unsigned long)dcb_args[i].app_in_address,
3436 (unsigned long)dcb_args[i].app_out_address,
3437 dcb_args[i].data_in_size,
3438 dcb_args[i].block_size,
3439 dcb_args[i].tail_block_size,
3440 dcb_args[i].is_applet,
3441 false, secure_dma,
3442 *dcb_region, dmatables_region,
3443 dma_ctx,
3444 NULL,
3445 NULL);
3446 if (error) {
3447 dev_warn(&sep->pdev->dev,
3448 "[PID%d] dma table creation failed\n",
3449 current->pid);
3450 goto end_function;
3451 }
3452
3453 if (dcb_args[i].app_in_address != 0)
3454 (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3455 }
3456
3457 end_function:
3458 kfree(dcb_args);
3459 return error;
3460
3461 }
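
/*
 * Note on the fastcall flow (assumed from the code in this file): DCBs
 * and MLLI tables are first built into thread-local buffers by
 * sep_create_dcb_dmatables_context() and only copied into the shared
 * area by sep_activate_dcb_dmatables_context() once the caller owns the
 * transaction, so a half-built table is never visible to the SEP.
 */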
3462
3463 /**
 * sep_create_dcb_dmatables_context_kernel - creates DCB & MLLI/DMA table
 * context for kernel crypto
 * @sep: SEP device
 * @dcb_region: DCB region buf to create for current transaction
 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
 * @dma_ctx: DMA context buf to create for current transaction
 * @dcb_data: kernel arguments for DCB/MLLI creation
 * @num_dcbs: Number of DCBs to create
 *
 * This does the same thing as sep_create_dcb_dmatables_context
 * except that it is used only for the kernel crypto operation. It is
 * separate because there is no user data involved; the dcb data structure
 * is specific for kernel crypto (build_dcb_struct_kernel)
 */
3477 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3478 struct sep_dcblock **dcb_region,
3479 void **dmatables_region,
3480 struct sep_dma_context **dma_ctx,
3481 const struct build_dcb_struct_kernel *dcb_data,
3482 const u32 num_dcbs)
3483 {
3484 int error = 0;
3485 int i = 0;
3486
3487 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3488 current->pid);
3489
3490 if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3491 error = -EINVAL;
3492 goto end_function;
3493 }
3494
3495 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3496 dev_warn(&sep->pdev->dev,
3497 "[PID%d] invalid number of dcbs 0x%08X\n",
3498 current->pid, num_dcbs);
3499 error = -EINVAL;
3500 goto end_function;
3501 }
3502
3503 dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3504 current->pid, num_dcbs);
3505
3506 /* Allocate thread-specific memory for DCB */
	*dcb_region = kcalloc(num_dcbs, sizeof(struct sep_dcblock),
			GFP_KERNEL);
3509 if (!(*dcb_region)) {
3510 error = -ENOMEM;
3511 goto end_function;
3512 }
3513
3514 /* Prepare DCB and MLLI table into the allocated regions */
3515 for (i = 0; i < num_dcbs; i++) {
3516 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3517 (unsigned long)dcb_data->app_in_address,
3518 (unsigned long)dcb_data->app_out_address,
3519 dcb_data->data_in_size,
3520 dcb_data->block_size,
3521 dcb_data->tail_block_size,
3522 dcb_data->is_applet,
3523 true,
3524 false,
3525 *dcb_region, dmatables_region,
3526 dma_ctx,
3527 dcb_data->src_sg,
3528 dcb_data->dst_sg);
3529 if (error) {
3530 dev_warn(&sep->pdev->dev,
3531 "[PID%d] dma table creation failed\n",
3532 current->pid);
3533 goto end_function;
3534 }
3535 }
3536
3537 end_function:
3538 return error;
3539
3540 }
3541
3542 /**
3543 * sep_activate_msgarea_context - Takes the message area context into use
3544 * @sep: SEP device
3545 * @msg_region: Message area context buf
3546 * @msg_len: Message area context buffer size
3547 */
3548 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3549 void **msg_region,
3550 const size_t msg_len)
3551 {
3552 dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3553 current->pid);
3554
3555 if (!msg_region || !(*msg_region) ||
3556 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3557 dev_warn(&sep->pdev->dev,
3558 "[PID%d] invalid act msgarea len 0x%08zX\n",
3559 current->pid, msg_len);
3560 return -EINVAL;
3561 }
3562
3563 memcpy(sep->shared_addr, *msg_region, msg_len);
3564
3565 return 0;
3566 }
3567
3568 /**
3569 * sep_create_msgarea_context - Creates message area context
3570 * @sep: SEP device
3571 * @msg_region: Msg area region buf to create for current transaction
3572 * @msg_user: Content for msg area region from user
3573 * @msg_len: Message area size
3574 */
3575 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3576 void **msg_region,
3577 const void __user *msg_user,
3578 const size_t msg_len)
3579 {
3580 int error = 0;
3581
3582 dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3583 current->pid);
3584
3585 if (!msg_region ||
3586 !msg_user ||
3587 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3588 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3589 dev_warn(&sep->pdev->dev,
3590 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3591 current->pid, msg_len);
3592 error = -EINVAL;
3593 goto end_function;
3594 }
3595
3596 /* Allocate thread-specific memory for message buffer */
3597 *msg_region = kzalloc(msg_len, GFP_KERNEL);
3598 if (!(*msg_region)) {
3599 error = -ENOMEM;
3600 goto end_function;
3601 }
3602
	/* Copy the data passed to write() into the allocated message buffer */
3604 if (copy_from_user(*msg_region, msg_user, msg_len)) {
3605 error = -EFAULT;
3606 goto end_function;
3607 }
3608
3609 end_function:
3610 if (error && msg_region) {
3611 kfree(*msg_region);
3612 *msg_region = NULL;
3613 }
3614
3615 return error;
3616 }
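
/*
 * As with the DCB helpers above, the message area is staged in a
 * thread-local buffer (sep_create_msgarea_context) and copied into the
 * shared area (sep_activate_msgarea_context) only by the transaction
 * owner.
 */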
3617
3618
3619 /**
3620 * sep_read - Returns results of an operation for fastcall interface
3621 * @filp: File pointer
3622 * @buf_user: User buffer for storing results
3623 * @count_user: User buffer size
3624 * @offset: File offset, not supported
3625 *
3626 * The implementation does not support reading in chunks, all data must be
3627 * consumed during a single read system call.
3628 */
3629 static ssize_t sep_read(struct file *filp,
3630 char __user *buf_user, size_t count_user,
3631 loff_t *offset)
3632 {
3633 struct sep_private_data * const private_data = filp->private_data;
3634 struct sep_call_status *call_status = &private_data->call_status;
3635 struct sep_device *sep = private_data->device;
3636 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3637 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3638 ssize_t error = 0, error_tmp = 0;
3639
3640 /* Am I the process that owns the transaction? */
3641 error = sep_check_transaction_owner(sep);
3642 if (error) {
3643 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3644 current->pid);
3645 goto end_function;
3646 }
3647
3648 /* Check that the user has called the necessary APIs */
3649 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3650 &call_status->status)) {
3651 dev_warn(&sep->pdev->dev,
3652 "[PID%d] fastcall write not called\n",
3653 current->pid);
3654 error = -EPROTO;
3655 goto end_function_error;
3656 }
3657
3658 if (!buf_user) {
3659 dev_warn(&sep->pdev->dev,
3660 "[PID%d] null user buffer\n",
3661 current->pid);
3662 error = -EINVAL;
3663 goto end_function_error;
3664 }
3665
3667 /* Wait for SEP to finish */
3668 wait_event(sep->event_interrupt,
3669 test_bit(SEP_WORKING_LOCK_BIT,
3670 &sep->in_use_flags) == 0);
3671
3672 sep_dump_message(sep);
3673
3674 dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3675 current->pid, count_user);
3676
3677 /* In case the user has allocated a bigger buffer */
3678 if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3679 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3680
3681 if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3682 error = -EFAULT;
3683 goto end_function_error;
3684 }
3685
3686 dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3687 error = count_user;
3688
3689 end_function_error:
3690 /* Copy possible tail data to user and free DCB and MLLIs */
3691 error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3692 if (error_tmp)
3693 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3694 current->pid);
3695
3696 /* End the transaction, wakeup pending ones */
3697 error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3698 my_queue_elem);
3699 if (error_tmp)
3700 dev_warn(&sep->pdev->dev,
3701 "[PID%d] ending transaction failed\n",
3702 current->pid);
3703
3704 end_function:
3705 return error;
3706 }
3707
3708 /**
3709 * sep_fastcall_args_get - Gets fastcall params from user
3710 * @sep: SEP device
3711 * @args: Parameters buffer
3712 * @buf_user: User buffer for operation parameters
3713 * @count_user: User buffer size
3714 */
3715 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3716 struct sep_fastcall_hdr *args,
3717 const char __user *buf_user,
3718 const size_t count_user)
3719 {
3720 ssize_t error = 0;
3721 size_t actual_count = 0;
3722
3723 if (!buf_user) {
3724 dev_warn(&sep->pdev->dev,
3725 "[PID%d] null user buffer\n",
3726 current->pid);
3727 error = -EINVAL;
3728 goto end_function;
3729 }
3730
3731 if (count_user < sizeof(struct sep_fastcall_hdr)) {
3732 dev_warn(&sep->pdev->dev,
3733 "[PID%d] too small message size 0x%08zX\n",
3734 current->pid, count_user);
3735 error = -EINVAL;
3736 goto end_function;
3737 }
3738
3740 if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3741 error = -EFAULT;
3742 goto end_function;
3743 }
3744
3745 if (SEP_FC_MAGIC != args->magic) {
3746 dev_warn(&sep->pdev->dev,
3747 "[PID%d] invalid fastcall magic 0x%08X\n",
3748 current->pid, args->magic);
3749 error = -EINVAL;
3750 goto end_function;
3751 }
3752
3753 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3754 current->pid, args->num_dcbs);
3755 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3756 current->pid, args->msg_len);
3757
3758 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3759 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3760 dev_warn(&sep->pdev->dev,
3761 "[PID%d] invalid message length\n",
3762 current->pid);
3763 error = -EINVAL;
3764 goto end_function;
3765 }
3766
3767 actual_count = sizeof(struct sep_fastcall_hdr)
3768 + args->msg_len
3769 + (args->num_dcbs * sizeof(struct build_dcb_struct));
3770
3771 if (actual_count != count_user) {
3772 dev_warn(&sep->pdev->dev,
3773 "[PID%d] inconsistent message "
3774 "sizes 0x%08zX vs 0x%08zX\n",
3775 current->pid, actual_count, count_user);
3776 error = -EMSGSIZE;
3777 goto end_function;
3778 }
3779
3780 end_function:
3781 return error;
3782 }
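
/*
 * For reference, the write() payload validated above is a packed
 * sequence of three parts, consumed in this order by sep_write():
 *
 *	+---------------------------------------------+  offset 0
 *	| struct sep_fastcall_hdr                     |
 *	+---------------------------------------------+
 *	| num_dcbs * struct build_dcb_struct          |  (optional)
 *	+---------------------------------------------+
 *	| message area, msg_len bytes                 |
 *	+---------------------------------------------+
 *
 * so count_user must match exactly
 *	sizeof(struct sep_fastcall_hdr) + msg_len +
 *		num_dcbs * sizeof(struct build_dcb_struct)
 * or the write is rejected with -EMSGSIZE.
 */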
3783
3784 /**
3785 * sep_write - Starts an operation for fastcall interface
3786 * @filp: File pointer
3787 * @buf_user: User buffer for operation parameters
3788 * @count_user: User buffer size
3789 * @offset: File offset, not supported
3790 *
3791 * The implementation does not support writing in chunks,
3792 * all data must be given during a single write system call.
3793 */
3794 static ssize_t sep_write(struct file *filp,
3795 const char __user *buf_user, size_t count_user,
3796 loff_t *offset)
3797 {
3798 struct sep_private_data * const private_data = filp->private_data;
3799 struct sep_call_status *call_status = &private_data->call_status;
3800 struct sep_device *sep = private_data->device;
3801 struct sep_dma_context *dma_ctx = NULL;
3802 struct sep_fastcall_hdr call_hdr = {0};
3803 void *msg_region = NULL;
3804 void *dmatables_region = NULL;
3805 struct sep_dcblock *dcb_region = NULL;
3806 ssize_t error = 0;
3807 struct sep_queue_info *my_queue_elem = NULL;
3808 bool my_secure_dma; /* are we using secure_dma (IMR)? */
3809
3810 dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3811 current->pid, sep);
3812 dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3813 current->pid, private_data);
3814
3815 error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3816 if (error)
3817 goto end_function;
3818
3819 buf_user += sizeof(struct sep_fastcall_hdr);
3820
3821 my_secure_dma = (call_hdr.secure_dma != 0);
3825
3826 /*
3827 * Control driver memory usage by limiting the number of buffers
3828 * created. Only SEP_DOUBLEBUF_USERS_LIMIT threads may progress
3829 * further at a time
3830 */
3831 dev_dbg(&sep->pdev->dev,
3832 "[PID%d] waiting for double buffering region access\n",
3833 current->pid);
3834 error = down_interruptible(&sep->sep_doublebuf);
3835 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3836 current->pid);
3837 if (error) {
3838 /* Signal received */
3839 goto end_function_error;
3840 }
3841
3843 /*
3844 * Prepare contents of the shared area regions for
3845 * the operation into temporary buffers
3846 */
3847 if (0 < call_hdr.num_dcbs) {
3848 error = sep_create_dcb_dmatables_context(sep,
3849 &dcb_region,
3850 &dmatables_region,
3851 &dma_ctx,
3852 (const struct build_dcb_struct __user *)
3853 buf_user,
3854 call_hdr.num_dcbs, my_secure_dma);
3855 if (error)
3856 goto end_function_error_doublebuf;
3857
3858 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3859 }
3860
3861 error = sep_create_msgarea_context(sep,
3862 &msg_region,
3863 buf_user,
3864 call_hdr.msg_len);
3865 if (error)
3866 goto end_function_error_doublebuf;
3867
3868 dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3869 current->pid);
3870 my_queue_elem = sep_queue_status_add(sep,
3871 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3872 (dma_ctx) ? dma_ctx->input_data_len : 0,
3873 current->pid,
3874 current->comm, sizeof(current->comm));
3875
3876 if (!my_queue_elem) {
3877 dev_dbg(&sep->pdev->dev,
3878 "[PID%d] updating queue status error\n", current->pid);
3879 error = -ENOMEM;
3880 goto end_function_error_doublebuf;
3881 }
3882
3883 /* Wait until current process gets the transaction */
3884 error = sep_wait_transaction(sep);
3885
3886 if (error) {
3887 /* Interrupted by signal, don't clear transaction */
3888 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3889 current->pid);
3890 sep_queue_status_remove(sep, &my_queue_elem);
3891 goto end_function_error_doublebuf;
3892 }
3893
3894 dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3895 current->pid);
3896 private_data->my_queue_elem = my_queue_elem;
3897
3898 /* Activate shared area regions for the transaction */
3899 error = sep_activate_msgarea_context(sep, &msg_region,
3900 call_hdr.msg_len);
3901 if (error)
3902 goto end_function_error_clear_transact;
3903
3904 sep_dump_message(sep);
3905
3906 if (0 < call_hdr.num_dcbs) {
3907 error = sep_activate_dcb_dmatables_context(sep,
3908 &dcb_region,
3909 &dmatables_region,
3910 dma_ctx);
3911 if (error)
3912 goto end_function_error_clear_transact;
3913 }
3914
3915 /* Send command to SEP */
3916 error = sep_send_command_handler(sep);
3917 if (error)
3918 goto end_function_error_clear_transact;
3919
3920 /* Store DMA context for the transaction */
3921 private_data->dma_ctx = dma_ctx;
3922 /* Update call status */
3923 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3924 error = count_user;
3925
3926 up(&sep->sep_doublebuf);
3927 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3928 current->pid);
3929
3930 goto end_function;
3931
3932 end_function_error_clear_transact:
3933 sep_end_transaction_handler(sep, &dma_ctx, call_status,
3934 &private_data->my_queue_elem);
3935
3936 end_function_error_doublebuf:
3937 up(&sep->sep_doublebuf);
3938 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3939 current->pid);
3940
3941 end_function_error:
3942 if (dma_ctx)
3943 sep_free_dma_table_data_handler(sep, &dma_ctx);
3944
3945 end_function:
3946 kfree(dcb_region);
3947 kfree(dmatables_region);
3948 kfree(msg_region);
3949
3951 }
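
/*
 * Illustrative user-space sketch of a fastcall round trip; this is not
 * part of the driver, and the "/dev/sep" node name is an assumption
 * (the registered name is SEP_DEV_NAME). It assumes `msg` already
 * holds a well-formed SEP message of `msg_len` bytes, no DCBs are
 * needed, and `reply`/`reply_len` stand for the caller's result buffer:
 *
 *	struct sep_fastcall_hdr hdr = {
 *		.magic      = SEP_FC_MAGIC,
 *		.secure_dma = 0,
 *		.msg_len    = msg_len,
 *		.num_dcbs   = 0,
 *	};
 *	char buf[sizeof(hdr) + msg_len];
 *
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), msg, msg_len);
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	write(fd, buf, sizeof(buf));	// whole request in one call
 *	read(fd, reply, reply_len);	// blocks until SEP finishes
 *	close(fd);
 */
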
3952 /**
3953 * sep_seek - Handler for seek system call
3954 * @filp: File pointer
3955 * @offset: File offset
3956 * @origin: Options for offset
3957 *
3958 * Fastcall interface does not support seeking, all reads
3959 * and writes are from/to offset zero
3960 */
3961 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3962 {
3963 return -ESPIPE;
3964 }
3965
3968 /**
3969 * sep_file_operations - file operation on sep device
3970 * @sep_ioctl: ioctl handler from user space call
3971 * @sep_poll: poll handler
3972 * @sep_open: handles sep device open request
3973 * @sep_release: handles sep device release request
3974 * @sep_mmap: handles memory mapping requests
3975 * @sep_read: handles read request on sep device
3976 * @sep_write: handles write request on sep device
3977 * @sep_seek: handles seek request on sep device
3978 */
3979 static const struct file_operations sep_file_operations = {
3980 .owner = THIS_MODULE,
3981 .unlocked_ioctl = sep_ioctl,
3982 .poll = sep_poll,
3983 .open = sep_open,
3984 .release = sep_release,
3985 .mmap = sep_mmap,
3986 .read = sep_read,
3987 .write = sep_write,
3988 .llseek = sep_seek,
3989 };
3990
3991 /**
3992 * sep_sysfs_read - read sysfs entry per given arguments
3993 * @filp: file pointer
3994 * @kobj: kobject pointer
3995 * @attr: binary file attributes
3996 * @buf: read to this buffer
3997 * @pos: offset to read
3998 * @count: amount of data to read
3999 *
4000 * Reads the driver's transaction status queue into the given buffer.
4001 */
4002 static ssize_t
4003 sep_sysfs_read(struct file *filp, struct kobject *kobj,
4004 struct bin_attribute *attr,
4005 char *buf, loff_t pos, size_t count)
4006 {
4007 unsigned long lck_flags;
4008 size_t nleft = count;
4009 struct sep_device *sep = sep_dev;
4010 struct sep_queue_info *queue_elem = NULL;
4011 u32 queue_num = 0;
4012 u32 i = 1;
4013
4014 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4015
4016 queue_num = sep->sep_queue_num;
4017 if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4018 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4019
4021 if (count < sizeof(queue_num)
4022 + (queue_num * sizeof(struct sep_queue_data))) {
4023 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4024 return -EINVAL;
4025 }
4026
4027 memcpy(buf, &queue_num, sizeof(queue_num));
4028 buf += sizeof(queue_num);
4029 nleft -= sizeof(queue_num);
4030
4031 list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4032 if (i++ > queue_num)
4033 break;
4034
4035 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4036 nleft -= sizeof(queue_elem->data);
4037 buf += sizeof(queue_elem->data);
4038 }
4039 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4040
4041 return count - nleft;
4042 }
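
/*
 * Illustrative sketch (not part of the driver): consuming the binary
 * queue_status attribute from user space. The sysfs path below assumes
 * the misc device registers under /sys/class/misc/<SEP_DEV_NAME>/, and
 * SEP_DOUBLEBUF_USERS_LIMIT / struct sep_queue_data are taken from the
 * driver headers. The record format is the u32 element count followed
 * by that many struct sep_queue_data entries, exactly as written above:
 *
 *	unsigned char buf[sizeof(uint32_t) + SEP_DOUBLEBUF_USERS_LIMIT *
 *			  sizeof(struct sep_queue_data)];
 *	uint32_t count;
 *
 *	int fd = open("/sys/class/misc/sep/queue_status", O_RDONLY);
 *	read(fd, buf, sizeof(buf));	// one read; short buffers get -EINVAL
 *	memcpy(&count, buf, sizeof(count));
 *	// `count` sep_queue_data records follow at buf + sizeof(count)
 */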
4043
4044 /**
4045 * queue_status - binary attribute for the queue_status sysfs file
4046 * @attr: attributes (name & permissions)
4047 * @read: function pointer to read this file
4048 * @size: maximum size of binary attribute
4049 */
4050 static const struct bin_attribute queue_status = {
4051 .attr = {.name = "queue_status", .mode = 0444},
4052 .read = sep_sysfs_read,
4053 .size = sizeof(u32)
4054 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4055 };
4056
4057 /**
4058 * sep_register_driver_with_fs - register misc devices
4059 * @sep: pointer to struct sep_device
4060 *
4061 * This function registers the driver with the file system
4062 */
4063 static int sep_register_driver_with_fs(struct sep_device *sep)
4064 {
4065 int ret_val;
4066
4067 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4068 sep->miscdev_sep.name = SEP_DEV_NAME;
4069 sep->miscdev_sep.fops = &sep_file_operations;
4070
4071 ret_val = misc_register(&sep->miscdev_sep);
4072 if (ret_val) {
4073 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4074 ret_val);
4075 return ret_val;
4076 }
4077
4078 ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4079 &queue_status);
4080 if (ret_val) {
4081 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4082 ret_val);
misc_deregister(&sep->miscdev_sep);
4083 return ret_val;
4084 }
4085
4086 return ret_val;
4087 }
4088
4090 /**
4091 * sep_probe - probe a matching PCI device
4092 * @pdev: pci_device
4093 * @ent: pci_device_id
4094 *
4095 * Attempt to set up and configure a SEP device that has been
4096 * discovered by the PCI layer. Allocates all required resources.
4097 */
4098 static int sep_probe(struct pci_dev *pdev,
4099 const struct pci_device_id *ent)
4100 {
4101 int error = 0;
4102 struct sep_device *sep = NULL;
4103
4104 if (sep_dev != NULL) {
4105 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4106 return -EBUSY;
4107 }
4108
4109 /* Enable the device */
4110 error = pci_enable_device(pdev);
4111 if (error) {
4112 dev_warn(&pdev->dev, "error enabling pci device\n");
4113 goto end_function;
4114 }
4115
4116 /* Allocate the sep_device structure for this device */
4117 sep_dev = kzalloc(sizeof(struct sep_device), GFP_KERNEL);
4118 if (sep_dev == NULL) {
4119 error = -ENOMEM;
4120 goto end_function_disable_device;
4121 }
4122
4123 /*
4124 * We're going to use another variable for actually
4125 * working with the device; this way, if we have
4126 * multiple devices in the future, it would be easier
4127 * to make appropriate changes
4128 */
4129 sep = sep_dev;
4130
4131 sep->pdev = pci_dev_get(pdev);
4132
4133 init_waitqueue_head(&sep->event_transactions);
4134 init_waitqueue_head(&sep->event_interrupt);
4135 spin_lock_init(&sep->snd_rply_lck);
4136 spin_lock_init(&sep->sep_queue_lock);
4137 sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4138
4139 INIT_LIST_HEAD(&sep->sep_queue_status);
4140
4141 dev_dbg(&sep->pdev->dev,
4142 "sep probe: PCI obtained, device being prepared\n");
4143
4144 /* Set up our register area */
4145 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4146 if (!sep->reg_physical_addr) {
4147 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4148 error = -ENODEV;
4149 goto end_function_free_sep_dev;
4150 }
4151
4152 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4153 if (!sep->reg_physical_end) {
4154 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4155 error = -ENODEV;
4156 goto end_function_free_sep_dev;
4157 }
4158
4159 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4160 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4161 if (!sep->reg_addr) {
4162 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4163 error = -ENODEV;
4164 goto end_function_free_sep_dev;
4165 }
4166
4167 dev_dbg(&sep->pdev->dev,
4168 "Register area start %llx end %llx virtual %p\n",
4169 (unsigned long long)sep->reg_physical_addr,
4170 (unsigned long long)sep->reg_physical_end,
4171 sep->reg_addr);
4172
4173 /* Allocate the shared area */
4174 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4175 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4176 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4177 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4178 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4179
4180 if (sep_map_and_alloc_shared_area(sep)) {
4181 error = -ENOMEM;
4182 /* Allocation failed */
4183 goto end_function_error;
4184 }
4185
4186 /* Clear ICR register */
4187 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4188
4189 /* Set the IMR register - open only GPR 2 */
4190 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4191
4192 /* Read send/receive counters from SEP */
4193 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4194 sep->reply_ct &= 0x3FFFFFFF;
4195 sep->send_ct = sep->reply_ct;
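/*
 * Only the low 30 bits of GPR2 are kept as the counter value (mask
 * 0x3FFFFFFF), and send_ct is initialised from it so that the host
 * counter starts in step with the SEP's. The resume handlers below
 * repeat this resynchronisation.
 */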
4196
4197 /* Get the interrupt line */
4198 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4199 "sep_driver", sep);
4200
4201 if (error)
4202 goto end_function_deallocate_sep_shared_area;
4203
4204 /* The new chip requires a shared area reconfigure */
4205 error = sep_reconfig_shared_area(sep);
4206 if (error)
4207 goto end_function_free_irq;
4208
4209 sep->in_use = 1;
4210
4211 /* Finally magic up the device nodes */
4212 /* Register driver with the fs */
4213 error = sep_register_driver_with_fs(sep);
4214
4215 if (error) {
4216 dev_err(&sep->pdev->dev, "error registering dev file\n");
4217 goto end_function_free_irq;
4218 }
4219
4220 sep->in_use = 0; /* done touching the device */
4221 #ifdef SEP_ENABLE_RUNTIME_PM
4222 pm_runtime_put_noidle(&sep->pdev->dev);
4223 pm_runtime_allow(&sep->pdev->dev);
4224 pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4225 SUSPEND_DELAY);
4226 pm_runtime_use_autosuspend(&sep->pdev->dev);
4227 pm_runtime_mark_last_busy(&sep->pdev->dev);
4228 sep->power_save_setup = 1;
4229 #endif
4230 /* register kernel crypto driver */
4231 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4232 error = sep_crypto_setup();
4233 if (error) {
4234 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4235 goto end_function_free_irq;
4236 }
4237 #endif
4238 goto end_function;
4239
4240 end_function_free_irq:
4241 free_irq(pdev->irq, sep);
4242
4243 end_function_deallocate_sep_shared_area:
4244 /* De-allocate shared area */
4245 sep_unmap_and_free_shared_area(sep);
4246
4247 end_function_error:
4248 iounmap(sep->reg_addr);
4249
4250 end_function_free_sep_dev:
4251 pci_dev_put(sep_dev->pdev);
4252 kfree(sep_dev);
4253 sep_dev = NULL;
4254
4255 end_function_disable_device:
4256 pci_disable_device(pdev);
4257
4258 end_function:
4259 return error;
4260 }
4261
4262 /**
4263 * sep_remove - handles removing device from pci subsystem
4264 * @pdev: pointer to pci device
4265 *
4266 * Handles removing our SEP device from the PCI subsystem on exit or
4267 * when unloading this module. It frees all used resources and unmaps
4268 * any mapped memory regions.
4269 */
4270 static void sep_remove(struct pci_dev *pdev)
4271 {
4272 struct sep_device *sep = sep_dev;
4273
4274 /* Unregister from fs */
4275 misc_deregister(&sep->miscdev_sep);
4276
4277 /* Unregister from kernel crypto */
4278 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4279 sep_crypto_takedown();
4280 #endif
4281 /* Free the irq */
4282 free_irq(sep->pdev->irq, sep);
4283
4284 /* Free the shared area */
4285 sep_unmap_and_free_shared_area(sep_dev);
4286 iounmap(sep_dev->reg_addr);
4287
4288 #ifdef SEP_ENABLE_RUNTIME_PM
4289 if (sep->in_use) {
4290 sep->in_use = 0;
4291 pm_runtime_forbid(&sep->pdev->dev);
4292 pm_runtime_get_noresume(&sep->pdev->dev);
4293 }
4294 #endif
4295 pci_dev_put(sep_dev->pdev);
4296 kfree(sep_dev);
4297 sep_dev = NULL;
4298 }
4299
4300 /* Initialize struct pci_device_id for our driver */
4301 static const struct pci_device_id sep_pci_id_tbl[] = {
4302 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4303 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4304 {0}
4305 };
4306
4307 /* Export our pci_device_id structure to user space */
4308 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4309
4310 #ifdef SEP_ENABLE_RUNTIME_PM
4311
4312 /**
4313 * sep_pci_resume - resume routine while waking up from S3 state
4314 * @dev: pointer to sep device
4315 *
4316 * Wakes up the SEP driver when the system resumes from the S3 state
4317 * (suspend to RAM). The RAM contents remain intact.
4318 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4319 */
4320 static int sep_pci_resume(struct device *dev)
4321 {
4322 struct sep_device *sep = sep_dev;
4323
4324 dev_dbg(&sep->pdev->dev, "pci resume called\n");
4325
4326 if (sep->power_state == SEP_DRIVER_POWERON)
4327 return 0;
4328
4329 /* Clear ICR register */
4330 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4331
4332 /* Set the IMR register - open only GPR 2 */
4333 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4334
4335 /* Read send/receive counters from SEP */
4336 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4337 sep->reply_ct &= 0x3FFFFFFF;
4338 sep->send_ct = sep->reply_ct;
4339
4340 sep->power_state = SEP_DRIVER_POWERON;
4341
4342 return 0;
4343 }
4344
4345 /**
4346 * sep_pci_suspend - suspend routine while going to S3 state
4347 * @dev: pointer to sep device
4348 *
4349 * Suspends the SEP driver when the system enters the S3 state (suspend
4350 * to RAM). The RAM contents remain intact and powered during suspend.
4351 * Notes - revisit with more understanding of pm, ICR/IMR
4352 */
4353 static int sep_pci_suspend(struct device *dev)
4354 {
4355 struct sep_device *sep = sep_dev;
4356
4357 dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4358 if (sep->in_use == 1)
4359 return -EAGAIN;
4360
4361 sep->power_state = SEP_DRIVER_POWEROFF;
4362
4363 /* Clear ICR register */
4364 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4365
4366 /* Set the IMR to block all */
4367 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4368
4369 return 0;
4370 }
4371
4372 /**
4373 * sep_pm_runtime_resume - runtime resume routine
4374 * @dev: pointer to sep device
4375 *
4376 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4377 */
4378 static int sep_pm_runtime_resume(struct device *dev)
4379 {
4381 u32 retval2;
4382 u32 delay_count;
4383 struct sep_device *sep = sep_dev;
4384
4385 dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4386
4387 /*
4388 * Wait until the SCU boot is ready.
4389 * This is done by polling in steps of SCU_DELAY_ITERATION (10
4390 * microseconds each) up to SCU_DELAY_MAX (50) times. The SCU may
4391 * set this bit at any time within 500 microseconds of each power
4392 * resume.
4393 */
4394 retval2 = 0;
4395 delay_count = 0;
4396 while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4397 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4398 retval2 &= 0x00000008;
4399 if (!retval2) {
4400 udelay(SCU_DELAY_ITERATION);
4401 delay_count += 1;
4402 }
4403 }
4404
4405 if (!retval2) {
4406 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4407 return -EINVAL;
4408 }
4409
4410 /* Clear ICR register */
4411 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4412
4413 /* Set the IMR register - open only GPR 2 */
4414 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4415
4416 /* Read send/receive counters from SEP */
4417 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4418 sep->reply_ct &= 0x3FFFFFFF;
4419 sep->send_ct = sep->reply_ct;
4420
4421 return 0;
4422 }
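
/*
 * Worst-case wait in sep_pm_runtime_resume() above:
 *
 *	SCU_DELAY_MAX * SCU_DELAY_ITERATION = 50 * 10us = 500us
 *
 * which matches the documented upper bound on how late the SCU may set
 * the boot-ready bit (mask 0x00000008 in GPR3) after a power resume.
 */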
4423
4424 /**
4425 * sep_pm_runtime_suspend - runtime suspend routine
4426 * @dev: pointer to sep device
4427 *
4428 * Notes - revisit with more understanding of pm
4429 */
4430 static int sep_pm_runtime_suspend(struct device *dev)
4431 {
4432 struct sep_device *sep = sep_dev;
4433
4434 dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4435
4436 /* Clear ICR register */
4437 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4438 return 0;
4439 }
4440
4441 /**
4442 * sep_pm - power management for sep driver
4443 * @sep_pm_runtime_resume: resume - no communication with CPU & main memory
4444 * @sep_pm_runtime_suspend: suspend - no communication with CPU & main memory
4445 * @sep_pci_suspend: suspend - main memory is still ON
4446 * @sep_pci_resume: resume - main memory is still ON
4447 */
4448 static const struct dev_pm_ops sep_pm = {
4449 .runtime_resume = sep_pm_runtime_resume,
4450 .runtime_suspend = sep_pm_runtime_suspend,
4451 .resume = sep_pci_resume,
4452 .suspend = sep_pci_suspend,
4453 };
4454 #endif /* SEP_ENABLE_RUNTIME_PM */
4455
4456 /**
4457 * sep_pci_driver - registers this device with pci subsystem
4458 * @name: name identifier for this driver
4459 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4460 * @sep_probe: pointer to probe function in PCI driver
4461 * @sep_remove: pointer to remove function in PCI driver
4462 */
4463 static struct pci_driver sep_pci_driver = {
4464 #ifdef SEP_ENABLE_RUNTIME_PM
4465 .driver = {
4466 .pm = &sep_pm,
4467 },
4468 #endif
4469 .name = "sep_sec_driver",
4470 .id_table = sep_pci_id_tbl,
4471 .probe = sep_probe,
4472 .remove = sep_remove
4473 };
4474
4475 module_pci_driver(sep_pci_driver);
4476 MODULE_LICENSE("GPL");