/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
        struct list_head dev_list;
        struct list_head dev_node_string;
};

/*
 *  This is the Device Extension. Named with the prefix
 *  DRV_ since it lives in this module.
 */
struct drv_ext {
        struct list_head link;
        char sz_string[MAXREGPATHLENGTH];
};
/* ----------------------------------- Globals */
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
        u32 phys_mem_base;
        u32 phys_mem_size;
        u32 virt_mem_base;
        u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;
/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);
/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);
/* Allocate and add a node resource element
 * This function is called from .Node_Allocate. */
int drv_insert_node_res_element(void *hnode, void *node_resource,
                                void *process_ctxt)
{
        struct node_res_object **node_res_obj =
            (struct node_res_object **)node_resource;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int retval;

        *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
        if (!*node_res_obj)
                return -ENOMEM;

        (*node_res_obj)->node = hnode;
        retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
        if (retval >= 0) {
                (*node_res_obj)->id = retval;
                return 0;
        }

        /* id allocation failed; don't leak the resource element */
        kfree(*node_res_obj);
        *node_res_obj = NULL;

        if (retval == -ENOSPC) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                return -EFAULT;
        }

        pr_err("%s: OUT OF MEMORY\n", __func__);
        return -ENOMEM;
}
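/*
 * A minimal sketch of the IDR pattern used above, for reference only.
 * It assumes a standalone idr rather than the per-process
 * ctxt->node_id, and a hypothetical payload pointer "res":
 *
 *	struct idr res_idr;
 *	int id;
 *
 *	idr_init(&res_idr);
 *	id = idr_alloc(&res_idr, res, 0, 0, GFP_KERNEL);
 *	if (id >= 0)
 *		res->id = id;
 *	...
 *	res = idr_find(&res_idr, id);
 *	idr_remove(&res_idr, id);
 *	idr_destroy(&res_idr);
 *
 * idr_alloc() hands back the lowest free id (or a negative errno),
 * idr_find() maps an id back to its pointer, and idr_remove() drops
 * the mapping before idr_destroy() releases the idr's bookkeeping.
 */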
/* Release all Node resources and its context
 * Actual Node De-Allocation */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
        struct process_context *ctxt = data;
        struct node_res_object *node_res_obj = p;
        int status;
        u32 node_state;

        if (node_res_obj->node_allocated) {
                node_state = node_get_state(node_res_obj->node);
                if (node_state <= NODE_DELETING) {
                        if ((node_state == NODE_RUNNING) ||
                            (node_state == NODE_PAUSED) ||
                            (node_state == NODE_TERMINATING))
                                node_terminate
                                    (node_res_obj->node, &status);

                        node_delete(node_res_obj, ctxt);
                }
        }

        return 0;
}
/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        struct dmm_map_object *temp_map, *map_obj;
        struct dmm_rsv_object *temp_rsv, *rsv_obj;

        /* Free DMM mapped memory resources */
        list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
                status = proc_un_map(ctxt->processor,
                                     (void *)map_obj->dsp_addr, ctxt);
                if (status)
                        pr_err("%s: proc_un_map failed! status = 0x%x\n",
                               __func__, status);
        }

        /* Free DMM reserved memory resources */
        list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
                status = proc_un_reserve_memory(ctxt->processor, (void *)
                                                rsv_obj->dsp_reserved_addr,
                                                ctxt);
                if (status)
                        pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
                               __func__, status);
        }

        return status;
}
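/*
 * Why the _safe iterators above matter, as a small sketch: each loop
 * body unlinks and releases the current entry, so a plain
 * list_for_each_entry() would chase a pointer through freed memory
 * when advancing. The _safe form caches the next node first (names
 * here are illustrative):
 *
 *	struct dmm_map_object *obj, *tmp;
 *
 *	list_for_each_entry_safe(obj, tmp, &head, link) {
 *		list_del(&obj->link);
 *		kfree(obj);
 *	}
 */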
/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;

        node_res_obj->node_allocated = status;
}
/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;

        node_res_obj->heap_allocated = status;
}
/* Release all Node resources and its context
 * This is called from .bridge_release.
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
        idr_destroy(ctxt->node_id);

        return 0;
}
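/*
 * Sketch of the idr_for_each() teardown contract relied on above: the
 * callback receives each (id, pointer) pair plus the opaque data
 * argument, and must return 0 to keep the iteration going.
 * Illustrative only:
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&res_idr, free_one, NULL);
 *	idr_destroy(&res_idr);
 */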
/* Allocate the STRM resource element
 * This is called after the actual resource is allocated
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
                                     void *strm_res, void *process_ctxt)
{
        struct strm_res_object **pstrm_res =
            (struct strm_res_object **)strm_res;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int retval;

        *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
        if (*pstrm_res == NULL)
                return -EFAULT;

        (*pstrm_res)->stream = stream_obj;
        retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL);
        if (retval >= 0) {
                (*pstrm_res)->id = retval;
                return 0;
        }

        /* id allocation failed; don't leak the resource element */
        kfree(*pstrm_res);
        *pstrm_res = NULL;

        if (retval == -ENOSPC) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                return -EPERM;
        }

        pr_err("%s: OUT OF MEMORY\n", __func__);
        return -ENOMEM;
}
static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;
        struct strm_res_object *strm_res = p;
        struct stream_info strm_info;
        struct dsp_streaminfo user;
        u8 **ap_buffer = NULL;
        u8 *buf_ptr;
        u32 ul_bytes;
        u32 dw_arg;
        s32 ul_buf_size;

        if (strm_res->num_bufs) {
                ap_buffer = kmalloc((strm_res->num_bufs *
                                     sizeof(u8 *)), GFP_KERNEL);
                if (ap_buffer) {
                        strm_free_buffer(strm_res, ap_buffer,
                                         strm_res->num_bufs, ctxt);
                        kfree(ap_buffer);
                }
        }
        strm_info.user_strm = &user;
        user.number_bufs_in_stream = 0;
        strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
        while (user.number_bufs_in_stream--)
                strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
                             (u32 *)&ul_buf_size, &dw_arg);
        strm_close(strm_res, ctxt);

        return 0;
}
/* Release all Stream resources and its context
 * This is called from .bridge_release.
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
        idr_destroy(ctxt->stream_id);

        return 0;
}
/* Updating the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
        struct strm_res_object **strm_res =
            (struct strm_res_object **)strm_resources;

        (*strm_res)->num_bufs = num_bufs;

        return 0;
}
/* GPP PROCESS CLEANUP CODE END */
/*
 *  ======== drv_create ========
 *  Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = NULL;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
        if (pdrv_object) {
                /* Create and Initialize List of device objects */
                INIT_LIST_HEAD(&pdrv_object->dev_list);
                INIT_LIST_HEAD(&pdrv_object->dev_node_string);
        } else {
                status = -ENOMEM;
        }
        /* Store the DRV Object in the driver data */
        if (!status) {
                if (drv_datap) {
                        drv_datap->drv_object = (void *)pdrv_object;
                } else {
                        status = -EPERM;
                        pr_err("%s: Failed to store DRV object\n", __func__);
                }
        }

        if (!status) {
                *drv_obj = pdrv_object;
        } else {
                /* Free the DRV Object */
                kfree(pdrv_object);
        }

        return status;
}
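/*
 * Sketch of the driver-data handshake drv_create() depends on:
 * probe-time code must have attached a struct drv_data to the global
 * "bridge" device with dev_set_drvdata() for the dev_get_drvdata()
 * call above to return anything. Illustrative only:
 *
 *	struct drv_data *drv_datap = kzalloc(sizeof(*drv_datap), GFP_KERNEL);
 *
 *	if (drv_datap)
 *		dev_set_drvdata(bridge, drv_datap);
 *
 * Every later dev_get_drvdata(bridge) then yields the same pointer.
 */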
/*
 *  ======== drv_destroy ========
 *  Purpose:
 *      Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        kfree(pdrv_object);
        /* Clear the DRV Object in the driver data */
        if (drv_datap) {
                drv_datap->drv_object = NULL;
        } else {
                status = -EPERM;
                pr_err("%s: Failed to clear DRV object\n", __func__);
        }

        return status;
}
/*
 *  ======== drv_get_dev_object ========
 *  Purpose:
 *      Given an index, returns a handle to the DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
                       struct dev_object **device_obj)
{
        int status = 0;
        struct dev_object *dev_obj;
        u32 i;

        dev_obj = (struct dev_object *)drv_get_first_dev_object();
        for (i = 0; i < index; i++) {
                dev_obj = (struct dev_object *)
                    drv_get_next_dev_object((u32) dev_obj);
        }
        if (dev_obj) {
                *device_obj = (struct dev_object *)dev_obj;
        } else {
                *device_obj = NULL;
                status = -EPERM;
        }

        return status;
}
/*
 *  ======== drv_get_first_dev_object ========
 *  Purpose:
 *      Retrieve the first Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
        u32 dw_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_list))
                        dw_dev_object = (u32) pdrv_obj->dev_list.next;
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_object;
}
/*
 *  ======== drv_get_first_dev_extension ========
 *  Purpose:
 *      Retrieve the first Device Extension from an internal linked
 *      list of dev_node string pointers maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_node_string)) {
                        dw_dev_extension =
                            (u32) pdrv_obj->dev_node_string.next;
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}
/*
 *  ======== drv_get_next_dev_object ========
 *  Purpose:
 *      Retrieve the next Device Object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV, after having previously
 *      called drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object() calls.
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
        u32 dw_next_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);
        struct list_head *curr;

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_list)) {
                        curr = (struct list_head *)hdev_obj;
                        if (list_is_last(curr, &pdrv_obj->dev_list))
                                return 0;
                        dw_next_dev_object = (u32) curr->next;
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_next_dev_object;
}
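/*
 * The cast-based walk above only works because struct dev_object
 * embeds its list_head as the first member, so a device handle and
 * its list node share an address. A sketch of that layout assumption
 * (field names illustrative):
 *
 *	struct dev_object {
 *		struct list_head link;	<- must remain the first member
 *		...
 *	};
 *
 * If the link ever moved, these casts would have to become
 * list_entry()/container_of() lookups instead.
 */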
/*
 *  ======== drv_get_next_dev_extension ========
 *  Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of DevNodeString pointers maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or
 *      more drv_get_next_dev_extension() calls.
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);
        struct list_head *curr;

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if (!list_empty(&pdrv_obj->dev_node_string)) {
                        curr = (struct list_head *)dev_extension;
                        if (list_is_last(curr, &pdrv_obj->dev_node_string))
                                return 0;
                        dw_dev_extension = (u32) curr->next;
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}
/*
 *  ======== drv_insert_dev_object ========
 *  Purpose:
 *      Insert a DevObject into the list of Manager object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
                          struct dev_object *hdev_obj)
{
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

        list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

        return 0;
}
/*
 *  ======== drv_remove_dev_object ========
 *  Purpose:
 *      Search for and remove a DeviceObject from the given list of
 *      DRV objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
                          struct dev_object *hdev_obj)
{
        int status = -EPERM;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
        struct list_head *cur_elem;

        /* Search list for hdev_obj: */
        list_for_each(cur_elem, &pdrv_object->dev_list) {
                /* If found, remove it. */
                if ((struct dev_object *)cur_elem == hdev_obj) {
                        list_del(cur_elem);
                        status = 0;
                        break;
                }
        }

        return status;
}
/*
 *  ======== drv_request_resources ========
 *  Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
        int status = 0;
        struct drv_object *pdrv_object;
        struct drv_ext *pszdev_node;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        /*
         *  Allocate memory to hold the string. It lives until it is
         *  freed in drv_release_resources(). Update the driver object
         *  list.
         */

        if (!drv_datap || !drv_datap->drv_object)
                status = -ENODATA;
        else
                pdrv_object = drv_datap->drv_object;

        if (!status) {
                pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
                if (pszdev_node) {
                        strncpy(pszdev_node->sz_string,
                                (char *)dw_context, MAXREGPATHLENGTH - 1);
                        pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
                        /* Update the Driver Object List */
                        *dev_node_strg = (u32) pszdev_node->sz_string;
                        list_add_tail(&pszdev_node->link,
                                      &pdrv_object->dev_node_string);
                } else {
                        status = -ENOMEM;
                        *dev_node_strg = 0;
                }
        } else {
                dev_dbg(bridge, "%s: Failed to get Driver Object from Registry\n",
                        __func__);
                *dev_node_strg = 0;
        }

        return status;
}
/*
 *  ======== drv_release_resources ========
 *  Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
        int status = 0;
        struct drv_ext *pszdev_node;

        /*
         *  Irrespective of the status, go ahead and clean up;
         *  the loop below overwrites the status.
         */
        for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
             pszdev_node != NULL; pszdev_node = (struct drv_ext *)
             drv_get_next_dev_extension((u32) pszdev_node)) {
                if ((u32) pszdev_node == dw_context) {
                        /* Found it; delete from the Driver object list */
                        list_del(&pszdev_node->link);
                        kfree(pszdev_node);
                        break;
                }
        }

        return status;
}
/*
 *  ======== request_bridge_resources ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
        struct cfg_hostres *host_res = res;

        /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
        host_res->num_mem_windows = 2;

        /* First window is for DSP internal memory */
        dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
        dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
        dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

        /* For 24xx, the base port does not map the memory for DSP
         * internal memory. TODO: do an ioremap here. */
        /* Second window is for DSP external memory shared with MPU */

        /* These are hard-coded values */
        host_res->birq_registers = 0;
        host_res->birq_attrib = 0;
        host_res->offset_for_monitor = 0;
        host_res->chnl_offset = 0;
        /* CHNL_MAXCHANNELS */
        host_res->num_chnls = CHNL_MAXCHANNELS;
        host_res->chnl_buf_size = 0x400;

        return 0;
}
/*
 *  ======== drv_request_bridge_res_dsp ========
 *  Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
        int status = 0;
        struct cfg_hostres *host_res;
        u32 dw_buff_size;
        u32 dma_addr;
        u32 shm_size;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        dw_buff_size = sizeof(struct cfg_hostres);

        host_res = kzalloc(dw_buff_size, GFP_KERNEL);

        if (host_res != NULL) {
                request_bridge_resources(host_res);
                /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
                host_res->num_mem_windows = 4;

                host_res->mem_base[0] = 0;
                host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
                                                      OMAP_DSP_MEM1_SIZE);
                host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
                                                      OMAP_DSP_MEM2_SIZE);
                host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
                                                      OMAP_DSP_MEM3_SIZE);
                host_res->per_base = ioremap(OMAP_PER_CM_BASE,
                                             OMAP_PER_CM_SIZE);
                host_res->per_pm_base = ioremap(OMAP_PER_PRM_BASE,
                                                OMAP_PER_PRM_SIZE);
                host_res->core_pm_base = ioremap(OMAP_CORE_PRM_BASE,
                                                 OMAP_CORE_PRM_SIZE);
                host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
                                              OMAP_DMMU_SIZE);

                dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
                dev_dbg(bridge, "mem_base[1] 0x%x\n", host_res->mem_base[1]);
                dev_dbg(bridge, "mem_base[2] 0x%x\n", host_res->mem_base[2]);
                dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
                dev_dbg(bridge, "mem_base[4] 0x%x\n", host_res->mem_base[4]);
                dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

                shm_size = drv_datap->shm_size;
                if (shm_size >= 0x10000) {
                        /* Allocate physically contiguous,
                         * non-cacheable memory */
                        host_res->mem_base[1] =
                            (u32) mem_alloc_phys_mem(shm_size, 0x100000,
                                                     &dma_addr);
                        if (host_res->mem_base[1] == 0) {
                                status = -ENOMEM;
                                pr_err("shm reservation Failed\n");
                        } else {
                                host_res->mem_length[1] = shm_size;
                                host_res->mem_phys[1] = dma_addr;

                                dev_dbg(bridge, "%s: Bridge shm address 0x%x dma_addr %x size %x\n",
                                        __func__, host_res->mem_base[1],
                                        dma_addr, shm_size);
                        }
                }
                if (!status) {
                        /* These are hard-coded values */
                        host_res->birq_registers = 0;
                        host_res->birq_attrib = 0;
                        host_res->offset_for_monitor = 0;
                        host_res->chnl_offset = 0;
                        /* CHNL_MAXCHANNELS */
                        host_res->num_chnls = CHNL_MAXCHANNELS;
                        host_res->chnl_buf_size = 0x400;
                        dw_buff_size = sizeof(struct cfg_hostres);
                }
                *phost_resources = host_res;
        }
        /* End Mem alloc */
        return status;
}
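/*
 * Note that the ioremap() calls above go unchecked and the returned
 * cookie is cast to u32, which is only safe on this 32-bit platform.
 * A more defensive sketch of a single mapping step (illustrative
 * only):
 *
 *	void __iomem *va = ioremap(OMAP_DMMU_BASE, OMAP_DMMU_SIZE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	iounmap(va);
 *
 * ioremap() returns NULL on failure, and every successful mapping
 * needs a matching iounmap() at teardown.
 */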
void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
        u32 pool_virt_base;

        /* Get the virtual address for the physical memory pool passed in */
        pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

        if (!pool_virt_base) {
                pr_err("%s: external physical memory map failed\n", __func__);
                ext_phys_mem_pool_enabled = false;
        } else {
                ext_mem_pool.phys_mem_base = pool_phys_base;
                ext_mem_pool.phys_mem_size = pool_size;
                ext_mem_pool.virt_mem_base = pool_virt_base;
                ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
                ext_phys_mem_pool_enabled = true;
        }
}
void mem_ext_phys_pool_release(void)
{
        if (ext_phys_mem_pool_enabled) {
                iounmap((void *)(ext_mem_pool.virt_mem_base));
                ext_phys_mem_pool_enabled = false;
        }
}
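/*
 * Minimal usage sketch for the pool pair above, assuming a
 * platform-reserved carveout at a hypothetical physical base:
 *
 *	mem_ext_phys_pool_init(0x87000000, SZ_1M);
 *	...
 *	mem_ext_phys_pool_release();
 *
 * If the ioremap() inside init fails, ext_phys_mem_pool_enabled stays
 * false and mem_alloc_phys_mem() quietly falls back to
 * dma_alloc_coherent().
 */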
/*
 *  ======== mem_ext_phys_mem_alloc ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory from the
 *      external memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
        u32 new_alloc_ptr;
        u32 offset;
        u32 virt_addr;

        if (align == 0)
                align = 1;

        if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
                     - ext_mem_pool.next_phys_alloc_ptr)) {
                *phys_addr = 0;
                return NULL;
        }

        /* Round the allocation pointer up to the requested alignment */
        offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
        if (offset == 0)
                new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
        else
                new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
                    (align - offset);
        if ((new_alloc_ptr + bytes) <=
            (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
                /* we can allocate */
                *phys_addr = new_alloc_ptr;
                ext_mem_pool.next_phys_alloc_ptr = new_alloc_ptr + bytes;
                virt_addr = ext_mem_pool.virt_mem_base +
                    (new_alloc_ptr - ext_mem_pool.phys_mem_base);
                return (void *)virt_addr;
        }

        *phys_addr = 0;
        return NULL;
}
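/*
 * The alignment step above is the usual power-of-two bump-allocator
 * trick: ptr & (align - 1) is the misalignment and adding
 * (align - offset) rounds up to the next boundary. A compact
 * equivalent, valid only when align is a power of two:
 *
 *	new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr + align - 1)
 *			& ~(align - 1);
 *
 * For a non-power-of-two align, the bitmask form breaks down and a
 * modulo-based round-up would be needed instead.
 */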
/*
 *  ======== mem_alloc_phys_mem ========
 *  Purpose:
 *      Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
                         u32 *physical_address)
{
        void *va_mem = NULL;
        dma_addr_t pa_mem;

        if (byte_size > 0) {
                if (ext_phys_mem_pool_enabled) {
                        va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
                                                        (u32 *)&pa_mem);
                } else {
                        va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
                                                    GFP_KERNEL);
                }
                if (va_mem == NULL)
                        *physical_address = 0;
                else
                        *physical_address = pa_mem;
        }

        return va_mem;
}
/*
 *  ======== mem_free_phys_mem ========
 *  Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
                       u32 byte_size)
{
        if (!ext_phys_mem_pool_enabled)
                dma_free_coherent(NULL, byte_size, virtual_address,
                                  physical_address);
}
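/*
 * Sketch of the coherent-DMA pairing assumed by the two functions
 * above. Pool allocations are never individually freed (the bump
 * allocator keeps no free list), so only the dma_alloc_coherent()
 * path has a matching release:
 *
 *	dma_addr_t pa;
 *	void *va = dma_alloc_coherent(NULL, size, &pa, GFP_KERNEL);
 *
 *	if (va)
 *		dma_free_coherent(NULL, size, va, pa);
 *
 * The size, virtual address, and dma handle passed to the free must
 * be exactly the ones returned by the allocation.
 */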