4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge resource allocation module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 /* ----------------------------------- Host OS */
20 #include <dspbridge/host_os.h>
22 /* ----------------------------------- DSP/BIOS Bridge */
23 #include <dspbridge/std.h>
24 #include <dspbridge/dbdefs.h>
26 /* ----------------------------------- Trace & Debug */
27 #include <dspbridge/dbc.h>
29 /* ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/cfg.h>
31 #include <dspbridge/list.h>
33 /* ----------------------------------- This */
34 #include <dspbridge/drv.h>
35 #include <dspbridge/dev.h>
37 #include <dspbridge/node.h>
38 #include <dspbridge/proc.h>
39 #include <dspbridge/strm.h>
40 #include <dspbridge/nodepriv.h>
41 #include <dspbridge/dspchnl.h>
42 #include <dspbridge/resourcecleanup.h>
44 /* ----------------------------------- Defines, Data Structures, Typedefs */
/*
 * DRV module object: holds the list of device objects and the list of
 * device-node path strings registered with the driver.
 * NOTE(review): reconstructed from fragmented source; member usage
 * (pdrv_object->dev_list / ->dev_node_string) throughout this file
 * grounds the two fields — confirm against the original header.
 */
struct drv_object {
	struct lst_list *dev_list;	/* Device objects (struct dev_object) */
	struct lst_list *dev_node_string;	/* Device-node strings (struct drv_ext) */
};
51 * This is the Device Extension. Named with the Prefix
52 * DRV_ since it is living in this module
55 struct list_head link
;
56 char sz_string
[MAXREGPATHLENGTH
];
59 /* ----------------------------------- Globals */
61 static bool ext_phys_mem_pool_enabled
;
62 struct ext_phys_mem_pool
{
66 u32 next_phys_alloc_ptr
;
68 static struct ext_phys_mem_pool ext_mem_pool
;
70 /* ----------------------------------- Function Prototypes */
71 static int request_bridge_resources(struct cfg_hostres
*res
);
74 /* GPP PROCESS CLEANUP CODE */
76 static int drv_proc_free_node_res(void *process_ctxt
);
78 /* Allocate and add a node resource element
79 * This function is called from .Node_Allocate. */
80 int drv_insert_node_res_element(void *hnode
, void *node_resource
,
83 struct node_res_object
**node_res_obj
=
84 (struct node_res_object
**)node_resource
;
85 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
87 struct node_res_object
*temp_node_res
= NULL
;
89 *node_res_obj
= kzalloc(sizeof(struct node_res_object
), GFP_KERNEL
);
90 if (*node_res_obj
== NULL
)
93 if (DSP_SUCCEEDED(status
)) {
94 if (mutex_lock_interruptible(&ctxt
->node_mutex
)) {
98 (*node_res_obj
)->hnode
= hnode
;
99 if (ctxt
->node_list
!= NULL
) {
100 temp_node_res
= ctxt
->node_list
;
101 while (temp_node_res
->next
!= NULL
)
102 temp_node_res
= temp_node_res
->next
;
104 temp_node_res
->next
= *node_res_obj
;
106 ctxt
->node_list
= *node_res_obj
;
108 mutex_unlock(&ctxt
->node_mutex
);
114 /* Release all Node resources and its context
115 * This is called from .Node_Delete. */
116 int drv_remove_node_res_element(void *node_resource
, void *process_ctxt
)
118 struct node_res_object
*node_res_obj
=
119 (struct node_res_object
*)node_resource
;
120 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
121 struct node_res_object
*temp_node
;
124 if (mutex_lock_interruptible(&ctxt
->node_mutex
))
126 temp_node
= ctxt
->node_list
;
127 if (temp_node
== node_res_obj
) {
128 ctxt
->node_list
= node_res_obj
->next
;
130 while (temp_node
&& temp_node
->next
!= node_res_obj
)
131 temp_node
= temp_node
->next
;
135 temp_node
->next
= node_res_obj
->next
;
137 mutex_unlock(&ctxt
->node_mutex
);
142 /* Actual Node De-Allocation */
143 static int drv_proc_free_node_res(void *process_ctxt
)
145 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
147 struct node_res_object
*node_list
= NULL
;
148 struct node_res_object
*node_res_obj
= NULL
;
151 node_list
= ctxt
->node_list
;
152 while (node_list
!= NULL
) {
153 node_res_obj
= node_list
;
154 node_list
= node_list
->next
;
155 if (node_res_obj
->node_allocated
) {
156 node_state
= node_get_state(node_res_obj
->hnode
);
157 if (node_state
<= NODE_DELETING
) {
158 if ((node_state
== NODE_RUNNING
) ||
159 (node_state
== NODE_PAUSED
) ||
160 (node_state
== NODE_TERMINATING
))
161 status
= node_terminate
162 (node_res_obj
->hnode
, &status
);
164 status
= node_delete(node_res_obj
->hnode
, ctxt
);
171 /* Release all Mapped and Reserved DMM resources */
172 int drv_remove_all_dmm_res_elements(void *process_ctxt
)
174 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
176 struct dmm_map_object
*temp_map
, *map_obj
;
177 struct dmm_rsv_object
*temp_rsv
, *rsv_obj
;
179 /* Free DMM mapped memory resources */
180 list_for_each_entry_safe(map_obj
, temp_map
, &ctxt
->dmm_map_list
, link
) {
181 status
= proc_un_map(ctxt
->hprocessor
,
182 (void *)map_obj
->dsp_addr
, ctxt
);
183 if (DSP_FAILED(status
))
184 pr_err("%s: proc_un_map failed!"
185 " status = 0x%xn", __func__
, status
);
188 /* Free DMM reserved memory resources */
189 list_for_each_entry_safe(rsv_obj
, temp_rsv
, &ctxt
->dmm_rsv_list
, link
) {
190 status
= proc_un_reserve_memory(ctxt
->hprocessor
, (void *)
191 rsv_obj
->dsp_reserved_addr
,
193 if (DSP_FAILED(status
))
194 pr_err("%s: proc_un_reserve_memory failed!"
195 " status = 0x%xn", __func__
, status
);
200 /* Update Node allocation status */
201 void drv_proc_node_update_status(void *node_resource
, s32 status
)
203 struct node_res_object
*node_res_obj
=
204 (struct node_res_object
*)node_resource
;
205 DBC_ASSERT(node_resource
!= NULL
);
206 node_res_obj
->node_allocated
= status
;
209 /* Update Node Heap status */
210 void drv_proc_node_update_heap_status(void *node_resource
, s32 status
)
212 struct node_res_object
*node_res_obj
=
213 (struct node_res_object
*)node_resource
;
214 DBC_ASSERT(node_resource
!= NULL
);
215 node_res_obj
->heap_allocated
= status
;
218 /* Release all Node resources and its context
219 * This is called from .bridge_release.
221 int drv_remove_all_node_res_elements(void *process_ctxt
)
223 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
225 struct node_res_object
*temp_node2
= NULL
;
226 struct node_res_object
*temp_node
= NULL
;
228 drv_proc_free_node_res(ctxt
);
229 temp_node
= ctxt
->node_list
;
230 while (temp_node
!= NULL
) {
231 temp_node2
= temp_node
;
232 temp_node
= temp_node
->next
;
235 ctxt
->node_list
= NULL
;
239 /* Getting the node resource element */
240 int drv_get_node_res_element(void *hnode
, void *node_resource
,
243 struct node_res_object
**node_res
=
244 (struct node_res_object
**)node_resource
;
245 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
247 struct node_res_object
*temp_node2
= NULL
;
248 struct node_res_object
*temp_node
= NULL
;
250 if (mutex_lock_interruptible(&ctxt
->node_mutex
))
253 temp_node
= ctxt
->node_list
;
254 while ((temp_node
!= NULL
) && (temp_node
->hnode
!= hnode
)) {
255 temp_node2
= temp_node
;
256 temp_node
= temp_node
->next
;
259 mutex_unlock(&ctxt
->node_mutex
);
261 if (temp_node
!= NULL
)
262 *node_res
= temp_node
;
269 /* Allocate the STRM resource element
270 * This is called after the actual resource is allocated
272 int drv_proc_insert_strm_res_element(void *stream_handle
,
273 void *hstrm_res
, void *process_ctxt
)
275 struct strm_res_object
**pstrm_res
=
276 (struct strm_res_object
**)hstrm_res
;
277 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
279 struct strm_res_object
*temp_strm_res
= NULL
;
281 *pstrm_res
= kzalloc(sizeof(struct strm_res_object
), GFP_KERNEL
);
282 if (*pstrm_res
== NULL
)
285 if (DSP_SUCCEEDED(status
)) {
286 if (mutex_lock_interruptible(&ctxt
->strm_mutex
)) {
290 (*pstrm_res
)->hstream
= stream_handle
;
291 if (ctxt
->pstrm_list
!= NULL
) {
292 temp_strm_res
= ctxt
->pstrm_list
;
293 while (temp_strm_res
->next
!= NULL
)
294 temp_strm_res
= temp_strm_res
->next
;
296 temp_strm_res
->next
= *pstrm_res
;
298 ctxt
->pstrm_list
= *pstrm_res
;
300 mutex_unlock(&ctxt
->strm_mutex
);
305 /* Release Stream resource element context
306 * This function called after the actual resource is freed
308 int drv_proc_remove_strm_res_element(void *hstrm_res
, void *process_ctxt
)
310 struct strm_res_object
*pstrm_res
= (struct strm_res_object
*)hstrm_res
;
311 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
312 struct strm_res_object
*temp_strm_res
;
315 if (mutex_lock_interruptible(&ctxt
->strm_mutex
))
317 temp_strm_res
= ctxt
->pstrm_list
;
319 if (ctxt
->pstrm_list
== pstrm_res
) {
320 ctxt
->pstrm_list
= pstrm_res
->next
;
322 while (temp_strm_res
&& temp_strm_res
->next
!= pstrm_res
)
323 temp_strm_res
= temp_strm_res
->next
;
324 if (temp_strm_res
== NULL
)
327 temp_strm_res
->next
= pstrm_res
->next
;
329 mutex_unlock(&ctxt
->strm_mutex
);
334 /* Release all Stream resources and its context
335 * This is called from .bridge_release.
337 int drv_remove_all_strm_res_elements(void *process_ctxt
)
339 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
341 struct strm_res_object
*strm_res
= NULL
;
342 struct strm_res_object
*strm_tmp
= NULL
;
343 struct stream_info strm_info
;
344 struct dsp_streaminfo user
;
345 u8
**ap_buffer
= NULL
;
351 strm_tmp
= ctxt
->pstrm_list
;
354 strm_tmp
= strm_tmp
->next
;
355 if (strm_res
->num_bufs
) {
356 ap_buffer
= kmalloc((strm_res
->num_bufs
*
357 sizeof(u8
*)), GFP_KERNEL
);
359 status
= strm_free_buffer(strm_res
->hstream
,
366 strm_info
.user_strm
= &user
;
367 user
.number_bufs_in_stream
= 0;
368 strm_get_info(strm_res
->hstream
, &strm_info
, sizeof(strm_info
));
369 while (user
.number_bufs_in_stream
--)
370 strm_reclaim(strm_res
->hstream
, &buf_ptr
, &ul_bytes
,
371 (u32
*) &ul_buf_size
, &dw_arg
);
372 status
= strm_close(strm_res
->hstream
, ctxt
);
377 /* Getting the stream resource element */
378 int drv_get_strm_res_element(void *stream_obj
, void *hstrm_res
,
381 struct strm_res_object
**strm_res
=
382 (struct strm_res_object
**)hstrm_res
;
383 struct process_context
*ctxt
= (struct process_context
*)process_ctxt
;
385 struct strm_res_object
*temp_strm2
= NULL
;
386 struct strm_res_object
*temp_strm
;
388 if (mutex_lock_interruptible(&ctxt
->strm_mutex
))
391 temp_strm
= ctxt
->pstrm_list
;
392 while ((temp_strm
!= NULL
) && (temp_strm
->hstream
!= stream_obj
)) {
393 temp_strm2
= temp_strm
;
394 temp_strm
= temp_strm
->next
;
397 mutex_unlock(&ctxt
->strm_mutex
);
399 if (temp_strm
!= NULL
)
400 *strm_res
= temp_strm
;
407 /* Updating the stream resource element */
408 int drv_proc_update_strm_res(u32 num_bufs
, void *hstrm_res
)
411 struct strm_res_object
**strm_res
=
412 (struct strm_res_object
**)hstrm_res
;
414 (*strm_res
)->num_bufs
= num_bufs
;
418 /* GPP PROCESS CLEANUP CODE END */
421 * ======== = drv_create ======== =
423 * DRV Object gets created only once during Driver Loading.
425 int drv_create(OUT
struct drv_object
**drv_obj
)
428 struct drv_object
*pdrv_object
= NULL
;
430 DBC_REQUIRE(drv_obj
!= NULL
);
431 DBC_REQUIRE(refs
> 0);
433 pdrv_object
= kzalloc(sizeof(struct drv_object
), GFP_KERNEL
);
435 /* Create and Initialize List of device objects */
436 pdrv_object
->dev_list
= kzalloc(sizeof(struct lst_list
),
438 if (pdrv_object
->dev_list
) {
439 /* Create and Initialize List of device Extension */
440 pdrv_object
->dev_node_string
=
441 kzalloc(sizeof(struct lst_list
), GFP_KERNEL
);
442 if (!(pdrv_object
->dev_node_string
)) {
445 INIT_LIST_HEAD(&pdrv_object
->
446 dev_node_string
->head
);
447 INIT_LIST_HEAD(&pdrv_object
->dev_list
->head
);
455 /* Store the DRV Object in the Registry */
456 if (DSP_SUCCEEDED(status
))
457 status
= cfg_set_object((u32
) pdrv_object
, REG_DRV_OBJECT
);
458 if (DSP_SUCCEEDED(status
)) {
459 *drv_obj
= pdrv_object
;
461 kfree(pdrv_object
->dev_list
);
462 kfree(pdrv_object
->dev_node_string
);
463 /* Free the DRV Object */
467 DBC_ENSURE(DSP_FAILED(status
) || pdrv_object
);
472 * ======== drv_exit ========
474 * Discontinue usage of the DRV module.
478 DBC_REQUIRE(refs
> 0);
482 DBC_ENSURE(refs
>= 0);
486 * ======== = drv_destroy ======== =
488 * Invoked during bridge de-initialization
490 int drv_destroy(struct drv_object
*driver_obj
)
493 struct drv_object
*pdrv_object
= (struct drv_object
*)driver_obj
;
495 DBC_REQUIRE(refs
> 0);
496 DBC_REQUIRE(pdrv_object
);
499 * Delete the List if it exists.Should not come here
500 * as the drv_remove_dev_object and the Last drv_request_resources
501 * removes the list if the lists are empty.
503 kfree(pdrv_object
->dev_list
);
504 kfree(pdrv_object
->dev_node_string
);
506 /* Update the DRV Object in Registry to be 0 */
507 (void)cfg_set_object(0, REG_DRV_OBJECT
);
513 * ======== drv_get_dev_object ========
515 * Given a index, returns a handle to DevObject from the list.
517 int drv_get_dev_object(u32 index
, struct drv_object
*hdrv_obj
,
518 struct dev_object
**device_obj
)
521 #ifdef CONFIG_TIDSPBRIDGE_DEBUG
522 /* used only for Assertions and debug messages */
523 struct drv_object
*pdrv_obj
= (struct drv_object
*)hdrv_obj
;
525 struct dev_object
*dev_obj
;
527 DBC_REQUIRE(pdrv_obj
);
528 DBC_REQUIRE(device_obj
!= NULL
);
529 DBC_REQUIRE(index
>= 0);
530 DBC_REQUIRE(refs
> 0);
531 DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj
->dev_list
)));
533 dev_obj
= (struct dev_object
*)drv_get_first_dev_object();
534 for (i
= 0; i
< index
; i
++) {
536 (struct dev_object
*)drv_get_next_dev_object((u32
) dev_obj
);
539 *device_obj
= (struct dev_object
*)dev_obj
;
549 * ======== drv_get_first_dev_object ========
551 * Retrieve the first Device Object handle from an internal linked list of
552 * of DEV_OBJECTs maintained by DRV.
554 u32
drv_get_first_dev_object(void)
556 u32 dw_dev_object
= 0;
557 struct drv_object
*pdrv_obj
;
559 if (DSP_SUCCEEDED(cfg_get_object((u32
*) &pdrv_obj
, REG_DRV_OBJECT
))) {
560 if ((pdrv_obj
->dev_list
!= NULL
) &&
561 !LST_IS_EMPTY(pdrv_obj
->dev_list
))
562 dw_dev_object
= (u32
) lst_first(pdrv_obj
->dev_list
);
565 return dw_dev_object
;
569 * ======== DRV_GetFirstDevNodeString ========
571 * Retrieve the first Device Extension from an internal linked list of
572 * of Pointer to dev_node Strings maintained by DRV.
574 u32
drv_get_first_dev_extension(void)
576 u32 dw_dev_extension
= 0;
577 struct drv_object
*pdrv_obj
;
579 if (DSP_SUCCEEDED(cfg_get_object((u32
*) &pdrv_obj
, REG_DRV_OBJECT
))) {
581 if ((pdrv_obj
->dev_node_string
!= NULL
) &&
582 !LST_IS_EMPTY(pdrv_obj
->dev_node_string
)) {
584 (u32
) lst_first(pdrv_obj
->dev_node_string
);
588 return dw_dev_extension
;
592 * ======== drv_get_next_dev_object ========
594 * Retrieve the next Device Object handle from an internal linked list of
595 * of DEV_OBJECTs maintained by DRV, after having previously called
596 * drv_get_first_dev_object() and zero or more DRV_GetNext.
598 u32
drv_get_next_dev_object(u32 hdev_obj
)
600 u32 dw_next_dev_object
= 0;
601 struct drv_object
*pdrv_obj
;
603 DBC_REQUIRE(hdev_obj
!= 0);
605 if (DSP_SUCCEEDED(cfg_get_object((u32
*) &pdrv_obj
, REG_DRV_OBJECT
))) {
607 if ((pdrv_obj
->dev_list
!= NULL
) &&
608 !LST_IS_EMPTY(pdrv_obj
->dev_list
)) {
609 dw_next_dev_object
= (u32
) lst_next(pdrv_obj
->dev_list
,
614 return dw_next_dev_object
;
618 * ======== drv_get_next_dev_extension ========
620 * Retrieve the next Device Extension from an internal linked list of
621 * of pointer to DevNodeString maintained by DRV, after having previously
622 * called drv_get_first_dev_extension() and zero or more
623 * drv_get_next_dev_extension().
625 u32
drv_get_next_dev_extension(u32 dev_extension
)
627 u32 dw_dev_extension
= 0;
628 struct drv_object
*pdrv_obj
;
630 DBC_REQUIRE(dev_extension
!= 0);
632 if (DSP_SUCCEEDED(cfg_get_object((u32
*) &pdrv_obj
, REG_DRV_OBJECT
))) {
633 if ((pdrv_obj
->dev_node_string
!= NULL
) &&
634 !LST_IS_EMPTY(pdrv_obj
->dev_node_string
)) {
636 (u32
) lst_next(pdrv_obj
->dev_node_string
,
637 (struct list_head
*)dev_extension
);
641 return dw_dev_extension
;
645 * ======== drv_init ========
647 * Initialize DRV module private state.
651 s32 ret
= 1; /* function return value */
653 DBC_REQUIRE(refs
>= 0);
658 DBC_ENSURE((ret
&& (refs
> 0)) || (!ret
&& (refs
>= 0)));
664 * ======== drv_insert_dev_object ========
666 * Insert a DevObject into the list of Manager object.
668 int drv_insert_dev_object(struct drv_object
*driver_obj
,
669 struct dev_object
*hdev_obj
)
672 struct drv_object
*pdrv_object
= (struct drv_object
*)driver_obj
;
674 DBC_REQUIRE(refs
> 0);
675 DBC_REQUIRE(hdev_obj
!= NULL
);
676 DBC_REQUIRE(pdrv_object
);
677 DBC_ASSERT(pdrv_object
->dev_list
);
679 lst_put_tail(pdrv_object
->dev_list
, (struct list_head
*)hdev_obj
);
681 DBC_ENSURE(DSP_SUCCEEDED(status
)
682 && !LST_IS_EMPTY(pdrv_object
->dev_list
));
688 * ======== drv_remove_dev_object ========
690 * Search for and remove a DeviceObject from the given list of DRV
693 int drv_remove_dev_object(struct drv_object
*driver_obj
,
694 struct dev_object
*hdev_obj
)
697 struct drv_object
*pdrv_object
= (struct drv_object
*)driver_obj
;
698 struct list_head
*cur_elem
;
700 DBC_REQUIRE(refs
> 0);
701 DBC_REQUIRE(pdrv_object
);
702 DBC_REQUIRE(hdev_obj
!= NULL
);
704 DBC_REQUIRE(pdrv_object
->dev_list
!= NULL
);
705 DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object
->dev_list
));
707 /* Search list for p_proc_object: */
708 for (cur_elem
= lst_first(pdrv_object
->dev_list
); cur_elem
!= NULL
;
709 cur_elem
= lst_next(pdrv_object
->dev_list
, cur_elem
)) {
710 /* If found, remove it. */
711 if ((struct dev_object
*)cur_elem
== hdev_obj
) {
712 lst_remove_elem(pdrv_object
->dev_list
, cur_elem
);
717 /* Remove list if empty. */
718 if (LST_IS_EMPTY(pdrv_object
->dev_list
)) {
719 kfree(pdrv_object
->dev_list
);
720 pdrv_object
->dev_list
= NULL
;
722 DBC_ENSURE((pdrv_object
->dev_list
== NULL
) ||
723 !LST_IS_EMPTY(pdrv_object
->dev_list
));
729 * ======== drv_request_resources ========
731 * Requests resources from the OS.
733 int drv_request_resources(u32 dw_context
, u32
*dev_node_strg
)
736 struct drv_object
*pdrv_object
;
737 struct drv_ext
*pszdev_node
;
739 DBC_REQUIRE(dw_context
!= 0);
740 DBC_REQUIRE(dev_node_strg
!= NULL
);
743 * Allocate memory to hold the string. This will live untill
744 * it is freed in the Release resources. Update the driver object
748 status
= cfg_get_object((u32
*) &pdrv_object
, REG_DRV_OBJECT
);
749 if (DSP_SUCCEEDED(status
)) {
750 pszdev_node
= kzalloc(sizeof(struct drv_ext
), GFP_KERNEL
);
752 lst_init_elem(&pszdev_node
->link
);
753 strncpy(pszdev_node
->sz_string
,
754 (char *)dw_context
, MAXREGPATHLENGTH
- 1);
755 pszdev_node
->sz_string
[MAXREGPATHLENGTH
- 1] = '\0';
756 /* Update the Driver Object List */
757 *dev_node_strg
= (u32
) pszdev_node
->sz_string
;
758 lst_put_tail(pdrv_object
->dev_node_string
,
759 (struct list_head
*)pszdev_node
);
765 dev_dbg(bridge
, "%s: Failed to get Driver Object from Registry",
770 DBC_ENSURE((DSP_SUCCEEDED(status
) && dev_node_strg
!= NULL
&&
771 !LST_IS_EMPTY(pdrv_object
->dev_node_string
)) ||
772 (DSP_FAILED(status
) && *dev_node_strg
== 0));
778 * ======== drv_release_resources ========
780 * Releases resources from the OS.
782 int drv_release_resources(u32 dw_context
, struct drv_object
*hdrv_obj
)
785 struct drv_object
*pdrv_object
= (struct drv_object
*)hdrv_obj
;
786 struct drv_ext
*pszdev_node
;
789 * Irrespective of the status go ahead and clean it
790 * The following will over write the status.
792 for (pszdev_node
= (struct drv_ext
*)drv_get_first_dev_extension();
793 pszdev_node
!= NULL
; pszdev_node
= (struct drv_ext
*)
794 drv_get_next_dev_extension((u32
) pszdev_node
)) {
795 if (!pdrv_object
->dev_node_string
) {
796 /* When this could happen? */
799 if ((u32
) pszdev_node
== dw_context
) {
801 /* Delete from the Driver object list */
802 lst_remove_elem(pdrv_object
->dev_node_string
,
803 (struct list_head
*)pszdev_node
);
804 kfree((void *)pszdev_node
);
807 /* Delete the List if it is empty */
808 if (LST_IS_EMPTY(pdrv_object
->dev_node_string
)) {
809 kfree(pdrv_object
->dev_node_string
);
810 pdrv_object
->dev_node_string
= NULL
;
817 * ======== request_bridge_resources ========
819 * Reserves shared memory for bridge.
821 static int request_bridge_resources(struct cfg_hostres
*res
)
824 struct cfg_hostres
*host_res
= res
;
826 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
827 host_res
->num_mem_windows
= 2;
829 /* First window is for DSP internal memory */
830 host_res
->dw_sys_ctrl_base
= ioremap(OMAP_SYSC_BASE
, OMAP_SYSC_SIZE
);
831 dev_dbg(bridge
, "dw_mem_base[0] 0x%x\n", host_res
->dw_mem_base
[0]);
832 dev_dbg(bridge
, "dw_mem_base[3] 0x%x\n", host_res
->dw_mem_base
[3]);
833 dev_dbg(bridge
, "dw_dmmu_base %p\n", host_res
->dw_dmmu_base
);
835 /* for 24xx base port is not mapping the mamory for DSP
836 * internal memory TODO Do a ioremap here */
837 /* Second window is for DSP external memory shared with MPU */
839 /* These are hard-coded values */
840 host_res
->birq_registers
= 0;
841 host_res
->birq_attrib
= 0;
842 host_res
->dw_offset_for_monitor
= 0;
843 host_res
->dw_chnl_offset
= 0;
844 /* CHNL_MAXCHANNELS */
845 host_res
->dw_num_chnls
= CHNL_MAXCHANNELS
;
846 host_res
->dw_chnl_buf_size
= 0x400;
852 * ======== drv_request_bridge_res_dsp ========
854 * Reserves shared memory for bridge.
856 int drv_request_bridge_res_dsp(void **phost_resources
)
859 struct cfg_hostres
*host_res
;
863 struct drv_data
*drv_datap
= dev_get_drvdata(bridge
);
865 dw_buff_size
= sizeof(struct cfg_hostres
);
867 host_res
= kzalloc(dw_buff_size
, GFP_KERNEL
);
869 if (host_res
!= NULL
) {
870 request_bridge_resources(host_res
);
871 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
872 host_res
->num_mem_windows
= 4;
874 host_res
->dw_mem_base
[0] = 0;
875 host_res
->dw_mem_base
[2] = (u32
) ioremap(OMAP_DSP_MEM1_BASE
,
877 host_res
->dw_mem_base
[3] = (u32
) ioremap(OMAP_DSP_MEM2_BASE
,
879 host_res
->dw_mem_base
[4] = (u32
) ioremap(OMAP_DSP_MEM3_BASE
,
881 host_res
->dw_per_base
= ioremap(OMAP_PER_CM_BASE
,
883 host_res
->dw_per_pm_base
= (u32
) ioremap(OMAP_PER_PRM_BASE
,
885 host_res
->dw_core_pm_base
= (u32
) ioremap(OMAP_CORE_PRM_BASE
,
887 host_res
->dw_dmmu_base
= ioremap(OMAP_DMMU_BASE
,
890 dev_dbg(bridge
, "dw_mem_base[0] 0x%x\n",
891 host_res
->dw_mem_base
[0]);
892 dev_dbg(bridge
, "dw_mem_base[1] 0x%x\n",
893 host_res
->dw_mem_base
[1]);
894 dev_dbg(bridge
, "dw_mem_base[2] 0x%x\n",
895 host_res
->dw_mem_base
[2]);
896 dev_dbg(bridge
, "dw_mem_base[3] 0x%x\n",
897 host_res
->dw_mem_base
[3]);
898 dev_dbg(bridge
, "dw_mem_base[4] 0x%x\n",
899 host_res
->dw_mem_base
[4]);
900 dev_dbg(bridge
, "dw_dmmu_base %p\n", host_res
->dw_dmmu_base
);
902 shm_size
= drv_datap
->shm_size
;
903 if (shm_size
>= 0x10000) {
904 /* Allocate Physically contiguous,
905 * non-cacheable memory */
906 host_res
->dw_mem_base
[1] =
907 (u32
) mem_alloc_phys_mem(shm_size
, 0x100000,
909 if (host_res
->dw_mem_base
[1] == 0) {
911 pr_err("shm reservation Failed\n");
913 host_res
->dw_mem_length
[1] = shm_size
;
914 host_res
->dw_mem_phys
[1] = dma_addr
;
916 dev_dbg(bridge
, "%s: Bridge shm address 0x%x "
917 "dma_addr %x size %x\n", __func__
,
918 host_res
->dw_mem_base
[1],
922 if (DSP_SUCCEEDED(status
)) {
923 /* These are hard-coded values */
924 host_res
->birq_registers
= 0;
925 host_res
->birq_attrib
= 0;
926 host_res
->dw_offset_for_monitor
= 0;
927 host_res
->dw_chnl_offset
= 0;
928 /* CHNL_MAXCHANNELS */
929 host_res
->dw_num_chnls
= CHNL_MAXCHANNELS
;
930 host_res
->dw_chnl_buf_size
= 0x400;
931 dw_buff_size
= sizeof(struct cfg_hostres
);
933 *phost_resources
= host_res
;
939 void mem_ext_phys_pool_init(u32 pool_phys_base
, u32 pool_size
)
943 /* get the virtual address for the physical memory pool passed */
944 pool_virt_base
= (u32
) ioremap(pool_phys_base
, pool_size
);
946 if ((void **)pool_virt_base
== NULL
) {
947 pr_err("%s: external physical memory map failed\n", __func__
);
948 ext_phys_mem_pool_enabled
= false;
950 ext_mem_pool
.phys_mem_base
= pool_phys_base
;
951 ext_mem_pool
.phys_mem_size
= pool_size
;
952 ext_mem_pool
.virt_mem_base
= pool_virt_base
;
953 ext_mem_pool
.next_phys_alloc_ptr
= pool_phys_base
;
954 ext_phys_mem_pool_enabled
= true;
958 void mem_ext_phys_pool_release(void)
960 if (ext_phys_mem_pool_enabled
) {
961 iounmap((void *)(ext_mem_pool
.virt_mem_base
));
962 ext_phys_mem_pool_enabled
= false;
967 * ======== mem_ext_phys_mem_alloc ========
969 * Allocate physically contiguous, uncached memory from external memory pool
972 static void *mem_ext_phys_mem_alloc(u32 bytes
, u32 align
, OUT u32
* phys_addr
)
981 if (bytes
> ((ext_mem_pool
.phys_mem_base
+ ext_mem_pool
.phys_mem_size
)
982 - ext_mem_pool
.next_phys_alloc_ptr
)) {
986 offset
= (ext_mem_pool
.next_phys_alloc_ptr
& (align
- 1));
988 new_alloc_ptr
= ext_mem_pool
.next_phys_alloc_ptr
;
990 new_alloc_ptr
= (ext_mem_pool
.next_phys_alloc_ptr
) +
992 if ((new_alloc_ptr
+ bytes
) <=
993 (ext_mem_pool
.phys_mem_base
+ ext_mem_pool
.phys_mem_size
)) {
994 /* we can allocate */
995 *phys_addr
= new_alloc_ptr
;
996 ext_mem_pool
.next_phys_alloc_ptr
=
997 new_alloc_ptr
+ bytes
;
999 ext_mem_pool
.virt_mem_base
+ (new_alloc_ptr
-
1002 return (void *)virt_addr
;
1011 * ======== mem_alloc_phys_mem ========
1013 * Allocate physically contiguous, uncached memory
1015 void *mem_alloc_phys_mem(u32 byte_size
, u32 align_mask
,
1016 OUT u32
*physical_address
)
1018 void *va_mem
= NULL
;
1021 if (byte_size
> 0) {
1022 if (ext_phys_mem_pool_enabled
) {
1023 va_mem
= mem_ext_phys_mem_alloc(byte_size
, align_mask
,
1026 va_mem
= dma_alloc_coherent(NULL
, byte_size
, &pa_mem
,
1029 *physical_address
= 0;
1031 *physical_address
= pa_mem
;
1037 * ======== mem_free_phys_mem ========
1039 * Free the given block of physically contiguous memory.
1041 void mem_free_phys_mem(void *virtual_address
, u32 physical_address
,
1044 DBC_REQUIRE(virtual_address
!= NULL
);
1046 if (!ext_phys_mem_pool_enabled
)
1047 dma_free_coherent(NULL
, byte_size
, virtual_address
,