/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
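/*
 * drv_object ties the module's two bookkeeping lists together: dev_list
 * links the dev_object instances (see drv_insert_dev_object(), which casts
 * the object to its embedded list head), and dev_node_string links the
 * drv_ext device-node path strings added by drv_request_resources().
 */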
struct drv_object {
	struct list_head dev_list;
	struct list_head dev_node_string;
};

/*
 * This is the Device Extension. Named with the prefix
 * DRV_ since it lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};

/* ----------------------------------- Globals */
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;

/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);

/* Allocate and add a node resource element
 * This function is called from .Node_Allocate. */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->node = hnode;
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
			     &(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
				     &(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	if (status)
		kfree(*node_res_obj);

	return status;
}
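
/*
 * For reference: the idr_pre_get()/idr_get_new() retry sequence above is
 * the classic idr idiom of this kernel generation. On kernels providing
 * idr_alloc() (v3.9+) the same allocation could be written in one call;
 * a minimal sketch, not used by this driver:
 *
 *	retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
 *	if (retval < 0)
 *		return retval;	// -ENOMEM or -ENOSPC
 *	(*node_res_obj)->id = retval;
 */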

/* Release all Node resources and its context
 * Actual Node De-Allocation */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->node);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate
				    (node_res_obj->node, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}

/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;
	struct dmm_rsv_object *temp_rsv, *rsv_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->processor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed! status = 0x%x\n",
			       __func__, status);
	}

	/* Free DMM reserved memory resources */
	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
		status = proc_un_reserve_memory(ctxt->processor, (void *)
						rsv_obj->dsp_reserved_addr,
						ctxt);
		if (status)
			pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
			       __func__, status);
	}
	return status;
}
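
/*
 * Both loops above use list_for_each_entry_safe() because proc_un_map()
 * and proc_un_reserve_memory() are expected to unlink and free the current
 * element; the _safe variant caches the next pointer before the body runs,
 * so iteration survives the removal.
 */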

/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	node_res_obj->node_allocated = status;
}

/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	node_res_obj->heap_allocated = status;
}

/* Release all Node resources and its context
 * This is called from .bridge_release.
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}

/* Allocate the STRM resource element
 * This is called after the actual resource is allocated
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
				     void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -EFAULT;
		goto func_end;
	}

	(*pstrm_res)->stream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
			     &(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
				     &(*pstrm_res)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
	}

func_end:
	return status;
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				     sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res,
					 ap_buffer,
					 strm_res->num_bufs,
					 ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}
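
/*
 * Teardown order in drv_proc_free_strm_res(): free the driver-side buffer
 * array first, then reclaim whatever buffers are still queued on the
 * stream (strm_get_info() reports the outstanding count), and only then
 * close the stream handle.
 */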
267
4ec09714
ER
268/* Release all Stream resources and its context
269* This is called from .bridge_release.
270 */
271int drv_remove_all_strm_res_elements(void *process_ctxt)
7d55524d 272{
4ec09714 273 struct process_context *ctxt = process_ctxt;
7d55524d 274
4ec09714
ER
275 idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
276 idr_destroy(ctxt->stream_id);
7d55524d 277
4ec09714 278 return 0;
7d55524d
ORL
279}

/* Updating the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	int status = 0;
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;
	return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 * ======== drv_create ========
 * Purpose:
 *      DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and Initialize List of device objects */
		INIT_LIST_HEAD(&pdrv_object->dev_list);
		INIT_LIST_HEAD(&pdrv_object->dev_node_string);
	} else {
		status = -ENOMEM;
	}
	/* Store the DRV Object in the driver data */
	if (!status) {
		if (drv_datap) {
			drv_datap->drv_object = (void *)pdrv_object;
		} else {
			status = -EPERM;
			pr_err("%s: Failed to store DRV object\n", __func__);
		}
	}

	if (!status) {
		*drv_obj = pdrv_object;
	} else {
		/* Free the DRV Object */
		kfree(pdrv_object);
	}

	return status;
}

/*
 * ======== drv_destroy ========
 * Purpose:
 *      Invoked during bridge de-initialization
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	kfree(pdrv_object);
	/* Update the DRV Object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to store DRV object\n", __func__);
	}

	return status;
}

/*
 * ======== drv_get_dev_object ========
 * Purpose:
 *      Given an index, returns a handle to DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
		       struct dev_object **device_obj)
{
	int status = 0;
	struct dev_object *dev_obj;
	u32 i;

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj =
		    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}

/*
 * ======== drv_get_first_dev_object ========
 * Purpose:
 *      Retrieve the first Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list))
			dw_dev_object = (u32) pdrv_obj->dev_list.next;
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_object;
}

/*
 * ======== drv_get_first_dev_extension ========
 * Purpose:
 *      Retrieve the first Device Extension from an internal linked list
 *      of pointers to dev_node Strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) pdrv_obj->dev_node_string.next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 * ======== drv_get_next_dev_object ========
 * Purpose:
 *      Retrieve the next Device Object handle from an internal linked list
 *      of DEV_OBJECTs maintained by DRV, after having previously called
 *      drv_get_first_dev_object() and zero or more drv_get_next_dev_object()
 *      calls.
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list)) {
			curr = (struct list_head *)hdev_obj;
			if (list_is_last(curr, &pdrv_obj->dev_list))
				return 0;
			dw_next_dev_object = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_next_dev_object;
}

/*
 * ======== drv_get_next_dev_extension ========
 * Purpose:
 *      Retrieve the next Device Extension from an internal linked list
 *      of pointers to DevNodeString maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension() calls.
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			curr = (struct list_head *)dev_extension;
			if (list_is_last(curr, &pdrv_obj->dev_node_string))
				return 0;
			dw_dev_extension = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 * ======== drv_insert_dev_object ========
 * Purpose:
 *      Insert a DevObject into the list of Manager object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

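	/*
	 * Note the cast below: struct dev_object is expected to begin with
	 * a struct list_head, so the object pointer itself serves as the
	 * list node. Changing that layout would break this list handling.
	 */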
	list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

	return 0;
}

/*
 * ======== drv_remove_dev_object ========
 * Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	/* Search list for p_proc_object: */
	list_for_each(cur_elem, &pdrv_object->dev_list) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			list_del(cur_elem);
			status = 0;
			break;
		}
	}

	return status;
}

/*
 * ======== drv_request_resources ========
 * Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/*
	 * Allocate memory to hold the string. It lives until it is freed
	 * in drv_release_resources(). Update the driver object list.
	 */

	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the Driver Object List */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			list_add_tail(&pszdev_node->link,
				      &pdrv_object->dev_node_string);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry\n",
			__func__);
		*dev_node_strg = 0;
	}

	return status;
}

/*
 * ======== drv_release_resources ========
 * Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_ext *pszdev_node;

	/*
	 * Irrespective of the incoming status, go ahead and clean up: walk
	 * the device-extension list and drop the entry matching dw_context.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if ((u32) pszdev_node == dw_context) {
			/* Found it */
			/* Delete from the Driver object list */
			list_del(&pszdev_node->link);
			kfree(pszdev_node);
			break;
		}
	}
	return status;
}

/*
 * ======== request_bridge_resources ========
 * Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
	dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
	dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

	/* for 24xx base port is not mapping the memory for DSP
	 * internal memory TODO Do an ioremap here */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->offset_for_monitor = 0;
	host_res->chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->num_chnls = CHNL_MAXCHANNELS;
	host_res->chnl_buf_size = 0x400;

	return 0;
}

/*
 * ======== drv_request_bridge_res_dsp ========
 * Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

		host_res->mem_base[0] = 0;
		host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
						      OMAP_DSP_MEM1_SIZE);
		host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
						      OMAP_DSP_MEM2_SIZE);
		host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
						      OMAP_DSP_MEM3_SIZE);
		host_res->per_base = ioremap(OMAP_PER_CM_BASE,
					     OMAP_PER_CM_SIZE);
		host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
						      OMAP_PER_PRM_SIZE);
		host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
						       OMAP_CORE_PRM_SIZE);
		host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
					      OMAP_DMMU_SIZE);

		dev_dbg(bridge, "mem_base[0] 0x%x\n",
			host_res->mem_base[0]);
		dev_dbg(bridge, "mem_base[1] 0x%x\n",
			host_res->mem_base[1]);
		dev_dbg(bridge, "mem_base[2] 0x%x\n",
			host_res->mem_base[2]);
		dev_dbg(bridge, "mem_base[3] 0x%x\n",
			host_res->mem_base[3]);
		dev_dbg(bridge, "mem_base[4] 0x%x\n",
			host_res->mem_base[4]);
		dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate Physically contiguous,
			 * non-cacheable memory */
			host_res->mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->mem_length[1] = shm_size;
				host_res->mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x "
					"dma_addr %x size %x\n", __func__,
					host_res->mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->offset_for_monitor = 0;
			host_res->chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->num_chnls = CHNL_MAXCHANNELS;
			host_res->chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}
	/* End Mem alloc */
	return status;
}

void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* get the virtual address for the physical memory pool passed */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if ((void **)pool_virt_base == NULL) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}
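
/*
 * Illustrative caller only (the base/size values here are made up):
 * platform setup code would hand a physically contiguous carve-out to the
 * pool once at boot, and release it on driver teardown:
 *
 *	mem_ext_phys_pool_init(0x87000000, 0x600000);	// 6 MB pool
 *	...
 *	mem_ext_phys_pool_release();
 */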

void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}

/*
 * ======== mem_ext_phys_mem_alloc ========
 * Purpose:
 *      Allocate physically contiguous, uncached memory from the external
 *      memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		*phys_addr = 0;
		return NULL;
	} else {
		offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
		if (offset == 0)
			new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
		else
			new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
			    (align - offset);
		if ((new_alloc_ptr + bytes) <=
		    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
			/* we can allocate */
			*phys_addr = new_alloc_ptr;
			ext_mem_pool.next_phys_alloc_ptr =
			    new_alloc_ptr + bytes;
			virt_addr =
			    ext_mem_pool.virt_mem_base + (new_alloc_ptr -
							  ext_mem_pool.
							  phys_mem_base);
			return (void *)virt_addr;
		} else {
			*phys_addr = 0;
			return NULL;
		}
	}
}
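
/*
 * Worked example of the bump-allocator arithmetic above (align must be a
 * power of two, since the code masks with align - 1): with
 * next_phys_alloc_ptr == 0x87001003 and align == 0x100, offset is 0x03,
 * so the block is placed at the rounded-up address 0x87001100 and
 * next_phys_alloc_ptr advances to 0x87001100 + bytes. Individual blocks
 * are never freed; the pointer only moves forward until
 * mem_ext_phys_pool_release().
 */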

/*
 * ======== mem_alloc_phys_mem ========
 * Purpose:
 *      Allocate physically contiguous, uncached memory
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
			 u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled) {
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		} else
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
						    GFP_KERNEL);
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}
	return va_mem;
}
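
/*
 * Note: passing a NULL device to dma_alloc_coherent()/dma_free_coherent()
 * was tolerated by the kernels this driver targeted; current kernels
 * require a real struct device so that DMA masks and ops can be resolved,
 * so a port would pass the bridge's platform device here instead.
 */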

/*
 * ======== mem_free_phys_mem ========
 * Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}